From cec9fd61b48674d35feb7a5d42efa14839ec0d87 Mon Sep 17 00:00:00 2001 From: Rafael Franzke Date: Wed, 11 Oct 2023 14:19:28 +0200 Subject: [PATCH 1/8] Revendor g/g to v1.84.0 --- Makefile | 4 + go.mod | 53 +- go.sum | 109 +- vendor/github.com/BurntSushi/toml/.gitignore | 2 +- vendor/github.com/BurntSushi/toml/COMPATIBLE | 1 - vendor/github.com/BurntSushi/toml/README.md | 187 +- vendor/github.com/BurntSushi/toml/decode.go | 312 +-- .../BurntSushi/toml/decode_go116.go | 4 +- vendor/github.com/BurntSushi/toml/doc.go | 22 +- vendor/github.com/BurntSushi/toml/encode.go | 250 +- vendor/github.com/BurntSushi/toml/error.go | 120 +- vendor/github.com/BurntSushi/toml/lex.go | 24 +- vendor/github.com/BurntSushi/toml/meta.go | 11 +- vendor/github.com/BurntSushi/toml/parse.go | 62 +- .../cyphar/filepath-securejoin/.travis.yml | 8 +- .../cyphar/filepath-securejoin/README.md | 20 +- .../cyphar/filepath-securejoin/VERSION | 2 +- .../cyphar/filepath-securejoin/join.go | 25 +- .../cyphar/filepath-securejoin/vendor.conf | 1 - .../emicklei/go-restful/v3/CHANGES.md | 11 +- .../emicklei/go-restful/v3/README.md | 5 +- .../emicklei/go-restful/v3/route.go | 17 +- .../emicklei/go-restful/v3/route_builder.go | 45 +- .../etcd-druid/api/v1alpha1/types_etcd.go | 9 +- .../api/v1alpha1/zz_generated.deepcopy.go | 5 + .../extensions/pkg/controller/cmd/options.go | 19 +- .../operatingsystemconfig/actuator.go | 4 +- .../controller/operatingsystemconfig/bash.go | 108 + .../operatingsystemconfig/oscommon/README.md | 4 + .../oscommon/actuator/actuator_reconcile.go | 6 +- .../oscommon/actuator/actuator_restore.go | 2 +- .../oscommon/actuator/actuator_util.go | 4 + .../operatingsystemconfig/oscommon/add.go | 6 + .../operatingsystemconfig/reconciler.go | 59 +- .../extensions/pkg/util/shoot_clients.go | 2 +- .../gardener/hack/.ci/component_descriptor | 20 +- .../gardener/hack/add-license-header.sh | 2 - .../gardener/gardener/hack/check-charts.sh | 2 +- .../gardener/gardener/hack/check-docforge.sh | 39 - .../gardener/gardener/hack/check-generate.sh | 16 +- .../gardener/gardener/hack/check-imports.sh | 18 + .../gardener/hack/check-license-header.sh | 4 +- .../hack/check-skaffold-deps-for-binary.sh | 122 + .../gardener/hack/check-skaffold-deps.sh | 86 +- .../gardener/gardener/hack/check.sh | 2 +- .../gardener/hack/compare-k8s-api-groups.sh | 90 + .../gardener/hack/compute-k8s-controllers.sh | 175 ++ .../gardener/hack/gardener-extensions-down.sh | 5 +- .../gardener/gardener/hack/generate-crds.sh | 177 +- .../hack/generate-logcheck-symlinks.sh | 36 + .../gardener/gardener/hack/generate.sh | 11 +- .../gardener/gardener/hack/hook-me.sh | 6 +- .../gardener/gardener/hack/install.sh | 2 +- .../gardener/gardener/hack/kind-up.sh | 6 +- .../gardener/gardener/hack/test-cover.sh | 2 +- .../gardener/gardener/hack/test-e2e-local.sh | 3 + .../gardener/hack/test-integration.sh | 2 +- .../github.com/gardener/gardener/hack/test.sh | 2 +- .../gardener/gardener/hack/tools.go | 1 + .../gardener/gardener/hack/tools.mk | 40 +- .../gardener/gardener/hack/update-codegen.sh | 51 +- .../gardener/gardener/hack/update-protobuf.sh | 27 +- .../gardener/hack/update-skaffold-deps.sh | 20 + .../gardener/gardener/pkg/apis/core/types.go | 4 +- .../pkg/apis/core/types_cloudprofile.go | 19 + .../apis/core/types_controllerregistration.go | 2 +- .../gardener/pkg/apis/core/types_seed.go | 24 - .../gardener/pkg/apis/core/types_shoot.go | 4 + .../core/v1beta1/constants/types_constants.go | 24 +- .../pkg/apis/core/v1beta1/defaults.go | 8 +- 
.../core/v1beta1/defaults_cloudprofile.go | 8 + .../pkg/apis/core/v1beta1/generated.pb.go | 2116 +++++++---------- .../pkg/apis/core/v1beta1/generated.proto | 45 +- .../pkg/apis/core/v1beta1/helper/condition.go | 20 +- .../pkg/apis/core/v1beta1/helper/helper.go | 335 ++- .../apis/core/v1beta1/types_cloudprofile.go | 20 + .../pkg/apis/core/v1beta1/types_common.go | 4 +- .../v1beta1/types_controllerregistration.go | 2 +- .../pkg/apis/core/v1beta1/types_seed.go | 31 +- .../pkg/apis/core/v1beta1/types_shoot.go | 6 + .../core/v1beta1/zz_generated.conversion.go | 70 +- .../core/v1beta1/zz_generated.deepcopy.go | 57 +- .../core/v1beta1/zz_generated.defaults.go | 1 + .../pkg/apis/core/zz_generated.deepcopy.go | 57 +- .../extensions/v1alpha1/helper/filecodec.go | 35 +- .../v1alpha1/types_operatingsystemconfig.go | 52 +- .../v1alpha1/zz_generated.deepcopy.go | 42 +- .../pkg/apis/operator/v1alpha1/types.go | 2 +- .../pkg/apis/resources/v1alpha1/types.go | 3 + .../gardener/pkg/chartrenderer/default.go | 26 +- .../gardener/pkg/chartrenderer/renderer.go | 3 - .../pkg/client/kubernetes/cache/aggregator.go | 20 +- .../pkg/client/kubernetes/cache/errors.go | 53 - .../pkg/client/kubernetes/chartapplier.go | 45 +- .../gardener/pkg/client/kubernetes/client.go | 25 +- .../pkg/extensions/customresources.go | 2 +- .../pkg/gardenlet/apis/config/types.go | 5 + .../apis/config/v1alpha1/defaults.go | 3 + .../gardenlet/apis/config/v1alpha1/types.go | 7 + .../v1alpha1/zz_generated.conversion.go | 4 + .../config/v1alpha1/zz_generated.deepcopy.go | 10 + .../apis/config/zz_generated.deepcopy.go | 10 + .../gardener/pkg/utils/errors/unwrap.go | 4 +- .../gardener/gardener/pkg/utils/flow/flow.go | 64 +- .../gardener/gardener/pkg/utils/flow/graph.go | 4 + .../gardener/pkg/utils/flow/taskfn.go | 18 - .../gardener/pkg/utils/gardener/machines.go | 170 ++ .../gardener/pkg/utils/gardener/seed.go | 15 - .../gardener/pkg/utils/gardener/shoot.go | 15 + .../pkg/utils/kubernetes/controllers.go | 118 + .../pkg/utils/kubernetes/health/daemonset.go | 2 +- .../gardener/pkg/utils/kubernetes/object.go | 52 +- .../gardener/pkg/utils/secrets/certificate.go | 4 +- .../gardener/pkg/utils/secrets/vpn_tlsauth.go | 1 - .../gardener/pkg/utils/version/version.go | 38 + .../pkg/apis/machine/register.go | 20 +- .../pkg/apis/machine/types.go | 588 +---- .../v1alpha1/alicoud_machineclass_types.go | 111 - .../v1alpha1/aws_machineclass_types.go | 212 -- .../v1alpha1/azure_machineclass_types.go | 223 -- .../pkg/apis/machine/v1alpha1/defaults.go | 25 - .../v1alpha1/gcp_machineclass_types.go | 123 - .../apis/machine/v1alpha1/machine_types.go | 10 +- .../v1alpha1/openstack_machineclass_types.go | 114 - .../v1alpha1/packet_machineclass_types.go | 76 - .../pkg/apis/machine/v1alpha1/register.go | 20 +- .../pkg/apis/machine/v1alpha1/shared_types.go | 4 +- .../v1alpha1/zz_generated.conversion.go | 1894 +-------------- .../machine/v1alpha1/zz_generated.deepcopy.go | 1294 +--------- .../pkg/apis/machine/zz_generated.deepcopy.go | 1294 +--------- .../pkg/apis/machine/zz_generated.defaults.go | 14 + .../github.com/google/go-cmp/cmp/compare.go | 38 +- .../cmp/{export_unsafe.go => export.go} | 5 - .../google/go-cmp/cmp/export_panic.go | 16 - .../value/{pointer_unsafe.go => pointer.go} | 3 - .../cmp/internal/value/pointer_purego.go | 34 - .../github.com/google/go-cmp/cmp/options.go | 84 +- vendor/github.com/google/go-cmp/cmp/path.go | 46 +- .../google/go-cmp/cmp/report_reflect.go | 2 +- .../github.com/hashicorp/errwrap/errwrap.go | 9 + .../ironcore-dev/vgopath/.gitignore 
| 33 + .../ironcore-dev/vgopath/.golangci.yaml | 20 + .../ironcore-dev/vgopath/CODEOWNERS | 2 + .../ironcore-dev/vgopath/CODE_OF_CONDUCT.md | 1 + .../github.com/ironcore-dev/vgopath/LICENSE | 201 ++ .../github.com/ironcore-dev/vgopath/Makefile | 105 + .../github.com/ironcore-dev/vgopath/README.md | 36 + .../vgopath/internal/cmd/version/version.go | 40 + .../vgopath/internal/cmd/vgopath/exec/exec.go | 109 + .../vgopath/internal/cmd/vgopath/vgopath.go | 63 + .../vgopath/internal/link/link.go | 284 +++ .../vgopath/internal/module/module.go | 184 ++ .../vgopath/internal/version/version.go | 38 + .../github.com/ironcore-dev/vgopath/main.go | 27 + vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 29 + vendor/github.com/onsi/ginkgo/v2/README.md | 6 +- vendor/github.com/onsi/ginkgo/v2/core_dsl.go | 94 +- .../onsi/ginkgo/v2/ginkgo/outline/ginkgo.go | 4 +- .../onsi/ginkgo/v2/internal/global/init.go | 11 + .../onsi/ginkgo/v2/internal/group.go | 5 +- .../onsi/ginkgo/v2/internal/node.go | 10 +- .../onsi/ginkgo/v2/internal/suite.go | 24 + .../onsi/ginkgo/v2/internal/writer.go | 6 +- .../github.com/onsi/ginkgo/v2/types/config.go | 1 + .../github.com/onsi/ginkgo/v2/types/errors.go | 4 +- .../github.com/onsi/ginkgo/v2/types/types.go | 4 +- .../onsi/ginkgo/v2/types/version.go | 2 +- vendor/github.com/onsi/gomega/CHANGELOG.md | 29 + vendor/github.com/onsi/gomega/gomega_dsl.go | 10 +- vendor/github.com/onsi/gomega/matchers.go | 39 +- .../gomega/matchers/have_http_body_matcher.go | 9 +- .../gomega/matchers/match_error_matcher.go | 25 +- .../x/crypto/chacha20/chacha_arm64.go | 4 +- .../x/crypto/chacha20/chacha_arm64.s | 4 +- .../x/crypto/chacha20/chacha_noasm.go | 4 +- vendor/golang.org/x/crypto/ed25519/ed25519.go | 71 - vendor/golang.org/x/crypto/ssh/certs.go | 38 +- vendor/golang.org/x/crypto/ssh/client_auth.go | 96 +- vendor/golang.org/x/crypto/ssh/doc.go | 1 + vendor/golang.org/x/crypto/ssh/handshake.go | 44 +- vendor/golang.org/x/crypto/ssh/keys.go | 376 ++- vendor/golang.org/x/crypto/ssh/messages.go | 14 + vendor/golang.org/x/crypto/ssh/mux.go | 6 + vendor/golang.org/x/crypto/ssh/server.go | 11 +- vendor/golang.org/x/net/http2/server.go | 78 +- vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 2 +- vendor/golang.org/x/sys/cpu/hwcap_linux.go | 4 +- .../sys/internal/unsafeheader/unsafeheader.go | 30 - vendor/golang.org/x/sys/unix/ptrace_darwin.go | 6 - vendor/golang.org/x/sys/unix/ptrace_ios.go | 6 - vendor/golang.org/x/sys/unix/syscall_aix.go | 2 - .../golang.org/x/sys/unix/syscall_darwin.go | 186 -- .../x/sys/unix/syscall_darwin_amd64.go | 1 - .../x/sys/unix/syscall_darwin_arm64.go | 1 - .../x/sys/unix/syscall_dragonfly.go | 198 -- .../golang.org/x/sys/unix/syscall_freebsd.go | 192 -- vendor/golang.org/x/sys/unix/syscall_linux.go | 115 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 261 -- .../golang.org/x/sys/unix/syscall_openbsd.go | 74 - .../golang.org/x/sys/unix/syscall_solaris.go | 18 - .../x/sys/unix/syscall_zos_s390x.go | 1 - vendor/golang.org/x/sys/unix/zerrors_linux.go | 9 + .../x/sys/unix/zerrors_linux_386.go | 2 + .../x/sys/unix/zerrors_linux_amd64.go | 2 + .../x/sys/unix/zerrors_linux_arm.go | 2 + .../x/sys/unix/zerrors_linux_arm64.go | 2 + .../x/sys/unix/zerrors_linux_loong64.go | 4 + .../x/sys/unix/zerrors_linux_mips.go | 2 + .../x/sys/unix/zerrors_linux_mips64.go | 2 + .../x/sys/unix/zerrors_linux_mips64le.go | 2 + .../x/sys/unix/zerrors_linux_mipsle.go | 2 + .../x/sys/unix/zerrors_linux_ppc.go | 2 + .../x/sys/unix/zerrors_linux_ppc64.go | 2 + .../x/sys/unix/zerrors_linux_ppc64le.go | 2 + 
.../x/sys/unix/zerrors_linux_riscv64.go | 2 + .../x/sys/unix/zerrors_linux_s390x.go | 2 + .../x/sys/unix/zerrors_linux_sparc64.go | 2 + .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 22 - .../x/sys/unix/zsyscall_aix_ppc64.go | 22 - .../x/sys/unix/zsyscall_darwin_amd64.go | 40 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 149 -- .../x/sys/unix/zsyscall_darwin_arm64.go | 40 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 149 -- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 22 - .../x/sys/unix/zsyscall_freebsd_386.go | 22 - .../x/sys/unix/zsyscall_freebsd_amd64.go | 22 - .../x/sys/unix/zsyscall_freebsd_arm.go | 22 - .../x/sys/unix/zsyscall_freebsd_arm64.go | 22 - .../x/sys/unix/zsyscall_freebsd_riscv64.go | 22 - .../x/sys/unix/zsyscall_illumos_amd64.go | 10 +- .../golang.org/x/sys/unix/zsyscall_linux.go | 22 - .../x/sys/unix/zsyscall_netbsd_386.go | 22 - .../x/sys/unix/zsyscall_netbsd_amd64.go | 22 - .../x/sys/unix/zsyscall_netbsd_arm.go | 22 - .../x/sys/unix/zsyscall_netbsd_arm64.go | 22 - .../x/sys/unix/zsyscall_openbsd_386.go | 32 +- .../x/sys/unix/zsyscall_openbsd_amd64.go | 22 - .../x/sys/unix/zsyscall_openbsd_arm.go | 32 +- .../x/sys/unix/zsyscall_openbsd_arm64.go | 32 +- .../x/sys/unix/zsyscall_openbsd_mips64.go | 32 +- .../x/sys/unix/zsyscall_openbsd_ppc64.go | 32 +- .../x/sys/unix/zsyscall_openbsd_riscv64.go | 32 +- .../x/sys/unix/zsyscall_solaris_amd64.go | 256 +- .../x/sys/unix/zsyscall_zos_s390x.go | 11 - .../x/sys/unix/zsysnum_linux_386.go | 1 + .../x/sys/unix/zsysnum_linux_amd64.go | 1 + .../x/sys/unix/zsysnum_linux_arm.go | 1 + .../x/sys/unix/zsysnum_linux_arm64.go | 1 + .../x/sys/unix/zsysnum_linux_loong64.go | 1 + .../x/sys/unix/zsysnum_linux_mips.go | 1 + .../x/sys/unix/zsysnum_linux_mips64.go | 1 + .../x/sys/unix/zsysnum_linux_mips64le.go | 1 + .../x/sys/unix/zsysnum_linux_mipsle.go | 1 + .../x/sys/unix/zsysnum_linux_ppc.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 1 + .../x/sys/unix/zsysnum_linux_riscv64.go | 1 + .../x/sys/unix/zsysnum_linux_s390x.go | 1 + .../x/sys/unix/zsysnum_linux_sparc64.go | 1 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 8 +- .../x/sys/unix/ztypes_linux_riscv64.go | 4 + .../golang.org/x/sys/windows/exec_windows.go | 89 +- .../x/sys/windows/security_windows.go | 21 +- .../x/sys/windows/syscall_windows.go | 42 +- .../golang.org/x/sys/windows/types_windows.go | 7 + .../x/sys/windows/zsyscall_windows.go | 28 +- .../apimachinery/pkg/util/runtime/runtime.go | 15 +- vendor/k8s.io/helm/pkg/chartutil/create.go | 2 +- .../k8s.io/helm/pkg/chartutil/requirements.go | 61 +- vendor/k8s.io/helm/pkg/engine/engine.go | 14 + vendor/k8s.io/helm/pkg/version/version.go | 2 +- vendor/modules.txt | 67 +- .../controller-runtime/pkg/cache/cache.go | 46 + .../pkg/cache/multi_namespace_cache.go | 4 + .../tools/setup-envtest/README.md | 4 +- 275 files changed, 6229 insertions(+), 11153 deletions(-) delete mode 100644 vendor/github.com/BurntSushi/toml/COMPATIBLE delete mode 100644 vendor/github.com/cyphar/filepath-securejoin/vendor.conf create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/bash.go delete mode 100755 vendor/github.com/gardener/gardener/hack/check-docforge.sh create mode 100755 vendor/github.com/gardener/gardener/hack/check-skaffold-deps-for-binary.sh create mode 100755 vendor/github.com/gardener/gardener/hack/compare-k8s-api-groups.sh create mode 100755 vendor/github.com/gardener/gardener/hack/compute-k8s-controllers.sh create mode 100755 
vendor/github.com/gardener/gardener/hack/generate-logcheck-symlinks.sh create mode 100755 vendor/github.com/gardener/gardener/hack/update-skaffold-deps.sh delete mode 100644 vendor/github.com/gardener/gardener/pkg/client/kubernetes/cache/errors.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/gardener/machines.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/controllers.go delete mode 100644 vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/alicoud_machineclass_types.go delete mode 100644 vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/aws_machineclass_types.go delete mode 100644 vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/azure_machineclass_types.go delete mode 100644 vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/defaults.go delete mode 100644 vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/gcp_machineclass_types.go delete mode 100644 vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/openstack_machineclass_types.go delete mode 100644 vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/packet_machineclass_types.go rename vendor/github.com/google/go-cmp/cmp/{export_unsafe.go => export.go} (94%) delete mode 100644 vendor/github.com/google/go-cmp/cmp/export_panic.go rename vendor/github.com/google/go-cmp/cmp/internal/value/{pointer_unsafe.go => pointer.go} (95%) delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go create mode 100644 vendor/github.com/ironcore-dev/vgopath/.gitignore create mode 100644 vendor/github.com/ironcore-dev/vgopath/.golangci.yaml create mode 100644 vendor/github.com/ironcore-dev/vgopath/CODEOWNERS create mode 100644 vendor/github.com/ironcore-dev/vgopath/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/ironcore-dev/vgopath/LICENSE create mode 100644 vendor/github.com/ironcore-dev/vgopath/Makefile create mode 100644 vendor/github.com/ironcore-dev/vgopath/README.md create mode 100644 vendor/github.com/ironcore-dev/vgopath/internal/cmd/version/version.go create mode 100644 vendor/github.com/ironcore-dev/vgopath/internal/cmd/vgopath/exec/exec.go create mode 100644 vendor/github.com/ironcore-dev/vgopath/internal/cmd/vgopath/vgopath.go create mode 100644 vendor/github.com/ironcore-dev/vgopath/internal/link/link.go create mode 100644 vendor/github.com/ironcore-dev/vgopath/internal/module/module.go create mode 100644 vendor/github.com/ironcore-dev/vgopath/internal/version/version.go create mode 100644 vendor/github.com/ironcore-dev/vgopath/main.go delete mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519.go delete mode 100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go diff --git a/Makefile b/Makefile index 6597492fe..be778d7c3 100644 --- a/Makefile +++ b/Makefile @@ -65,6 +65,10 @@ docker-images: # Rules for verification, formatting, linting, testing and cleaning # ##################################################################### +.PHONY: tidy +tidy: + @GO111MODULE=on go mod tidy + .PHONY: revendor revendor: @GO111MODULE=on go mod tidy diff --git a/go.mod b/go.mod index 8d6facb1b..e6a5802d5 100644 --- a/go.mod +++ b/go.mod @@ -4,22 +4,22 @@ go 1.21 require ( github.com/ahmetb/gen-crd-api-reference-docs v0.3.0 - github.com/gardener/gardener v1.81.0 + github.com/gardener/gardener v1.84.0 github.com/go-logr/logr v1.2.4 - github.com/onsi/ginkgo/v2 
v2.11.0 - github.com/onsi/gomega v1.27.10 + github.com/onsi/ginkgo/v2 v2.13.0 + github.com/onsi/gomega v1.29.0 github.com/spf13/cobra v1.7.0 golang.org/x/tools v0.13.0 - k8s.io/api v0.28.2 - k8s.io/apimachinery v0.28.2 - k8s.io/code-generator v0.28.2 - k8s.io/component-base v0.28.2 + k8s.io/api v0.28.3 + k8s.io/apimachinery v0.28.3 + k8s.io/code-generator v0.28.3 + k8s.io/component-base v0.28.3 k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 - sigs.k8s.io/controller-runtime v0.16.2 + sigs.k8s.io/controller-runtime v0.16.3 ) require ( - github.com/BurntSushi/toml v1.0.0 // indirect + github.com/BurntSushi/toml v1.2.1 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect github.com/Masterminds/semver/v3 v3.2.1 // indirect @@ -27,16 +27,16 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bronze1man/yaml2json v0.0.0-20211227013850-8972abeaea25 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cyphar/filepath-securejoin v0.2.2 // indirect + github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.10.1 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fatih/color v1.15.0 // indirect github.com/fluent/fluent-operator/v2 v2.2.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/gardener/etcd-druid v0.19.2 // indirect + github.com/gardener/etcd-druid v0.20.1 // indirect github.com/gardener/hvpa-controller/api v0.5.0 // indirect - github.com/gardener/machine-controller-manager v0.48.1 // indirect + github.com/gardener/machine-controller-manager v0.50.0 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/go-logr/zapr v1.2.4 // indirect github.com/go-openapi/errors v0.20.3 // indirect @@ -50,15 +50,16 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/huandu/xstrings v1.3.2 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/ironcore-dev/vgopath v0.1.3 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 // indirect @@ -83,13 +84,13 @@ require ( go.uber.org/mock v0.2.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect - golang.org/x/crypto v0.13.0 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/mod v0.12.0 // indirect - golang.org/x/net v0.15.0 // indirect + golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sys v0.12.0 // indirect - golang.org/x/term v0.12.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect 
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect @@ -102,18 +103,18 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect istio.io/api v0.0.0-20230217221049-9d422bf48675 // indirect istio.io/client-go v1.17.1 // indirect - k8s.io/apiextensions-apiserver v0.28.2 // indirect + k8s.io/apiextensions-apiserver v0.28.3 // indirect k8s.io/autoscaler/vertical-pod-autoscaler v0.14.0 // indirect - k8s.io/client-go v0.28.2 // indirect + k8s.io/client-go v0.28.3 // indirect k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect - k8s.io/helm v2.16.1+incompatible // indirect + k8s.io/helm v2.17.0+incompatible // indirect k8s.io/klog v1.0.0 // indirect k8s.io/klog/v2 v2.100.1 // indirect - k8s.io/kube-aggregator v0.28.2 // indirect + k8s.io/kube-aggregator v0.28.3 // indirect k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect - k8s.io/kubelet v0.28.2 // indirect - k8s.io/metrics v0.28.2 // indirect - sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20230707163321-8a64e5f3bd78 // indirect + k8s.io/kubelet v0.28.3 // indirect + k8s.io/metrics v0.28.3 // indirect + sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20231015215740-bf15e44028f9 // indirect sigs.k8s.io/controller-tools v0.13.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect diff --git a/go.sum b/go.sum index 669192df0..ef8cc49ab 100644 --- a/go.sum +++ b/go.sum @@ -49,8 +49,8 @@ github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= -github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= @@ -94,8 +94,8 @@ github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnht github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -105,8 +105,8 @@ 
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= -github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -128,14 +128,14 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/gardener/etcd-druid v0.19.2 h1:Z8TTbmVUxZ7UWU5iJAwUHUI6A9E5Mfd5JcvokVfYH1A= -github.com/gardener/etcd-druid v0.19.2/go.mod h1:0Q9nKPiONDac/Gr0SZYFkVXHGt/Yt//rcRfDIUfftZo= -github.com/gardener/gardener v1.81.0 h1:pFmDRTCnImXD4H1B6guBZRtCUBBTonib8Ua5DmGlUSk= -github.com/gardener/gardener v1.81.0/go.mod h1:HPeLu4C0lD0B4m40pdMxIy9tiIOrAe1GLCTKnKqgmdg= +github.com/gardener/etcd-druid v0.20.1 h1:o6F4higujfg7dvBXvk+yPb86+3t2+XLE0Hmw5W1kXtM= +github.com/gardener/etcd-druid v0.20.1/go.mod h1:1tAeHycB0Vb2GfCX6sUCc6V6frGrCQI//quVg4K3GNA= +github.com/gardener/gardener v1.84.0 h1:sfW8RljFT6vaShlHV04RW/99GkTdyiwfsoUDEehVNVE= +github.com/gardener/gardener v1.84.0/go.mod h1:3XZ+Yn5iHl0acqUIVZ0K78p1OYWp52sRBQOJW6sCRR8= github.com/gardener/hvpa-controller/api v0.5.0 h1:f4F3O7YUrenwh4S3TgPREPiB287JjjUiUL18OqPLyAA= github.com/gardener/hvpa-controller/api v0.5.0/go.mod h1:QQl3ELkCaki+8RhXl0FZMfvnm0WCGwGJlGmrxJj6lvM= -github.com/gardener/machine-controller-manager v0.48.1 h1:Oxr5e6gRm7P40Ds4nGlga/0nmfF7cH4rOfjthR6Mm38= -github.com/gardener/machine-controller-manager v0.48.1/go.mod h1:Axeu1Oh3agySk0oR4T+FUNax41Ni2K8tuksu8KRHuh0= +github.com/gardener/machine-controller-manager v0.50.0 h1:3dcQjzueFU1TGgprV00adjb3OCR99myTBx8DQGxywks= +github.com/gardener/machine-controller-manager v0.50.0/go.mod h1:RySZ40AgbNV/wMq60G/w49kb+okbj5Xs1A6usz5Pm/I= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -227,8 +227,8 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 
h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= @@ -262,8 +262,9 @@ github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -278,6 +279,8 @@ github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/ironcore-dev/vgopath v0.1.3 h1:/g3QJ29VrUkYEy52kcUhtvQ3mxfbMIlI1uvEbmt6S4E= +github.com/ironcore-dev/vgopath v0.1.3/go.mod h1:edfsCmU2M4r2N+t4RebSluq//tF3vzogyiDDhcf7MXs= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= @@ -347,12 +350,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= -github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= -github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= +github.com/onsi/gomega v1.29.0 
h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -447,8 +450,8 @@ golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -526,8 +529,8 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -600,11 +603,11 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= 
+golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -819,31 +822,31 @@ istio.io/client-go v1.17.1/go.mod h1:mLTRYYFxHctzUbt8Iclgj+Sueq34+qC2ZEJTn6BxRuE k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= -k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= -k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg= -k8s.io/apiextensions-apiserver v0.28.2 h1:J6/QRWIKV2/HwBhHRVITMLYoypCoPY1ftigDM0Kn+QU= -k8s.io/apiextensions-apiserver v0.28.2/go.mod h1:5tnkxLGa9nefefYzWuAlWZ7RZYuN/765Au8cWLA6SRg= +k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM= +k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc= +k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2EtmY08= +k8s.io/apiextensions-apiserver v0.28.3/go.mod h1:NE1XJZ4On0hS11aWWJUTNkmVB03j9LM7gJSisbRt8Lc= k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= -k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU= -k8s.io/apiserver v0.28.2 h1:rBeYkLvF94Nku9XfXyUIirsVzCzJBs6jMn3NWeHieyI= -k8s.io/apiserver v0.28.2/go.mod h1:f7D5e8wH8MWcKD7azq6Csw9UN+CjdtXIVQUyUhrtb+E= +k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= +k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= +k8s.io/apiserver v0.28.3 h1:8Ov47O1cMyeDzTXz0rwcfIIGAP/dP7L8rWbEljRcg5w= +k8s.io/apiserver v0.28.3/go.mod h1:YIpM+9wngNAv8Ctt0rHG4vQuX/I5rvkEMtZtsxW2rNM= k8s.io/autoscaler/vertical-pod-autoscaler v0.9.0/go.mod h1:PwWTGRRCxefhAezrDbG/tRYSAW7etHjjMPAr8fXKVAA= k8s.io/autoscaler/vertical-pod-autoscaler v0.14.0 h1:HkQHkcuwVP3BgJpVqTGeYHro83qGBj8mWotygHZND1k= k8s.io/autoscaler/vertical-pod-autoscaler v0.14.0/go.mod h1:w6/LjLR3DPQd57vlgvgbpzpuJKsCiily0+OzQI+nyfI= k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw= k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= -k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY= -k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY= +k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= +k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= -k8s.io/code-generator v0.28.2 h1:u47guga1rCWLnEnffF09p+cqj8B20oHOLoQ1lb1HGtQ= -k8s.io/code-generator v0.28.2/go.mod h1:ueeSJZJ61NHBa0ccWLey6mwawum25vX61nRZ6WOzN9A= +k8s.io/code-generator v0.28.3 h1:I847QvdpYx7xKiG2KVQeCSyNF/xU9TowaDAg601mvlw= +k8s.io/code-generator v0.28.3/go.mod 
h1:A2EAHTRYvCvBrb/MM2zZBNipeCk3f8NtpdNIKawC43M= k8s.io/component-base v0.18.3/go.mod h1:bp5GzGR0aGkYEfTj+eTY0AN/vXTgkJdQXjNTTVUaa3k= -k8s.io/component-base v0.28.2 h1:Yc1yU+6AQSlpJZyvehm/NkJBII72rzlEsd6MkBQ+G0E= -k8s.io/component-base v0.28.2/go.mod h1:4IuQPQviQCg3du4si8GpMrhAIegxpsgPngPRR/zWpzc= +k8s.io/component-base v0.28.3 h1:rDy68eHKxq/80RiMb2Ld/tbH8uAE75JdCqJyi6lXMzI= +k8s.io/component-base v0.28.3/go.mod h1:fDJ6vpVNSk6cRo5wmDa6eKIG7UlIQkaFmZN2fYgIUD8= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -851,8 +854,8 @@ k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8 k8s.io/gengo v0.0.0-20201203183100-97869a43a9d9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/helm v2.16.1+incompatible h1:L+k810plJlaGWEw1EszeT4deK8XVaKxac1oGcuB+WDc= -k8s.io/helm v2.16.1+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI= +k8s.io/helm v2.17.0+incompatible h1:Bpn6o1wKLYqKM3+Osh8e+1/K2g/GsQJ4F4yNF2+deao= +k8s.io/helm v2.17.0+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= @@ -862,17 +865,17 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-aggregator v0.28.2 h1:tCjAfB1p/v18yD2NpegNQRuahzyA/szFfcRARnpjDeo= -k8s.io/kube-aggregator v0.28.2/go.mod h1:g4hZVjC4KhJtZHV2pyiRBiU6AdBA/sAjh9Y9GJC/SbU= +k8s.io/kube-aggregator v0.28.3 h1:CVbj3+cpshSHR5dWPzLYx3sVpIDEPLlzMSxY/lAc9cM= +k8s.io/kube-aggregator v0.28.3/go.mod h1:5DyLevbRTcWnT1f9b+lB3BfbXC1w7gDa/OtB6kKInCw= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= -k8s.io/kubelet v0.28.2 h1:wqe5zKtVhNWwtdABU0mpcWVe8hc6VdVvs2kqQridZRw= -k8s.io/kubelet v0.28.2/go.mod h1:rvd0e7T5TjPcfZvy62P90XhFzp0IhPIOy+Pqy3Rtipo= +k8s.io/kubelet v0.28.3 h1:bp/uIf1R5F61BlFvFtzc4PDEiK7TtFcw3wFJlc0V0LM= +k8s.io/kubelet v0.28.3/go.mod h1:E3NHYbp/v45Ao6AD0EOZnqO3L0R6Haks6Nm0+bnFwtU= k8s.io/metrics v0.18.3/go.mod h1:TkuJE3ezDZ1ym8pYkZoEzJB7HDiFE7qxl+EmExEBoPA= -k8s.io/metrics v0.28.2 h1:Z/oMk5SmiT/Ji1SaWOPfW2l9W831BLO9/XxDq9iS3ak= -k8s.io/metrics v0.28.2/go.mod h1:QTIIdjMrq+KodO+rmp6R9Pr1LZO8kTArNtkWoQXw0sw= +k8s.io/metrics v0.28.3 h1:w2s3kVi7HulXqCVDFkF4hN/OsL1tXTTb4Biif995h/g= +k8s.io/metrics v0.28.3/go.mod h1:OZZ23AHFojPzU6r3xoHGRUcV3I9pauLua+07sAUbwLc= 
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= @@ -880,10 +883,10 @@ k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.16.2 h1:mwXAVuEk3EQf478PQwQ48zGOXvW27UJc8NHktQVuIPU= -sigs.k8s.io/controller-runtime v0.16.2/go.mod h1:vpMu3LpI5sYWtujJOa2uPK61nB5rbwlN7BAB8aSLvGU= -sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20230707163321-8a64e5f3bd78 h1:WmfMsXeG/lrERxQHSuROAyrToWwkUMJ6UrVDvSFtbdk= -sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20230707163321-8a64e5f3bd78/go.mod h1:B6HLcvOy2S1qq2eWOFm9xepiKPMIc8Z9OXSPsnUDaR4= +sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= +sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= +sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20231015215740-bf15e44028f9 h1:O27fSMHw4u0h+Rj8bNzcZk5jY0iZCO0J8/mCpigpnbw= +sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20231015215740-bf15e44028f9/go.mod h1:TF/lVLWS+JNNaVqJuDDictY2hZSXSsIHCx4FClMvqFg= sigs.k8s.io/controller-tools v0.13.0 h1:NfrvuZ4bxyolhDBt/rCZhDnx3M2hzlhgo5n3Iv2RykI= sigs.k8s.io/controller-tools v0.13.0/go.mod h1:5vw3En2NazbejQGCeWKRrE7q4P+CW8/klfVqP8QZkgA= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore index cd11be965..fe79e3add 100644 --- a/vendor/github.com/BurntSushi/toml/.gitignore +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -1,2 +1,2 @@ -toml.test +/toml.test /toml-test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE deleted file mode 100644 index f621b0119..000000000 --- a/vendor/github.com/BurntSushi/toml/COMPATIBLE +++ /dev/null @@ -1 +0,0 @@ -Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md index cc13f8667..3651cfa96 100644 --- a/vendor/github.com/BurntSushi/toml/README.md +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -1,6 +1,5 @@ TOML stands for Tom's Obvious, Minimal Language. This Go package provides a -reflection interface similar to Go's standard library `json` and `xml` -packages. +reflection interface similar to Go's standard library `json` and `xml` packages. Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). @@ -10,7 +9,7 @@ See the [releases page](https://github.com/BurntSushi/toml/releases) for a changelog; this information is also in the git tag annotations (e.g. `git show v0.4.0`). 
-This library requires Go 1.13 or newer; install it with: +This library requires Go 1.13 or newer; add it to your go.mod with: % go get github.com/BurntSushi/toml@latest @@ -19,16 +18,7 @@ It also comes with a TOML validator CLI tool: % go install github.com/BurntSushi/toml/cmd/tomlv@latest % tomlv some-toml-file.toml -### Testing -This package passes all tests in [toml-test] for both the decoder and the -encoder. - -[toml-test]: https://github.com/BurntSushi/toml-test - ### Examples -This package works similar to how the Go standard library handles XML and JSON. -Namely, data is loaded into Go values via reflection. - For the simplest example, consider some TOML file as just a list of keys and values: @@ -40,7 +30,7 @@ Perfection = [ 6, 28, 496, 8128 ] DOB = 1987-07-05T05:45:00Z ``` -Which could be defined in Go as: +Which can be decoded with: ```go type Config struct { @@ -48,20 +38,15 @@ type Config struct { Cats []string Pi float64 Perfection []int - DOB time.Time // requires `import time` + DOB time.Time } -``` - -And then decoded with: -```go var conf Config -err := toml.Decode(tomlData, &conf) -// handle error +_, err := toml.Decode(tomlData, &conf) ``` -You can also use struct tags if your struct field name doesn't map to a TOML -key value directly: +You can also use struct tags if your struct field name doesn't map to a TOML key +value directly: ```toml some_key_NAME = "wat" @@ -73,139 +58,63 @@ type TOML struct { } ``` -Beware that like other most other decoders **only exported fields** are -considered when encoding and decoding; private fields are silently ignored. +Beware that like other decoders **only exported fields** are considered when +encoding and decoding; private fields are silently ignored. ### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces -Here's an example that automatically parses duration strings into -`time.Duration` values: +Here's an example that automatically parses values in a `mail.Address`: ```toml -[[song]] -name = "Thunder Road" -duration = "4m49s" - -[[song]] -name = "Stairway to Heaven" -duration = "8m03s" -``` - -Which can be decoded with: - -```go -type song struct { - Name string - Duration duration -} -type songs struct { - Song []song -} -var favorites songs -if _, err := toml.Decode(blob, &favorites); err != nil { - log.Fatal(err) -} - -for _, s := range favorites.Song { - fmt.Printf("%s (%s)\n", s.Name, s.Duration) -} +contacts = [ + "Donald Duck ", + "Scrooge McDuck ", +] ``` -And you'll also need a `duration` type that satisfies the -`encoding.TextUnmarshaler` interface: +Can be decoded with: ```go -type duration struct { - time.Duration +// Create address type which satisfies the encoding.TextUnmarshaler interface. +type address struct { + *mail.Address } -func (d *duration) UnmarshalText(text []byte) error { +func (a *address) UnmarshalText(text []byte) error { var err error - d.Duration, err = time.ParseDuration(string(text)) + a.Address, err = mail.ParseAddress(string(text)) return err } + +// Decode it. 
+func decode() { + blob := ` + contacts = [ + "Donald Duck ", + "Scrooge McDuck ", + ] + ` + + var contacts struct { + Contacts []address + } + + _, err := toml.Decode(blob, &contacts) + if err != nil { + log.Fatal(err) + } + + for _, c := range contacts.Contacts { + fmt.Printf("%#v\n", c.Address) + } + + // Output: + // &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"} + // &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"} +} ``` To target TOML specifically you can implement `UnmarshalTOML` TOML interface in a similar way. ### More complex usage -Here's an example of how to load the example from the official spec page: - -```toml -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it - -# Line breaks are OK when inside arrays -hosts = [ - "alpha", - "omega" -] -``` - -And the corresponding Go types are: - -```go -type tomlConfig struct { - Title string - Owner ownerInfo - DB database `toml:"database"` - Servers map[string]server - Clients clients -} - -type ownerInfo struct { - Name string - Org string `toml:"organization"` - Bio string - DOB time.Time -} - -type database struct { - Server string - Ports []int - ConnMax int `toml:"connection_max"` - Enabled bool -} - -type server struct { - IP string - DC string -} - -type clients struct { - Data [][]interface{} - Hosts []string -} -``` - -Note that a case insensitive match will be tried if an exact match can't be -found. - -A working example of the above can be found in `_example/example.{go,toml}`. +See the [`_example/`](/_example) directory for a more complex example. diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index e24f0c5d5..0ca1dc4fe 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -1,14 +1,18 @@ package toml import ( + "bytes" "encoding" + "encoding/json" "fmt" "io" "io/ioutil" "math" "os" "reflect" + "strconv" "strings" + "time" ) // Unmarshaler is the interface implemented by objects that can unmarshal a @@ -17,16 +21,35 @@ type Unmarshaler interface { UnmarshalTOML(interface{}) error } -// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. -func Unmarshal(p []byte, v interface{}) error { - _, err := Decode(string(p), v) +// Unmarshal decodes the contents of data in TOML format into a pointer v. +// +// See [Decoder] for a description of the decoding process. +func Unmarshal(data []byte, v interface{}) error { + _, err := NewDecoder(bytes.NewReader(data)).Decode(v) return err } +// Decode the TOML data in to the pointer v. +// +// See [Decoder] for a description of the decoding process. +func Decode(data string, v interface{}) (MetaData, error) { + return NewDecoder(strings.NewReader(data)).Decode(v) +} + +// DecodeFile reads the contents of a file and decodes it with [Decode]. 
+func DecodeFile(path string, v interface{}) (MetaData, error) { + fp, err := os.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + // Primitive is a TOML value that hasn't been decoded into a Go value. // // This type can be used for any value, which will cause decoding to be delayed. -// You can use the PrimitiveDecode() function to "manually" decode these values. +// You can use [PrimitiveDecode] to "manually" decode these values. // // NOTE: The underlying representation of a `Primitive` value is subject to // change. Do not rely on it. @@ -42,36 +65,22 @@ type Primitive struct { // The significand precision for float32 and float64 is 24 and 53 bits; this is // the range a natural number can be stored in a float without loss of data. const ( - maxSafeFloat32Int = 16777215 // 2^24-1 - maxSafeFloat64Int = 9007199254740991 // 2^53-1 + maxSafeFloat32Int = 16777215 // 2^24-1 + maxSafeFloat64Int = int64(9007199254740991) // 2^53-1 ) -// PrimitiveDecode is just like the other `Decode*` functions, except it -// decodes a TOML value that has already been parsed. Valid primitive values -// can *only* be obtained from values filled by the decoder functions, -// including this method. (i.e., `v` may contain more `Primitive` -// values.) -// -// Meta data for primitive values is included in the meta data returned by -// the `Decode*` functions with one exception: keys returned by the Undecoded -// method will only reflect keys that were decoded. Namely, any keys hidden -// behind a Primitive will be considered undecoded. Executing this method will -// update the undecoded keys in the meta data. (See the example.) -func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { - md.context = primValue.context - defer func() { md.context = nil }() - return md.unify(primValue.undecoded, rvalue(v)) -} - // Decoder decodes TOML data. // -// TOML tables correspond to Go structs or maps (dealer's choice – they can be -// used interchangeably). +// TOML tables correspond to Go structs or maps; they can be used +// interchangeably, but structs offer better type safety. // // TOML table arrays correspond to either a slice of structs or a slice of maps. // -// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed -// in the local timezone. +// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the +// local timezone. +// +// [time.Duration] types are treated as nanoseconds if the TOML value is an +// integer, or they're parsed with time.ParseDuration() if they're strings. // // All other TOML types (float, string, int, bool and array) correspond to the // obvious Go types. @@ -80,9 +89,9 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { // interface, in which case any primitive TOML value (floats, strings, integers, // booleans, datetimes) will be converted to a []byte and given to the value's // UnmarshalText method. See the Unmarshaler example for a demonstration with -// time duration strings. +// email addresses. // -// Key mapping +// ### Key mapping // // TOML keys can map to either keys in a Go map or field names in a Go struct. 
// The special `toml` struct tag can be used to map TOML keys to struct fields @@ -109,6 +118,7 @@ func NewDecoder(r io.Reader) *Decoder { var ( unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem() unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem() ) // Decode TOML data in to the pointer `v`. @@ -120,10 +130,10 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) { s = "%v" } - return MetaData{}, e("cannot decode to non-pointer "+s, reflect.TypeOf(v)) + return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v)) } if rv.IsNil() { - return MetaData{}, e("cannot decode to nil value of %q", reflect.TypeOf(v)) + return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v)) } // Check if this is a supported type: struct, map, interface{}, or something @@ -133,7 +143,7 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) { if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) && !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) { - return MetaData{}, e("cannot decode to type %s", rt) + return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt) } // TODO: parser should read from io.Reader? Or at the very least, make it @@ -150,30 +160,29 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) { md := MetaData{ mapping: p.mapping, - types: p.types, + keyInfo: p.keyInfo, keys: p.ordered, decoded: make(map[string]struct{}, len(p.ordered)), context: nil, + data: data, } return md, md.unify(p.mapping, rv) } -// Decode the TOML data in to the pointer v. +// PrimitiveDecode is just like the other Decode* functions, except it decodes a +// TOML value that has already been parsed. Valid primitive values can *only* be +// obtained from values filled by the decoder functions, including this method. +// (i.e., v may contain more [Primitive] values.) // -// See the documentation on Decoder for a description of the decoding process. -func Decode(data string, v interface{}) (MetaData, error) { - return NewDecoder(strings.NewReader(data)).Decode(v) -} - -// DecodeFile is just like Decode, except it will automatically read the -// contents of the file at path and decode it for you. -func DecodeFile(path string, v interface{}) (MetaData, error) { - fp, err := os.Open(path) - if err != nil { - return MetaData{}, err - } - defer fp.Close() - return NewDecoder(fp).Decode(v) +// Meta data for primitive values is included in the meta data returned by the +// Decode* functions with one exception: keys returned by the Undecoded method +// will only reflect keys that were decoded. Namely, any keys hidden behind a +// Primitive will be considered undecoded. Executing this method will update the +// undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) } // unify performs a sort of type unification based on the structure of `rv`, @@ -184,7 +193,7 @@ func DecodeFile(path string, v interface{}) (MetaData, error) { func (md *MetaData) unify(data interface{}, rv reflect.Value) error { // Special case. Look for a `Primitive` value. // TODO: #76 would make this superfluous after implemented. 
- if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + if rv.Type() == primitiveType { // Save the undecoded data and the key context into the primitive // value. context := make(Key, len(md.context)) @@ -196,17 +205,14 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { return nil } - // Special case. Unmarshaler Interface support. - if rv.CanAddr() { - if v, ok := rv.Addr().Interface().(Unmarshaler); ok { - return v.UnmarshalTOML(data) - } + rvi := rv.Interface() + if v, ok := rvi.(Unmarshaler); ok { + return v.UnmarshalTOML(data) } - - // Special case. Look for a value satisfying the TextUnmarshaler interface. - if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + if v, ok := rvi.(encoding.TextUnmarshaler); ok { return md.unifyText(data, v) } + // TODO: // The behavior here is incorrect whenever a Go type satisfies the // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or @@ -217,7 +223,6 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { k := rv.Kind() - // laziness if k >= reflect.Int && k <= reflect.Uint64 { return md.unifyInt(data, rv) } @@ -243,15 +248,14 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { case reflect.Bool: return md.unifyBool(data, rv) case reflect.Interface: - // we only support empty interfaces. - if rv.NumMethod() > 0 { - return e("unsupported type %s", rv.Type()) + if rv.NumMethod() > 0 { // Only support empty interfaces are supported. + return md.e("unsupported type %s", rv.Type()) } return md.unifyAnything(data, rv) case reflect.Float32, reflect.Float64: return md.unifyFloat64(data, rv) } - return e("unsupported type %s", rv.Kind()) + return md.e("unsupported type %s", rv.Kind()) } func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { @@ -260,7 +264,7 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { if mapping == nil { return nil } - return e("type mismatch for %s: expected table but found %T", + return md.e("type mismatch for %s: expected table but found %T", rv.Type().String(), mapping) } @@ -286,13 +290,14 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { if isUnifiable(subv) { md.decoded[md.context.add(key).String()] = struct{}{} md.context = append(md.context, key) + err := md.unify(datum, subv) if err != nil { return err } md.context = md.context[0 : len(md.context)-1] } else if f.name != "" { - return e("cannot write unexported field %s.%s", rv.Type().String(), f.name) + return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name) } } } @@ -300,10 +305,10 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { } func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { - if k := rv.Type().Key().Kind(); k != reflect.String { - return fmt.Errorf( - "toml: cannot decode to a map with non-string key type (%s in %q)", - k, rv.Type()) + keyType := rv.Type().Key().Kind() + if keyType != reflect.String && keyType != reflect.Interface { + return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)", + keyType, rv.Type()) } tmap, ok := mapping.(map[string]interface{}) @@ -321,13 +326,22 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { md.context = append(md.context, k) rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) - if err := md.unify(v, rvval); err != nil { + + err := md.unify(v, indirect(rvval)) + if err != nil { return err } md.context = 
md.context[0 : len(md.context)-1] rvkey := indirect(reflect.New(rv.Type().Key())) - rvkey.SetString(k) + + switch keyType { + case reflect.Interface: + rvkey.Set(reflect.ValueOf(k)) + case reflect.String: + rvkey.SetString(k) + } + rv.SetMapIndex(rvkey, rvval) } return nil @@ -342,7 +356,7 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { return md.badtype("slice", data) } if l := datav.Len(); l != rv.Len() { - return e("expected array length %d; got TOML array of length %d", rv.Len(), l) + return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l) } return md.unifySliceArray(datav, rv) } @@ -375,6 +389,18 @@ func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { } func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + _, ok := rv.Interface().(json.Number) + if ok { + if i, ok := data.(int64); ok { + rv.SetString(strconv.FormatInt(i, 10)) + } else if f, ok := data.(float64); ok { + rv.SetString(strconv.FormatFloat(f, 'f', -1, 64)) + } else { + return md.badtype("string", data) + } + return nil + } + if s, ok := data.(string); ok { rv.SetString(s) return nil @@ -383,11 +409,13 @@ func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { } func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + rvk := rv.Kind() + if num, ok := data.(float64); ok { - switch rv.Kind() { + switch rvk { case reflect.Float32: if num < -math.MaxFloat32 || num > math.MaxFloat32 { - return e("value %f is out of range for float32", num) + return md.parseErr(errParseRange{i: num, size: rvk.String()}) } fallthrough case reflect.Float64: @@ -399,20 +427,11 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { } if num, ok := data.(int64); ok { - switch rv.Kind() { - case reflect.Float32: - if num < -maxSafeFloat32Int || num > maxSafeFloat32Int { - return e("value %d is out of range for float32", num) - } - fallthrough - case reflect.Float64: - if num < -maxSafeFloat64Int || num > maxSafeFloat64Int { - return e("value %d is out of range for float64", num) - } - rv.SetFloat(float64(num)) - default: - panic("bug") + if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) || + (rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) } + rv.SetFloat(float64(num)) return nil } @@ -420,50 +439,46 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { } func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { - if num, ok := data.(int64); ok { - if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { - switch rv.Kind() { - case reflect.Int, reflect.Int64: - // No bounds checking necessary. - case reflect.Int8: - if num < math.MinInt8 || num > math.MaxInt8 { - return e("value %d is out of range for int8", num) - } - case reflect.Int16: - if num < math.MinInt16 || num > math.MaxInt16 { - return e("value %d is out of range for int16", num) - } - case reflect.Int32: - if num < math.MinInt32 || num > math.MaxInt32 { - return e("value %d is out of range for int32", num) - } + _, ok := rv.Interface().(time.Duration) + if ok { + // Parse as string duration, and fall back to regular integer parsing + // (as nanosecond) if this is not a string. 
+ if s, ok := data.(string); ok { + dur, err := time.ParseDuration(s) + if err != nil { + return md.parseErr(errParseDuration{s}) } - rv.SetInt(num) - } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { - unum := uint64(num) - switch rv.Kind() { - case reflect.Uint, reflect.Uint64: - // No bounds checking necessary. - case reflect.Uint8: - if num < 0 || unum > math.MaxUint8 { - return e("value %d is out of range for uint8", num) - } - case reflect.Uint16: - if num < 0 || unum > math.MaxUint16 { - return e("value %d is out of range for uint16", num) - } - case reflect.Uint32: - if num < 0 || unum > math.MaxUint32 { - return e("value %d is out of range for uint32", num) - } - } - rv.SetUint(unum) - } else { - panic("unreachable") + rv.SetInt(int64(dur)) + return nil } - return nil } - return md.badtype("integer", data) + + num, ok := data.(int64) + if !ok { + return md.badtype("integer", data) + } + + rvk := rv.Kind() + switch { + case rvk >= reflect.Int && rvk <= reflect.Int64: + if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) || + (rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) || + (rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetInt(num) + case rvk >= reflect.Uint && rvk <= reflect.Uint64: + unum := uint64(num) + if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) || + rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) || + rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetUint(unum) + default: + panic("unreachable") + } + return nil } func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { @@ -488,7 +503,7 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro return err } s = string(text) - case TextMarshaler: + case encoding.TextMarshaler: text, err := sdata.MarshalText() if err != nil { return err @@ -514,7 +529,30 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro } func (md *MetaData) badtype(dst string, data interface{}) error { - return e("incompatible types: TOML key %q has type %T; destination has type %s", md.context, data, dst) + return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst) +} + +func (md *MetaData) parseErr(err error) error { + k := md.context.String() + return ParseError{ + LastKey: k, + Position: md.keyInfo[k].pos, + Line: md.keyInfo[k].pos.Line, + err: err, + input: string(md.data), + } +} + +func (md *MetaData) e(format string, args ...interface{}) error { + f := "toml: " + if len(md.context) > 0 { + f = fmt.Sprintf("toml: (last key %q): ", md.context) + p := md.keyInfo[md.context.String()].pos + if p.Line > 0 { + f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context) + } + } + return fmt.Errorf(f+format, args...) } // rvalue returns a reflect.Value of `v`. All pointers are resolved. 
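The hunks above add dedicated handling for `time.Duration` and `json.Number` targets in the decoder. A small sketch of what this looks like from the caller's side, with illustrative field names:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

type limits struct {
	// Strings are parsed with time.ParseDuration; integers are taken as nanoseconds.
	Timeout time.Duration `toml:"timeout"`
	// Both TOML integers and floats can be decoded into a json.Number.
	MaxSize json.Number `toml:"max_size"`
}

func main() {
	var l limits
	if _, err := toml.Decode("timeout = \"5m10s\"\nmax_size = 42", &l); err != nil {
		panic(err)
	}
	fmt.Println(l.Timeout, l.MaxSize) // 5m10s 42
}
```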
@@ -533,7 +571,11 @@ func indirect(v reflect.Value) reflect.Value { if v.Kind() != reflect.Ptr { if v.CanSet() { pv := v.Addr() - if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok { + pvi := pv.Interface() + if _, ok := pvi.(encoding.TextUnmarshaler); ok { + return pv + } + if _, ok := pvi.(Unmarshaler); ok { return pv } } @@ -549,12 +591,12 @@ func isUnifiable(rv reflect.Value) bool { if rv.CanSet() { return true } - if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + rvi := rv.Interface() + if _, ok := rvi.(encoding.TextUnmarshaler); ok { + return true + } + if _, ok := rvi.(Unmarshaler); ok { return true } return false } - -func e(format string, args ...interface{}) error { - return fmt.Errorf("toml: "+format, args...) -} diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go index eddfb641b..086d0b686 100644 --- a/vendor/github.com/BurntSushi/toml/decode_go116.go +++ b/vendor/github.com/BurntSushi/toml/decode_go116.go @@ -7,8 +7,8 @@ import ( "io/fs" ) -// DecodeFS is just like Decode, except it will automatically read the contents -// of the file at `path` from a fs.FS instance. +// DecodeFS reads the contents of a file from [fs.FS] and decodes it with +// [Decode]. func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) { fp, err := fsys.Open(path) if err != nil { diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go index 099c4a77d..81a7c0fe9 100644 --- a/vendor/github.com/BurntSushi/toml/doc.go +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -1,13 +1,11 @@ -/* -Package toml implements decoding and encoding of TOML files. - -This package supports TOML v1.0.0, as listed on https://toml.io - -There is also support for delaying decoding with the Primitive type, and -querying the set of keys in a TOML document with the MetaData type. - -The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator, -and can be used to verify if TOML document is valid. It can also be used to -print the type of each key. -*/ +// Package toml implements decoding and encoding of TOML files. +// +// This package supports TOML v1.0.0, as specified at https://toml.io +// +// There is also support for delaying decoding with the Primitive type, and +// querying the set of keys in a TOML document with the MetaData type. +// +// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator, +// and can be used to verify if TOML document is valid. It can also be used to +// print the type of each key. package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index dee4e6d31..930e1d521 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -3,6 +3,7 @@ package toml import ( "bufio" "encoding" + "encoding/json" "errors" "fmt" "io" @@ -63,6 +64,12 @@ var dblQuotedReplacer = strings.NewReplacer( "\x7f", `\u007f`, ) +var ( + marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + timeType = reflect.TypeOf((*time.Time)(nil)).Elem() +) + // Marshaler is the interface implemented by types that can marshal themselves // into valid TOML. type Marshaler interface { @@ -72,9 +79,12 @@ type Marshaler interface { // Encoder encodes a Go to a TOML document. // // The mapping between Go values and TOML values should be precisely the same as -// for the Decode* functions. 
+// for [Decode]. // -// The toml.Marshaler and encoder.TextMarshaler interfaces are supported to +// time.Time is encoded as a RFC 3339 string, and time.Duration as its string +// representation. +// +// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported to // encoding the value as custom TOML. // // If you want to write arbitrary binary data then you will need to use @@ -85,6 +95,17 @@ type Marshaler interface { // // Go maps will be sorted alphabetically by key for deterministic output. // +// The toml struct tag can be used to provide the key name; if omitted the +// struct field name will be used. If the "omitempty" option is present the +// following value will be skipped: +// +// - arrays, slices, maps, and string with len of 0 +// - struct with all zero values +// - bool false +// +// If omitzero is given all int and float types with a value of 0 will be +// skipped. +// // Encoding Go values without a corresponding TOML representation will return an // error. Examples of this includes maps with non-string keys, slices with nil // elements, embedded non-struct types, and nested slices containing maps or @@ -109,7 +130,7 @@ func NewEncoder(w io.Writer) *Encoder { } } -// Encode writes a TOML representation of the Go value to the Encoder's writer. +// Encode writes a TOML representation of the Go value to the [Encoder]'s writer. // // An error is returned if the value given cannot be encoded to a valid TOML // document. @@ -136,18 +157,15 @@ func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { } func (enc *Encoder) encode(key Key, rv reflect.Value) { - // Special case: time needs to be in ISO8601 format. - // - // Special case: if we can marshal the type to text, then we used that. This - // prevents the encoder for handling these types as generic structs (or - // whatever the underlying type of a TextMarshaler is). - switch t := rv.Interface().(type) { - case time.Time, encoding.TextMarshaler, Marshaler: + // If we can marshal the type to text, then we use that. This prevents the + // encoder for handling these types as generic structs (or whatever the + // underlying type of a TextMarshaler is). + switch { + case isMarshaler(rv): enc.writeKeyValue(key, rv, false) return - // TODO: #76 would make this superfluous after implemented. - case Primitive: - enc.encode(key, reflect.ValueOf(t.undecoded)) + case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented. + enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded)) return } @@ -212,18 +230,44 @@ func (enc *Encoder) eElement(rv reflect.Value) { if err != nil { encPanic(err) } - enc.writeQuoted(string(s)) + if s == nil { + encPanic(errors.New("MarshalTOML returned nil and no error")) + } + enc.w.Write(s) return case encoding.TextMarshaler: s, err := v.MarshalText() if err != nil { encPanic(err) } + if s == nil { + encPanic(errors.New("MarshalText returned nil and no error")) + } enc.writeQuoted(string(s)) return + case time.Duration: + enc.writeQuoted(v.String()) + return + case json.Number: + n, _ := rv.Interface().(json.Number) + + if n == "" { /// Useful zero value. 
+ enc.w.WriteByte('0') + return + } else if v, err := n.Int64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } else if v, err := n.Float64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } + encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n)) } switch rv.Kind() { + case reflect.Ptr: + enc.eElement(rv.Elem()) + return case reflect.String: enc.writeQuoted(rv.String()) case reflect.Bool: @@ -259,7 +303,7 @@ func (enc *Encoder) eElement(rv reflect.Value) { case reflect.Interface: enc.eElement(rv.Elem()) default: - encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface())) + encPanic(fmt.Errorf("unexpected type: %T", rv.Interface())) } } @@ -280,7 +324,7 @@ func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { length := rv.Len() enc.wf("[") for i := 0; i < length; i++ { - elem := rv.Index(i) + elem := eindirect(rv.Index(i)) enc.eElement(elem) if i != length-1 { enc.wf(", ") @@ -294,7 +338,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { encPanic(errNoKey) } for i := 0; i < rv.Len(); i++ { - trv := rv.Index(i) + trv := eindirect(rv.Index(i)) if isNil(trv) { continue } @@ -319,7 +363,7 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) { } func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { - switch rv := eindirect(rv); rv.Kind() { + switch rv.Kind() { case reflect.Map: enc.eMap(key, rv, inline) case reflect.Struct: @@ -341,7 +385,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { var mapKeysDirect, mapKeysSub []string for _, mapKey := range rv.MapKeys() { k := mapKey.String() - if typeIsTable(tomlTypeOfGo(rv.MapIndex(mapKey))) { + if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { mapKeysSub = append(mapKeysSub, k) } else { mapKeysDirect = append(mapKeysDirect, k) @@ -351,7 +395,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { var writeMapKeys = func(mapKeys []string, trailC bool) { sort.Strings(mapKeys) for i, mapKey := range mapKeys { - val := rv.MapIndex(reflect.ValueOf(mapKey)) + val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey))) if isNil(val) { continue } @@ -379,6 +423,13 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { const is32Bit = (32 << (^uint(0) >> 63)) == 32 +func pointerTo(t reflect.Type) reflect.Type { + if t.Kind() == reflect.Ptr { + return pointerTo(t.Elem()) + } + return t +} + func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { // Write keys for fields directly under this key first, because if we write // a field that creates a new table then all keys under it will be in that @@ -395,31 +446,25 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { addFields = func(rt reflect.Type, rv reflect.Value, start []int) { for i := 0; i < rt.NumField(); i++ { f := rt.Field(i) - if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields. + isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct + if f.PkgPath != "" && !isEmbed { /// Skip unexported fields. + continue + } + opts := getOptions(f.Tag) + if opts.skip { continue } - frv := rv.Field(i) + frv := eindirect(rv.Field(i)) // Treat anonymous struct fields with tag names as though they are // not anonymous, like encoding/json does. // // Non-struct anonymous fields use the normal encoding logic. 
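To make the `toml` tag options documented for the Encoder above concrete (name override, `omitempty`, `omitzero`, and the string encoding of `time.Duration`), here is a minimal encoding sketch with invented field names:

```go
package main

import (
	"os"
	"time"

	"github.com/BurntSushi/toml"
)

type server struct {
	Host    string        `toml:"host"`
	Aliases []string      `toml:"aliases,omitempty"` // skipped when the slice is empty
	Weight  int           `toml:"weight,omitzero"`   // skipped when 0
	Timeout time.Duration `toml:"timeout"`           // encoded as its string form, e.g. "1m30s"
}

func main() {
	s := server{Host: "localhost", Timeout: 90 * time.Second}
	if err := toml.NewEncoder(os.Stdout).Encode(s); err != nil {
		panic(err)
	}
	// Output (aliases and weight are omitted):
	// host = "localhost"
	// timeout = "1m30s"
}
```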
- if f.Anonymous { - t := f.Type - switch t.Kind() { - case reflect.Struct: - if getOptions(f.Tag).name == "" { - addFields(t, frv, append(start, f.Index...)) - continue - } - case reflect.Ptr: - if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" { - if !frv.IsNil() { - addFields(t.Elem(), frv.Elem(), append(start, f.Index...)) - } - continue - } + if isEmbed { + if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct { + addFields(frv.Type(), frv, append(start, f.Index...)) + continue } } @@ -445,7 +490,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { writeFields := func(fields [][]int) { for _, fieldIndex := range fields { fieldType := rt.FieldByIndex(fieldIndex) - fieldVal := rv.FieldByIndex(fieldIndex) + fieldVal := eindirect(rv.FieldByIndex(fieldIndex)) if isNil(fieldVal) { /// Don't write anything for nil fields. continue @@ -459,7 +504,8 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if opts.name != "" { keyName = opts.name } - if opts.omitempty && isEmpty(fieldVal) { + + if opts.omitempty && enc.isEmpty(fieldVal) { continue } if opts.omitzero && isZero(fieldVal) { @@ -498,6 +544,21 @@ func tomlTypeOfGo(rv reflect.Value) tomlType { if isNil(rv) || !rv.IsValid() { return nil } + + if rv.Kind() == reflect.Struct { + if rv.Type() == timeType { + return tomlDatetime + } + if isMarshaler(rv) { + return tomlString + } + return tomlHash + } + + if isMarshaler(rv) { + return tomlString + } + switch rv.Kind() { case reflect.Bool: return tomlBool @@ -509,7 +570,7 @@ func tomlTypeOfGo(rv reflect.Value) tomlType { case reflect.Float32, reflect.Float64: return tomlFloat case reflect.Array, reflect.Slice: - if typeEqual(tomlHash, tomlArrayType(rv)) { + if isTableArray(rv) { return tomlArrayHash } return tomlArray @@ -519,67 +580,35 @@ func tomlTypeOfGo(rv reflect.Value) tomlType { return tomlString case reflect.Map: return tomlHash - case reflect.Struct: - if _, ok := rv.Interface().(time.Time); ok { - return tomlDatetime - } - if isMarshaler(rv) { - return tomlString - } - return tomlHash default: - if isMarshaler(rv) { - return tomlString - } - encPanic(errors.New("unsupported type: " + rv.Kind().String())) panic("unreachable") } } func isMarshaler(rv reflect.Value) bool { - switch rv.Interface().(type) { - case encoding.TextMarshaler: - return true - case Marshaler: - return true - } - - // Someone used a pointer receiver: we can make it work for pointer values. - if rv.CanAddr() { - if _, ok := rv.Addr().Interface().(encoding.TextMarshaler); ok { - return true - } - if _, ok := rv.Addr().Interface().(Marshaler); ok { - return true - } - } - return false + return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml) } -// tomlArrayType returns the element type of a TOML array. The type returned -// may be nil if it cannot be determined (e.g., a nil slice or a zero length -// slize). This function may also panic if it finds a type that cannot be -// expressed in TOML (such as nil elements, heterogeneous arrays or directly -// nested arrays of tables). -func tomlArrayType(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { - return nil +// isTableArray reports if all entries in the array or slice are a table. +func isTableArray(arr reflect.Value) bool { + if isNil(arr) || !arr.IsValid() || arr.Len() == 0 { + return false } - /// Don't allow nil. 
- rvlen := rv.Len() - for i := 1; i < rvlen; i++ { - if tomlTypeOfGo(rv.Index(i)) == nil { + ret := true + for i := 0; i < arr.Len(); i++ { + tt := tomlTypeOfGo(eindirect(arr.Index(i))) + // Don't allow nil. + if tt == nil { encPanic(errArrayNilElement) } - } - firstType := tomlTypeOfGo(rv.Index(0)) - if firstType == nil { - encPanic(errArrayNilElement) + if ret && !typeEqual(tomlHash, tt) { + ret = false + } } - return firstType + return ret } type tagOptions struct { @@ -620,10 +649,26 @@ func isZero(rv reflect.Value) bool { return false } -func isEmpty(rv reflect.Value) bool { +func (enc *Encoder) isEmpty(rv reflect.Value) bool { switch rv.Kind() { case reflect.Array, reflect.Slice, reflect.Map, reflect.String: return rv.Len() == 0 + case reflect.Struct: + if rv.Type().Comparable() { + return reflect.Zero(rv.Type()).Interface() == rv.Interface() + } + // Need to also check if all the fields are empty, otherwise something + // like this with uncomparable types will always return true: + // + // type a struct{ field b } + // type b struct{ s []string } + // s := a{field: b{s: []string{"AAA"}}} + for i := 0; i < rv.NumField(); i++ { + if !enc.isEmpty(rv.Field(i)) { + return false + } + } + return true case reflect.Bool: return !rv.Bool() } @@ -638,16 +683,15 @@ func (enc *Encoder) newline() { // Write a key/value pair: // -// key = +// key = // // This is also used for "k = v" in inline tables; so something like this will // be written in three calls: // -// ┌────────────────────┐ -// │ ┌───┐ ┌─────┐│ -// v v v v vv -// key = {k = v, k2 = v2} -// +// ┌───────────────────┐ +// │ ┌───┐ ┌────┐│ +// v v v v vv +// key = {k = 1, k2 = 2} func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { if len(key) == 0 { encPanic(errNoKey) @@ -675,13 +719,25 @@ func encPanic(err error) { panic(tomlEncodeError{err}) } +// Resolve any level of pointers to the actual value (e.g. **string → string). func eindirect(v reflect.Value) reflect.Value { - switch v.Kind() { - case reflect.Ptr, reflect.Interface: - return eindirect(v.Elem()) - default: + if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface { + if isMarshaler(v) { + return v + } + if v.CanAddr() { /// Special case for marshalers; see #358. + if pv := v.Addr(); isMarshaler(pv) { + return pv + } + } return v } + + if v.IsNil() { + return v + } + + return eindirect(v.Elem()) } func isNil(rv reflect.Value) bool { diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go index 36edc4655..f4f390e64 100644 --- a/vendor/github.com/BurntSushi/toml/error.go +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -5,57 +5,60 @@ import ( "strings" ) -// ParseError is returned when there is an error parsing the TOML syntax. -// -// For example invalid syntax, duplicate keys, etc. +// ParseError is returned when there is an error parsing the TOML syntax such as +// invalid syntax, duplicate keys, etc. // // In addition to the error message itself, you can also print detailed location -// information with context by using ErrorWithLocation(): +// information with context by using [ErrorWithPosition]: // -// toml: error: Key 'fruit' was already created and cannot be used as an array. +// toml: error: Key 'fruit' was already created and cannot be used as an array. 
// -// At line 4, column 2-7: +// At line 4, column 2-7: // -// 2 | fruit = [] -// 3 | -// 4 | [[fruit]] # Not allowed -// ^^^^^ +// 2 | fruit = [] +// 3 | +// 4 | [[fruit]] # Not allowed +// ^^^^^ // -// Furthermore, the ErrorWithUsage() can be used to print the above with some -// more detailed usage guidance: +// [ErrorWithUsage] can be used to print the above with some more detailed usage +// guidance: // -// toml: error: newlines not allowed within inline tables +// toml: error: newlines not allowed within inline tables // -// At line 1, column 18: +// At line 1, column 18: // -// 1 | x = [{ key = 42 # -// ^ +// 1 | x = [{ key = 42 # +// ^ // -// Error help: +// Error help: // -// Inline tables must always be on a single line: +// Inline tables must always be on a single line: // -// table = {key = 42, second = 43} +// table = {key = 42, second = 43} // -// It is invalid to split them over multiple lines like so: +// It is invalid to split them over multiple lines like so: // -// # INVALID -// table = { -// key = 42, -// second = 43 -// } +// # INVALID +// table = { +// key = 42, +// second = 43 +// } // -// Use regular for this: +// Use regular for this: // -// [table] -// key = 42 -// second = 43 +// [table] +// key = 42 +// second = 43 type ParseError struct { Message string // Short technical message. Usage string // Longer message with usage guidance; may be blank. Position Position // Position of the error LastKey string // Last parsed key, may be blank. - Line int // Line the error occurred. Deprecated: use Position. + + // Line the error occurred. + // + // Deprecated: use [Position]. + Line int err error input string @@ -83,7 +86,7 @@ func (pe ParseError) Error() string { // ErrorWithUsage() returns the error with detailed location context. // -// See the documentation on ParseError. +// See the documentation on [ParseError]. func (pe ParseError) ErrorWithPosition() string { if pe.input == "" { // Should never happen, but just in case. return pe.Error() @@ -124,13 +127,17 @@ func (pe ParseError) ErrorWithPosition() string { // ErrorWithUsage() returns the error with detailed location context and usage // guidance. // -// See the documentation on ParseError. +// See the documentation on [ParseError]. func (pe ParseError) ErrorWithUsage() string { m := pe.ErrorWithPosition() if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" { - return m + "Error help:\n\n " + - strings.ReplaceAll(strings.TrimSpace(u.Usage()), "\n", "\n ") + - "\n" + lines := strings.Split(strings.TrimSpace(u.Usage()), "\n") + for i := range lines { + if lines[i] != "" { + lines[i] = " " + lines[i] + } + } + return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n" } return m } @@ -160,6 +167,11 @@ type ( errLexInvalidDate struct{ v string } errLexInlineTableNL struct{} errLexStringNL struct{} + errParseRange struct { + i interface{} // int or float + size string // "int64", "uint16", etc. 
+ } + errParseDuration struct{ d string } ) func (e errLexControl) Error() string { @@ -179,6 +191,10 @@ func (e errLexInlineTableNL) Error() string { return "newlines not allowed withi func (e errLexInlineTableNL) Usage() string { return usageInlineNewline } func (e errLexStringNL) Error() string { return "strings cannot contain newlines" } func (e errLexStringNL) Usage() string { return usageStringNewline } +func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) } +func (e errParseRange) Usage() string { return usageIntOverflow } +func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) } +func (e errParseDuration) Usage() string { return usageDuration } const usageEscape = ` A '\' inside a "-delimited string is interpreted as an escape character. @@ -227,3 +243,37 @@ Instead use """ or ''' to split strings over multiple lines: string = """Hello, world!""" ` + +const usageIntOverflow = ` +This number is too large; this may be an error in the TOML, but it can also be a +bug in the program that uses too small of an integer. + +The maximum and minimum values are: + + size │ lowest │ highest + ───────┼────────────────┼────────── + int8 │ -128 │ 127 + int16 │ -32,768 │ 32,767 + int32 │ -2,147,483,648 │ 2,147,483,647 + int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷ + uint8 │ 0 │ 255 + uint16 │ 0 │ 65535 + uint32 │ 0 │ 4294967295 + uint64 │ 0 │ 1.8 × 10¹⁸ + +int refers to int32 on 32-bit systems and int64 on 64-bit systems. +` + +const usageDuration = ` +A duration must be as "number", without any spaces. Valid units are: + + ns nanoseconds (billionth of a second) + us, µs microseconds (millionth of a second) + ms milliseconds (thousands of a second) + s seconds + m minutes + h hours + +You can combine multiple units; for example "5m10s" for 5 minutes and 10 +seconds. +` diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index 63ef20f47..d4d70871d 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -82,7 +82,7 @@ func (lx *lexer) nextItem() item { return item default: lx.state = lx.state(lx) - //fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack) + //fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack) } } } @@ -128,6 +128,11 @@ func (lx lexer) getPos() Position { } func (lx *lexer) emit(typ itemType) { + // Needed for multiline strings ending with an incomplete UTF-8 sequence. + if lx.start > lx.pos { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return + } lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()} lx.start = lx.pos } @@ -711,7 +716,17 @@ func lexMultilineString(lx *lexer) stateFn { if lx.peek() == '"' { /// Check if we already lexed 5 's; if so we have 6 now, and /// that's just too many man! - if strings.HasSuffix(lx.current(), `"""""`) { + /// + /// Second check is for the edge case: + /// + /// two quotes allowed. + /// vv + /// """lol \"""""" + /// ^^ ^^^---- closing three + /// escaped + /// + /// But ugly, but it works + if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) { return lx.errorf(`unexpected '""""""'`) } lx.backup() @@ -756,7 +771,7 @@ func lexRawString(lx *lexer) stateFn { } // lexMultilineRawString consumes a raw string. Nothing can be escaped in such -// a string. It assumes that the beginning "'''" has already been consumed and +// a string. 
It assumes that the beginning ''' has already been consumed and // ignored. func lexMultilineRawString(lx *lexer) stateFn { r := lx.next() @@ -802,8 +817,7 @@ func lexMultilineRawString(lx *lexer) stateFn { // lexMultilineStringEscape consumes an escaped character. It assumes that the // preceding '\\' has already been consumed. func lexMultilineStringEscape(lx *lexer) stateFn { - // Handle the special case first: - if isNL(lx.next()) { + if isNL(lx.next()) { /// \ escaping newline. return lexMultilineString } lx.backup() diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go index 868619fb9..71847a041 100644 --- a/vendor/github.com/BurntSushi/toml/meta.go +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -12,10 +12,11 @@ import ( type MetaData struct { context Key // Used only during decoding. + keyInfo map[string]keyInfo mapping map[string]interface{} - types map[string]tomlType keys []Key decoded map[string]struct{} + data []byte // Input file; for errors. } // IsDefined reports if the key exists in the TOML data. @@ -50,8 +51,8 @@ func (md *MetaData) IsDefined(key ...string) bool { // Type will return the empty string if given an empty key or a key that does // not exist. Keys are case sensitive. func (md *MetaData) Type(key ...string) string { - if typ, ok := md.types[Key(key).String()]; ok { - return typ.typeString() + if ki, ok := md.keyInfo[Key(key).String()]; ok { + return ki.tomlType.typeString() } return "" } @@ -70,7 +71,7 @@ func (md *MetaData) Keys() []Key { // Undecoded returns all keys that have not been decoded in the order in which // they appear in the original TOML document. // -// This includes keys that haven't been decoded because of a Primitive value. +// This includes keys that haven't been decoded because of a [Primitive] value. // Once the Primitive value is decoded, the keys will be considered decoded. // // Also note that decoding into an empty interface will result in no decoding, @@ -88,7 +89,7 @@ func (md *MetaData) Undecoded() []Key { return undecoded } -// Key represents any TOML key, including key groups. Use (MetaData).Keys to get +// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get // values of this type. type Key []string diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index 8269cca17..d2542d6f9 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -16,12 +16,18 @@ type parser struct { currentKey string // Base key name for everything except hashes. pos Position // Current position in the TOML file. - ordered []Key // List of keys in the order that they appear in the TOML data. + ordered []Key // List of keys in the order that they appear in the TOML data. + + keyInfo map[string]keyInfo // Map keyname → info about the TOML key. mapping map[string]interface{} // Map keyname → key value. - types map[string]tomlType // Map keyname → TOML type. implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). 
} +type keyInfo struct { + pos Position + tomlType tomlType +} + func parse(data string) (p *parser, err error) { defer func() { if r := recover(); r != nil { @@ -57,8 +63,8 @@ func parse(data string) (p *parser, err error) { } p = &parser{ + keyInfo: make(map[string]keyInfo), mapping: make(map[string]interface{}), - types: make(map[string]tomlType), lx: lex(data), ordered: make([]Key, 0), implicits: make(map[string]struct{}), @@ -74,6 +80,15 @@ func parse(data string) (p *parser, err error) { return p, nil } +func (p *parser) panicErr(it item, err error) { + panic(ParseError{ + err: err, + Position: it.pos, + Line: it.pos.Len, + LastKey: p.current(), + }) +} + func (p *parser) panicItemf(it item, format string, v ...interface{}) { panic(ParseError{ Message: fmt.Sprintf(format, v...), @@ -94,7 +109,7 @@ func (p *parser) panicf(format string, v ...interface{}) { func (p *parser) next() item { it := p.lx.nextItem() - //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val) + //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val) if it.typ == itemError { if it.err != nil { panic(ParseError{ @@ -146,7 +161,7 @@ func (p *parser) topLevel(item item) { p.assertEqual(itemTableEnd, name.typ) p.addContext(key, false) - p.setType("", tomlHash) + p.setType("", tomlHash, item.pos) p.ordered = append(p.ordered, key) case itemArrayTableStart: // [[ .. ]] name := p.nextPos() @@ -158,7 +173,7 @@ func (p *parser) topLevel(item item) { p.assertEqual(itemArrayTableEnd, name.typ) p.addContext(key, true) - p.setType("", tomlArrayHash) + p.setType("", tomlArrayHash, item.pos) p.ordered = append(p.ordered, key) case itemKeyStart: // key = .. outerContext := p.context @@ -181,8 +196,9 @@ func (p *parser) topLevel(item item) { } /// Set value. - val, typ := p.value(p.next(), false) - p.set(p.currentKey, val, typ) + vItem := p.next() + val, typ := p.value(vItem, false) + p.set(p.currentKey, val, typ, vItem.pos) p.ordered = append(p.ordered, p.context.add(p.currentKey)) /// Remove the context we added (preserving any context from [tbl] lines). @@ -220,7 +236,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { case itemString: return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) case itemMultilineString: - return p.replaceEscapes(it, stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) + return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) case itemRawString: return it.val, p.typeOfPrimitive(it) case itemRawMultilineString: @@ -266,7 +282,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) { // So mark the former as a bug but the latter as a legitimate user // error. 
if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { - p.panicItemf(it, "Integer '%s' is out of the range of 64-bit signed integers.", it.val) + p.panicErr(it, errParseRange{i: it.val, size: "int64"}) } else { p.bug("Expected integer value, but got '%s'.", it.val) } @@ -304,7 +320,7 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) { num, err := strconv.ParseFloat(val, 64) if err != nil { if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { - p.panicItemf(it, "Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val) + p.panicErr(it, errParseRange{i: it.val, size: "float64"}) } else { p.panicItemf(it, "Invalid float value: %q", it.val) } @@ -343,9 +359,8 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) { } func (p *parser) valueArray(it item) (interface{}, tomlType) { - p.setType(p.currentKey, tomlArray) + p.setType(p.currentKey, tomlArray, it.pos) - // p.setType(p.currentKey, typ) var ( types []tomlType @@ -414,7 +429,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom /// Set the value. val, typ := p.value(p.next(), false) - p.set(p.currentKey, val, typ) + p.set(p.currentKey, val, typ, it.pos) p.ordered = append(p.ordered, p.context.add(p.currentKey)) hash[p.currentKey] = val @@ -533,9 +548,10 @@ func (p *parser) addContext(key Key, array bool) { } // set calls setValue and setType. -func (p *parser) set(key string, val interface{}, typ tomlType) { +func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) { p.setValue(key, val) - p.setType(key, typ) + p.setType(key, typ, pos) + } // setValue sets the given key to the given value in the current context. @@ -599,7 +615,7 @@ func (p *parser) setValue(key string, value interface{}) { // // Note that if `key` is empty, then the type given will be applied to the // current context (which is either a table or an array of tables). -func (p *parser) setType(key string, typ tomlType) { +func (p *parser) setType(key string, typ tomlType, pos Position) { keyContext := make(Key, 0, len(p.context)+1) keyContext = append(keyContext, p.context...) if len(key) > 0 { // allow type setting for hashes @@ -611,7 +627,7 @@ func (p *parser) setType(key string, typ tomlType) { if len(keyContext) == 0 { keyContext = Key{""} } - p.types[keyContext.String()] = typ + p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos} } // Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and @@ -619,7 +635,7 @@ func (p *parser) setType(key string, typ tomlType) { func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } -func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray } +func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } func (p *parser) addImplicitContext(key Key) { p.addImplicit(key) p.addContext(key, false) @@ -647,7 +663,7 @@ func stripFirstNewline(s string) string { } // Remove newlines inside triple-quoted strings if a line ends with "\". 
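The parser now attaches position information to range errors via `errParseRange` and `ParseError`. A caller-side sketch of surfacing that context with `ErrorWithPosition`; the TOML input and field are invented:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct {
		Port int8 `toml:"port"`
	}
	_, err := toml.Decode("port = 512", &v) // 512 is out of range for int8
	var perr toml.ParseError
	if errors.As(err, &perr) {
		// ErrorWithPosition prints the line/column context; ErrorWithUsage
		// additionally appends the "Error help" guidance defined above.
		fmt.Println(perr.ErrorWithPosition())
	} else if err != nil {
		fmt.Println(err)
	}
}
```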
-func stripEscapedNewlines(s string) string { +func (p *parser) stripEscapedNewlines(s string) string { split := strings.Split(s, "\n") if len(split) < 1 { return s @@ -679,6 +695,10 @@ func stripEscapedNewlines(s string) string { continue } + if i == len(split)-1 { + p.panicf("invalid escape: '\\ '") + } + split[i] = line[:len(line)-1] // Remove \ if len(split)-1 > i { split[i+1] = strings.TrimLeft(split[i+1], " \t\r") @@ -706,10 +726,8 @@ func (p *parser) replaceEscapes(it item, str string) string { switch s[r] { default: p.bug("Expected valid escape code after \\, but got %q.", s[r]) - return "" case ' ', '\t': p.panicItemf(it, "invalid escape: '\\%c'", s[r]) - return "" case 'b': replaced = append(replaced, rune(0x0008)) r += 1 diff --git a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml index 3938f3834..b94ff8cf9 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml +++ b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml @@ -4,10 +4,12 @@ language: go go: - - 1.7.x - - 1.8.x + - 1.13.x + - 1.16.x - tip - +arch: + - AMD64 + - ppc64le os: - linux - osx diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md index 49b2baa9f..3624617c8 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/README.md +++ b/vendor/github.com/cyphar/filepath-securejoin/README.md @@ -7,6 +7,19 @@ standard library][go#20126]. The purpose of this function is to be a "secure" alternative to `filepath.Join`, and in particular it provides certain guarantees that are not provided by `filepath.Join`. +> **NOTE**: This code is *only* safe if you are not at risk of other processes +> modifying path components after you've used `SecureJoin`. If it is possible +> for a malicious process to modify path components of the resolved path, then +> you will be vulnerable to some fairly trivial TOCTOU race conditions. [There +> are some Linux kernel patches I'm working on which might allow for a better +> solution.][lwn-obeneath] +> +> In addition, with a slightly modified API it might be possible to use +> `O_PATH` and verify that the opened path is actually the resolved one -- but +> I have not done that yet. I might add it in the future as a helper function +> to help users verify the path (we can't just return `/proc/self/fd/` +> because that doesn't always work transparently for all users). + This is the function prototype: ```go @@ -16,8 +29,8 @@ func SecureJoin(root, unsafePath string) (string, error) This library **guarantees** the following: * If no error is set, the resulting string **must** be a child path of - `SecureJoin` and will not contain any symlink path components (they will all - be expanded). + `root` and will not contain any symlink path components (they will all be + expanded). * When expanding symlinks, all symlink path components **must** be resolved relative to the provided root. In particular, this can be considered a @@ -25,7 +38,7 @@ This library **guarantees** the following: these symlinks will **not** be expanded lexically (`filepath.Clean` is not called on the input before processing). -* Non-existant path components are unaffected by `SecureJoin` (similar to +* Non-existent path components are unaffected by `SecureJoin` (similar to `filepath.EvalSymlinks`'s semantics). 
* The returned path will always be `filepath.Clean`ed and thus not contain any @@ -57,6 +70,7 @@ func SecureJoin(root, unsafePath string) (string, error) { } ``` +[lwn-obeneath]: https://lwn.net/Articles/767547/ [go#20126]: https://github.com/golang/go/issues/20126 ### License ### diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION index ee1372d33..717903969 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/VERSION +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -1 +1 @@ -0.2.2 +0.2.3 diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go index c4ca3d713..7dd08dbbd 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/join.go +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -12,39 +12,20 @@ package securejoin import ( "bytes" + "errors" "os" "path/filepath" "strings" "syscall" - - "github.com/pkg/errors" ) -// ErrSymlinkLoop is returned by SecureJoinVFS when too many symlinks have been -// evaluated in attempting to securely join the two given paths. -var ErrSymlinkLoop = errors.Wrap(syscall.ELOOP, "secure join") - // IsNotExist tells you if err is an error that implies that either the path // accessed does not exist (or path components don't exist). This is // effectively a more broad version of os.IsNotExist. func IsNotExist(err error) bool { - // If it's a bone-fide ENOENT just bail. - if os.IsNotExist(errors.Cause(err)) { - return true - } - // Check that it's not actually an ENOTDIR, which in some cases is a more // convoluted case of ENOENT (usually involving weird paths). - var errno error - switch err := errors.Cause(err).(type) { - case *os.PathError: - errno = err.Err - case *os.LinkError: - errno = err.Err - case *os.SyscallError: - errno = err.Err - } - return errno == syscall.ENOTDIR || errno == syscall.ENOENT + return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT) } // SecureJoinVFS joins the two given path components (similar to Join) except @@ -68,7 +49,7 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { n := 0 for unsafePath != "" { if n > 255 { - return "", ErrSymlinkLoop + return "", &os.PathError{Op: "SecureJoin", Path: root + "/" + unsafePath, Err: syscall.ELOOP} } // Next path component, p. diff --git a/vendor/github.com/cyphar/filepath-securejoin/vendor.conf b/vendor/github.com/cyphar/filepath-securejoin/vendor.conf deleted file mode 100644 index 66bb574b9..000000000 --- a/vendor/github.com/cyphar/filepath-securejoin/vendor.conf +++ /dev/null @@ -1 +0,0 @@ -github.com/pkg/errors v0.8.0 diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 02a73ccfd..5edd5a7ca 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,6 +1,15 @@ # Change history of go-restful -## [v3.10.1] - 2022-11-19 +## [v3.11.0] - 2023-08-19 + +- restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. + +## [v3.10.2] - 2023-03-09 - DO NOT USE + +- introduced MergePathStrategy to be able to revert behaviour of path concatenation to 3.9.0 + see comment in Readme how to customize this behaviour. 
+ +## [v3.10.1] - 2022-11-19 - DO NOT USE - fix broken 3.10.0 by using path package for joining paths diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index 0625359dc..e3e30080e 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -79,7 +79,7 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo - Content encoding (gzip,deflate) of request and response payloads - Automatic responses on OPTIONS (using a filter) - Automatic CORS request handling (using a filter) -- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12)) +- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi)) - Panic recovery to produce HTTP 500, customizable using RecoverHandler(...) - Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) - Configurable (trace) logging @@ -96,6 +96,7 @@ There are several hooks to customize the behavior of the go-restful package. - Compression - Encoders for other serializers - Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .` +- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/` ## Resources @@ -108,4 +109,4 @@ There are several hooks to customize the behavior of the go-restful package. Type ```git shortlog -s``` for a full list of contributors. -© 2012 - 2022, http://ernestmicklei.com. MIT License. Contributions are welcome. +© 2012 - 2023, http://ernestmicklei.com. MIT License. Contributions are welcome. diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go index ea05b3da8..306c44be7 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route.go +++ b/vendor/github.com/emicklei/go-restful/v3/route.go @@ -40,7 +40,8 @@ type Route struct { ParameterDocs []*Parameter ResponseErrors map[int]ResponseError DefaultResponse *ResponseError - ReadSample, WriteSample interface{} // structs that model an example request or response payload + ReadSample, WriteSample interface{} // structs that model an example request or response payload + WriteSamples []interface{} // if more than one return types is possible (oneof) then this will contain multiple values // Extra information used to store custom information about the route. Metadata map[string]interface{} @@ -164,7 +165,13 @@ func tokenizePath(path string) []string { if "/" == path { return nil } - return strings.Split(strings.TrimLeft(path, "/"), "/") + if TrimRightSlashEnabled { + // 3.9.0 + return strings.Split(strings.Trim(path, "/"), "/") + } else { + // 3.10.2 + return strings.Split(strings.TrimLeft(path, "/"), "/") + } } // for debugging @@ -177,4 +184,8 @@ func (r *Route) EnableContentEncoding(enabled bool) { r.contentEncodingEnabled = &enabled } -var TrimRightSlashEnabled = false +// TrimRightSlashEnabled controls whether +// - path on route building is using path.Join +// - the path of the incoming request is trimmed of its slash suffux. 
+// Value of true matches the behavior of <= 3.9.0 +var TrimRightSlashEnabled = true diff --git a/vendor/github.com/emicklei/go-restful/v3/route_builder.go b/vendor/github.com/emicklei/go-restful/v3/route_builder.go index 830ebf148..75168c12e 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route_builder.go +++ b/vendor/github.com/emicklei/go-restful/v3/route_builder.go @@ -31,17 +31,18 @@ type RouteBuilder struct { typeNameHandleFunc TypeNameHandleFunction // required // documentation - doc string - notes string - operation string - readSample, writeSample interface{} - parameters []*Parameter - errorMap map[int]ResponseError - defaultResponse *ResponseError - metadata map[string]interface{} - extensions map[string]interface{} - deprecated bool - contentEncodingEnabled *bool + doc string + notes string + operation string + readSample interface{} + writeSamples []interface{} + parameters []*Parameter + errorMap map[int]ResponseError + defaultResponse *ResponseError + metadata map[string]interface{} + extensions map[string]interface{} + deprecated bool + contentEncodingEnabled *bool } // Do evaluates each argument with the RouteBuilder itself. @@ -135,9 +136,9 @@ func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) { return p } -// Writes tells what resource type will be written as the response payload. Optional. -func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder { - b.writeSample = sample +// Writes tells which one of the resource types will be written as the response payload. Optional. +func (b *RouteBuilder) Writes(samples ...interface{}) *RouteBuilder { + b.writeSamples = samples // oneof return b } @@ -342,19 +343,29 @@ func (b *RouteBuilder) Build() Route { ResponseErrors: b.errorMap, DefaultResponse: b.defaultResponse, ReadSample: b.readSample, - WriteSample: b.writeSample, + WriteSamples: b.writeSamples, Metadata: b.metadata, Deprecated: b.deprecated, contentEncodingEnabled: b.contentEncodingEnabled, allowedMethodsWithoutContentType: b.allowedMethodsWithoutContentType, } + // set WriteSample if one specified + if len(b.writeSamples) == 1 { + route.WriteSample = b.writeSamples[0] + } route.Extensions = b.extensions route.postBuild() return route } -func concatPath(path1, path2 string) string { - return path.Join(path1, path2) +// merge two paths using the current (package global) merge path strategy. +func concatPath(rootPath, routePath string) string { + + if TrimRightSlashEnabled { + return strings.TrimRight(rootPath, "/") + "/" + strings.TrimLeft(routePath, "/") + } else { + return path.Join(rootPath, routePath) + } } var anonymousFuncCount int32 diff --git a/vendor/github.com/gardener/etcd-druid/api/v1alpha1/types_etcd.go b/vendor/github.com/gardener/etcd-druid/api/v1alpha1/types_etcd.go index d899b1f53..a4686d4e3 100644 --- a/vendor/github.com/gardener/etcd-druid/api/v1alpha1/types_etcd.go +++ b/vendor/github.com/gardener/etcd-druid/api/v1alpha1/types_etcd.go @@ -178,6 +178,13 @@ type BackupSpec struct { // DeltaSnapshotMemoryLimit defines the memory limit after which delta snapshots will be taken // +optional DeltaSnapshotMemoryLimit *resource.Quantity `json:"deltaSnapshotMemoryLimit,omitempty"` + // DeltaSnapshotRetentionPeriod defines the duration for which delta snapshots will be retained, excluding the latest snapshot set. 
+ // The value should be a string formatted as a duration (e.g., '1s', '2m', '3h', '4d') + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9][0-9]*([.][0-9]+)?(s|m|h|d))+$" + // +optional + DeltaSnapshotRetentionPeriod *metav1.Duration `json:"deltaSnapshotRetentionPeriod,omitempty"` + // SnapshotCompression defines the specification for compression of Snapshots. // +optional SnapshotCompression *CompressionSpec `json:"compression,omitempty"` @@ -437,7 +444,7 @@ func (e *Etcd) GetConfigmapName() string { // GetCompactionJobName returns the compaction job name for the Etcd. func (e *Etcd) GetCompactionJobName() string { - return fmt.Sprintf("%s-compact-job", string(e.UID[:6])) + return fmt.Sprintf("%s-compactor", e.Name) } // GetOrdinalPodName returns the Etcd pod name based on the ordinal. diff --git a/vendor/github.com/gardener/etcd-druid/api/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/etcd-druid/api/v1alpha1/zz_generated.deepcopy.go index 563aae732..b8f81b5e1 100644 --- a/vendor/github.com/gardener/etcd-druid/api/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/gardener/etcd-druid/api/v1alpha1/zz_generated.deepcopy.go @@ -83,6 +83,11 @@ func (in *BackupSpec) DeepCopyInto(out *BackupSpec) { x := (*in).DeepCopy() *out = &x } + if in.DeltaSnapshotRetentionPeriod != nil { + in, out := &in.DeltaSnapshotRetentionPeriod, &out.DeltaSnapshotRetentionPeriod + *out = new(metav1.Duration) + **out = **in + } if in.SnapshotCompression != nil { in, out := &in.SnapshotCompression, &out.SnapshotCompression *out = new(CompressionSpec) diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/cmd/options.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/cmd/options.go index c5139a47a..edfcd5751 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/controller/cmd/options.go +++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/cmd/options.go @@ -73,9 +73,10 @@ const ( // GardenerVersionFlag is the name of the command line flag containing the Gardener version. GardenerVersionFlag = "gardener-version" - // GardenletManagesMCMFlag is the name of the command line flag containing the Gardener version. - // TODO(rfranzke): Remove this flag when the MachineControllerManagerDeployment feature gate is promoted to GA. - GardenletManagesMCMFlag = "gardenlet-manages-mcm" + // GardenletUsesGardenerNodeAgentFlag is the name of the command line flag specifying whether gardenlet's feature gate + // 'UseGardenerNodeAgent' is activated. + // TODO(rfranzke): Remove this flag when the UseGardenerNodeAgent feature gate is promoted to GA. + GardenletUsesGardenerNodeAgentFlag = "gardenlet-uses-gardener-node-agent" // LogLevelFlag is the name of the command line flag containing the log level. LogLevelFlag = "log-level" @@ -473,8 +474,8 @@ type SwitchConfig struct { type GeneralOptions struct { // GardenerVersion is the version of the Gardener. GardenerVersion string - // GardenletManagesMCM specifies whether gardenlet manages the machine-controller-manager. - GardenletManagesMCM bool + // GardenletUsesGardenerNodeAgent specifies whether gardenlet's feature gate 'UseGardenerNodeAgent' is activated. + GardenletUsesGardenerNodeAgent bool config *GeneralConfig } @@ -483,13 +484,13 @@ type GeneralOptions struct { type GeneralConfig struct { // GardenerVersion is the version of the Gardener. GardenerVersion string - // GardenletManagesMCM specifies whether gardenlet manages the machine-controller-manager. 
- GardenletManagesMCM bool + // GardenletUsesGardenerNodeAgent specifies whether gardenlet's feature gate 'UseGardenerNodeAgent' is activated. + GardenletUsesGardenerNodeAgent bool } // Complete implements Complete. func (r *GeneralOptions) Complete() error { - r.config = &GeneralConfig{r.GardenerVersion, r.GardenletManagesMCM} + r.config = &GeneralConfig{r.GardenerVersion, r.GardenletUsesGardenerNodeAgent} return nil } @@ -501,5 +502,5 @@ func (r *GeneralOptions) Completed() *GeneralConfig { // AddFlags implements Flagger.AddFlags. func (r *GeneralOptions) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&r.GardenerVersion, GardenerVersionFlag, "", "Version of the gardenlet.") - fs.BoolVar(&r.GardenletManagesMCM, GardenletManagesMCMFlag, false, "Specifies whether gardenlet manages the machine-controller-manager.") + fs.BoolVar(&r.GardenletUsesGardenerNodeAgent, GardenletUsesGardenerNodeAgentFlag, false, "Specifies whether gardenlet's feature gate 'UseGardenerNodeAgent' is activated.") } diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/actuator.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/actuator.go index 5ee848297..667c63b50 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/actuator.go +++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/actuator.go @@ -25,13 +25,13 @@ import ( // Actuator acts upon OperatingSystemConfig resources. type Actuator interface { // Reconcile the operating system config. - Reconcile(context.Context, logr.Logger, *extensionsv1alpha1.OperatingSystemConfig) ([]byte, *string, []string, []string, error) + Reconcile(context.Context, logr.Logger, *extensionsv1alpha1.OperatingSystemConfig) ([]byte, *string, []string, []string, []extensionsv1alpha1.Unit, []extensionsv1alpha1.File, error) // Delete the operating system config. Delete(context.Context, logr.Logger, *extensionsv1alpha1.OperatingSystemConfig) error // ForceDelete forcefully deletes the operating system config. ForceDelete(context.Context, logr.Logger, *extensionsv1alpha1.OperatingSystemConfig) error // Restore the operating system config. - Restore(context.Context, logr.Logger, *extensionsv1alpha1.OperatingSystemConfig) ([]byte, *string, []string, []string, error) + Restore(context.Context, logr.Logger, *extensionsv1alpha1.OperatingSystemConfig) ([]byte, *string, []string, []string, []extensionsv1alpha1.Unit, []extensionsv1alpha1.File, error) // Migrate the operating system config. Migrate(context.Context, logr.Logger, *extensionsv1alpha1.OperatingSystemConfig) error } diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/bash.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/bash.go new file mode 100644 index 000000000..e103a4d73 --- /dev/null +++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/bash.go @@ -0,0 +1,108 @@ +// Copyright 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package operatingsystemconfig + +import ( + "context" + "fmt" + "path" + + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + + extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" + extensionsv1alpha1helper "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/helper" + "github.com/gardener/gardener/pkg/utils" + kubernetesutils "github.com/gardener/gardener/pkg/utils/kubernetes" +) + +// FilesToDiskScript is a utility function which generates a bash script for writing the provided files to the disk. +func FilesToDiskScript(ctx context.Context, reader client.Reader, namespace string, files []extensionsv1alpha1.File) (string, error) { + var out string + + for _, file := range files { + data, err := dataForFileContent(ctx, reader, namespace, &file.Content) + if err != nil { + return "", err + } + + out += ` +mkdir -p "` + path.Dir(file.Path) + `" +` + catDataIntoFile(file.Path, data, pointer.BoolDeref(file.Content.TransmitUnencoded, false)) + + if file.Permissions != nil { + out += ` +` + fmt.Sprintf(`chmod "%04o" "%s"`, *file.Permissions, file.Path) + } + } + + return out, nil +} + +// UnitsToDiskScript is a utility function which generates a bash script for writing the provided units and their +// drop-ins to the disk. +func UnitsToDiskScript(units []extensionsv1alpha1.Unit) string { + var out string + + for _, unit := range units { + unitFilePath := path.Join("/", "etc", "systemd", "system", unit.Name) + + if unit.Content != nil { + out += ` +` + catDataIntoFile(unitFilePath, []byte(*unit.Content), false) + } + + if len(unit.DropIns) > 0 { + unitDropInsDirectoryPath := unitFilePath + ".d" + out += ` +mkdir -p "` + unitDropInsDirectoryPath + `"` + + for _, dropIn := range unit.DropIns { + out += ` +` + catDataIntoFile(path.Join(unitDropInsDirectoryPath, dropIn.Name), []byte(dropIn.Content), false) + } + } + } + + return out +} + +func dataForFileContent(ctx context.Context, c client.Reader, namespace string, content *extensionsv1alpha1.FileContent) ([]byte, error) { + if inline := content.Inline; inline != nil { + return extensionsv1alpha1helper.Decode(inline.Encoding, []byte(inline.Data)) + } + + secret := &corev1.Secret{} + if err := c.Get(ctx, kubernetesutils.Key(namespace, content.SecretRef.Name), secret); err != nil { + return nil, err + } + + return secret.Data[content.SecretRef.DataKey], nil +} + +func catDataIntoFile(path string, data []byte, transmitUnencoded bool) string { + if transmitUnencoded { + return ` +cat << EOF > "` + path + `" +` + string(data) + ` +EOF` + } + + return ` +cat << EOF | base64 -d > "` + path + `" +` + utils.EncodeBase64(data) + ` +EOF` +} diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/README.md b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/README.md index cbd60396f..bd092dfb8 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/README.md +++ 
b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/README.md @@ -2,6 +2,10 @@ [![Go Report Card](https://goreportcard.com/badge/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon)](https://goreportcard.com/report/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon) +**⚠️This package is deprecated and will be removed as soon as the [`UseGardenerNodeAgent` feature gate](../../../../../docs/deployment/feature_gates.md) has been promoted to GA.** + +--- + Project Gardener implements the automated management and operation of [Kubernetes](https://kubernetes.io/) clusters as a service. Its main principle is to leverage Kubernetes concepts for all of its tasks. Recently, most of the vendor specific logic has been developed [in-tree](https://github.com/gardener/gardener). However, the project has grown to a size where it is very hard to extend, maintain, and test. With [GEP-1](https://github.com/gardener/gardener/blob/master/docs/proposals/01-extensibility.md) we have proposed how the architecture can be changed in a way to support external controllers that contain their very own vendor specifics. This way, we can keep Gardener core clean and independent. diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/actuator/actuator_reconcile.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/actuator/actuator_reconcile.go index cb81b60d1..f9bc4a781 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/actuator/actuator_reconcile.go +++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/actuator/actuator_reconcile.go @@ -24,11 +24,11 @@ import ( ) // Reconcile reconciles the update of a OperatingSystemConfig regenerating the os-specific format -func (a *Actuator) Reconcile(ctx context.Context, log logr.Logger, config *extensionsv1alpha1.OperatingSystemConfig) ([]byte, *string, []string, []string, error) { +func (a *Actuator) Reconcile(ctx context.Context, log logr.Logger, config *extensionsv1alpha1.OperatingSystemConfig) ([]byte, *string, []string, []string, []extensionsv1alpha1.Unit, []extensionsv1alpha1.File, error) { cloudConfig, cmd, err := CloudConfigFromOperatingSystemConfig(ctx, log, a.client, config, a.generator) if err != nil { - return nil, nil, nil, nil, fmt.Errorf("could not generate cloud config: %w", err) + return nil, nil, nil, nil, nil, nil, fmt.Errorf("could not generate cloud config: %w", err) } - return cloudConfig, cmd, OperatingSystemConfigUnitNames(config), OperatingSystemConfigFilePaths(config), nil + return cloudConfig, cmd, OperatingSystemConfigUnitNames(config), OperatingSystemConfigFilePaths(config), nil, nil, nil } diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/actuator/actuator_restore.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/actuator/actuator_restore.go index 20d0c3a9e..cad48b49c 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/actuator/actuator_restore.go +++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/actuator/actuator_restore.go @@ -23,6 +23,6 @@ import ( ) // Restore reconciles the update of a OperatingSystemConfig regenerating the os-specific format 
-func (a *Actuator) Restore(ctx context.Context, log logr.Logger, config *extensionsv1alpha1.OperatingSystemConfig) ([]byte, *string, []string, []string, error) { +func (a *Actuator) Restore(ctx context.Context, log logr.Logger, config *extensionsv1alpha1.OperatingSystemConfig) ([]byte, *string, []string, []string, []extensionsv1alpha1.Unit, []extensionsv1alpha1.File, error) { return a.Reconcile(ctx, log, config) } diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/actuator/actuator_util.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/actuator/actuator_util.go index 159042edf..feb3363ff 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/actuator/actuator_util.go +++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/actuator/actuator_util.go @@ -42,6 +42,10 @@ func CloudConfigFromOperatingSystemConfig( ) { files := make([]*commonosgenerator.File, 0, len(config.Spec.Files)) for _, file := range config.Spec.Files { + if file.Content.ImageRef != nil { + continue + } + data, err := DataForFileContent(ctx, c, config.Namespace, &file.Content) if err != nil { return nil, nil, err diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/add.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/add.go index 01db6d168..816f6a465 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/add.go +++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/add.go @@ -38,6 +38,9 @@ type AddOptions struct { // AddToManagerWithOptions adds a controller with the given Options to the given manager. // The opts.Reconciler is being set with a newly instantiated actuator. +// Deprecated: The `oscommon` package is deprecated and will be removed as soon as the UseGardenerNodeAgent feature gate +// has been promoted to GA. +// TODO(rfranzke): Remove the `oscommon` package after the UseGardenerNodeAgent feature gate has been promoted to GA. func AddToManagerWithOptions(ctx context.Context, mgr manager.Manager, ctrlName string, osTypes []string, generator generator.Generator, opts AddOptions) error { return operatingsystemconfig.Add(mgr, operatingsystemconfig.AddArgs{ Actuator: actuator.NewActuator(mgr, ctrlName, generator), @@ -48,6 +51,9 @@ func AddToManagerWithOptions(ctx context.Context, mgr manager.Manager, ctrlName } // AddToManager adds a controller with the default Options. +// Deprecated: The `oscommon` package is deprecated and will be removed as soon as the UseGardenerNodeAgent feature gate +// has been promoted to GA. +// TODO(rfranzke): Remove the `oscommon` package after the UseGardenerNodeAgent feature gate has been promoted to GA. 
func AddToManager(ctx context.Context, mgr manager.Manager, ctrlName string, osTypes []string, generator generator.Generator) error { return AddToManagerWithOptions(ctx, mgr, ctrlName, osTypes, generator, DefaultAddOptions) } diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/reconciler.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/reconciler.go index cd90d87a8..c26dc293d 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/reconciler.go +++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/reconciler.go @@ -124,22 +124,25 @@ func (r *reconciler) reconcile( } log.Info("Starting the reconciliation of OperatingSystemConfig") - userData, command, units, files, err := r.actuator.Reconcile(ctx, log, osc) + userData, command, unitNames, fileNames, extensionUnits, extensionFiles, err := r.actuator.Reconcile(ctx, log, osc) if err != nil { _ = r.statusUpdater.Error(ctx, log, osc, reconcilerutils.ReconcileErrCauseOrErr(err), operationType, "Error reconciling OperatingSystemConfig") return reconcilerutils.ReconcileErr(err) } - secret, err := r.reconcileOSCResultSecret(ctx, osc, userData) - if err != nil { - _ = r.statusUpdater.Error(ctx, log, osc, reconcilerutils.ReconcileErrCauseOrErr(err), operationType, "Could not apply secret for generated cloud config") - return reconcilerutils.ReconcileErr(err) + var secret *corev1.Secret + if len(userData) > 0 { + secret, err = r.reconcileOSCResultSecret(ctx, osc, userData) + if err != nil { + _ = r.statusUpdater.Error(ctx, log, osc, reconcilerutils.ReconcileErrCauseOrErr(err), operationType, "Could not apply secret for generated cloud config") + return reconcilerutils.ReconcileErr(err) + } } patch := client.MergeFrom(osc.DeepCopy()) - setOSCStatus(osc, secret, command, units, files) + setOSCStatus(osc, secret, command, unitNames, fileNames, extensionUnits, extensionFiles) if err := r.client.Status().Patch(ctx, osc, patch); err != nil { - _ = r.statusUpdater.Error(ctx, log, osc, reconcilerutils.ReconcileErrCauseOrErr(err), gardencorev1beta1.LastOperationTypeRestore, "Could not update units and secret ref.") + _ = r.statusUpdater.Error(ctx, log, osc, reconcilerutils.ReconcileErrCauseOrErr(err), gardencorev1beta1.LastOperationTypeRestore, "Could not update status") return reconcilerutils.ReconcileErr(err) } if err := r.statusUpdater.Success(ctx, log, osc, operationType, "Successfully reconciled OperatingSystemConfig"); err != nil { @@ -169,20 +172,23 @@ func (r *reconciler) restore( } log.Info("Starting the restoration of OperatingSystemConfig") - userData, command, units, files, err := r.actuator.Restore(ctx, log, osc) + userData, command, units, files, extensionUnits, extensionFiles, err := r.actuator.Restore(ctx, log, osc) if err != nil { _ = r.statusUpdater.Error(ctx, log, osc, reconcilerutils.ReconcileErrCauseOrErr(err), gardencorev1beta1.LastOperationTypeRestore, "Error restoring OperatingSystemConfig") return reconcilerutils.ReconcileErr(err) } - secret, err := r.reconcileOSCResultSecret(ctx, osc, userData) - if err != nil { - _ = r.statusUpdater.Error(ctx, log, osc, reconcilerutils.ReconcileErrCauseOrErr(err), gardencorev1beta1.LastOperationTypeRestore, "Could not apply secret for generated cloud config") - return reconcilerutils.ReconcileErr(err) + var secret *corev1.Secret + if len(userData) > 0 { + secret, err = r.reconcileOSCResultSecret(ctx, osc, userData) + if err != nil { + _ = 
r.statusUpdater.Error(ctx, log, osc, reconcilerutils.ReconcileErrCauseOrErr(err), gardencorev1beta1.LastOperationTypeRestore, "Could not apply secret for generated cloud config") + return reconcilerutils.ReconcileErr(err) + } } patch := client.MergeFrom(osc.DeepCopy()) - setOSCStatus(osc, secret, command, units, files) + setOSCStatus(osc, secret, command, units, files, extensionUnits, extensionFiles) if err := r.client.Status().Patch(ctx, osc, patch); err != nil { _ = r.statusUpdater.Error(ctx, log, osc, reconcilerutils.ReconcileErrCauseOrErr(err), gardencorev1beta1.LastOperationTypeRestore, "Could not update units and secret ref.") return reconcilerutils.ReconcileErr(err) @@ -296,16 +302,27 @@ func (r *reconciler) reconcileOSCResultSecret(ctx context.Context, osc *extensio return secret, nil } -func setOSCStatus(osc *extensionsv1alpha1.OperatingSystemConfig, secret *corev1.Secret, command *string, units, files []string) { - osc.Status.CloudConfig = &extensionsv1alpha1.CloudConfig{ - SecretRef: corev1.SecretReference{ - Name: secret.Name, - Namespace: secret.Namespace, - }, +func setOSCStatus( + osc *extensionsv1alpha1.OperatingSystemConfig, + secret *corev1.Secret, + command *string, + units, files []string, + extensionUnits []extensionsv1alpha1.Unit, + extensionFiles []extensionsv1alpha1.File, +) { + if secret != nil { + osc.Status.CloudConfig = &extensionsv1alpha1.CloudConfig{ + SecretRef: corev1.SecretReference{ + Name: secret.Name, + Namespace: secret.Namespace, + }, + } } - osc.Status.Units = units - osc.Status.Files = files if command != nil { osc.Status.Command = command } + osc.Status.Units = units + osc.Status.Files = files + osc.Status.ExtensionUnits = extensionUnits + osc.Status.ExtensionFiles = extensionFiles } diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/util/shoot_clients.go b/vendor/github.com/gardener/gardener/extensions/pkg/util/shoot_clients.go index 9570a197b..cb3c8ebbe 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/util/shoot_clients.go +++ b/vendor/github.com/gardener/gardener/extensions/pkg/util/shoot_clients.go @@ -111,7 +111,7 @@ func NewClientForShoot(ctx context.Context, c client.Client, namespace string, o // TODO(ary1992): The new rest mapper implementation doesn't return a NoKindMatchError but a ErrGroupDiscoveryFailed // when an API GroupVersion is not present in the cluster. Remove the old restmapper usage once the upstream issue // (https://github.com/kubernetes-sigs/controller-runtime/pull/2425) is fixed. - mapper, err := thirdpartyapiutil.NewDynamicRESTMapper(shootRESTConfig) + mapper, err := thirdpartyapiutil.NewDynamicRESTMapper(shootRESTConfig, thirdpartyapiutil.WithLazyDiscovery) if err != nil { return nil, nil, fmt.Errorf("failed to create new DynamicRESTMapper: %w", err) } diff --git a/vendor/github.com/gardener/gardener/hack/.ci/component_descriptor b/vendor/github.com/gardener/gardener/hack/.ci/component_descriptor index 0a4ac1715..acc328c8e 100755 --- a/vendor/github.com/gardener/gardener/hack/.ci/component_descriptor +++ b/vendor/github.com/gardener/gardener/hack/.ci/component_descriptor @@ -4,10 +4,7 @@ # # COMPONENT_PREFIXES: Set the image prefix that should be used to # determine if an image is defined by another component. -# Defaults to "eu.gcr.io/gardener-project/gardener" -# -# GENERIC_DEPENDENCIES: Set images that are generic dependencies with no specific tag. 
-# Defaults to "hyperkube,kube-apiserver,kube-controller-manager,kube-scheduler,kube-proxy" +# Defaults to "eu.gcr.io/gardener-project/gardener,europe-docker.pkg.dev/gardener-project" # # COMPONENT_CLI_ARGS: Set all component-cli arguments. # This should be used with care as all defaults are overwritten. @@ -58,10 +55,7 @@ fi if [[ ! -z "$image_vector_path" ]]; then # default environment variables if [[ -z "${COMPONENT_PREFIXES}" ]]; then - COMPONENT_PREFIXES="eu.gcr.io/gardener-project/gardener" - fi - if [[ -z "${GENERIC_DEPENDENCIES}" ]]; then - GENERIC_DEPENDENCIES="hyperkube,kube-apiserver,kube-controller-manager,kube-scheduler,kube-proxy" + COMPONENT_PREFIXES="eu.gcr.io/gardener-project/gardener,europe-docker.pkg.dev/gardener-project" fi if [[ -z "${COMPONENT_CLI_ARGS}" ]]; then @@ -69,7 +63,6 @@ if [[ ! -z "$image_vector_path" ]]; then --comp-desc ${BASE_DEFINITION_PATH} \ --image-vector "$image_vector_path" \ --component-prefixes "${COMPONENT_PREFIXES}" \ - --generic-dependencies "${GENERIC_DEPENDENCIES}" \ " fi @@ -96,14 +89,7 @@ if [[ -d "$repo_root_dir/charts/" ]]; then REPOSITORY=${imageAndTag[0]} TAG=${imageAndTag[1]} - gardener="eu.gcr.io/gardener-project/gardener" - if [[ "$NAME" == "hyperkube" ]]; then - ${ADD_DEPENDENCIES_CMD} --generic-dependencies "{\"name\": \"$NAME\", \"version\": \"$TAG\"}" - elif [[ $REPOSITORY =~ "eu.gcr.io/gardener-project/gardener"* ]]; then - ${ADD_DEPENDENCIES_CMD} --generic-dependencies "{\"name\": \"$NAME\", \"version\": \"$TAG\"}" - else - ${ADD_DEPENDENCIES_CMD} --container-image-dependencies "{\"name\": \"${NAME}\", \"image_reference\": \"${REPOSITORY}:${TAG}\", \"version\": \"$TAG\"}" - fi + ${ADD_DEPENDENCIES_CMD} --container-image-dependencies "{\"name\": \"${NAME}\", \"image_reference\": \"${REPOSITORY}:${TAG}\", \"version\": \"$TAG\"}" done < <(echo "$outputFile") done fi diff --git a/vendor/github.com/gardener/gardener/hack/add-license-header.sh b/vendor/github.com/gardener/gardener/hack/add-license-header.sh index dd5b9ab08..28f2b8ed9 100755 --- a/vendor/github.com/gardener/gardener/hack/add-license-header.sh +++ b/vendor/github.com/gardener/gardener/hack/add-license-header.sh @@ -24,12 +24,10 @@ addlicense \ -ignore ".idea/**" \ -ignore ".vscode/**" \ -ignore "dev/**" \ - -ignore "vendor/**" \ -ignore "**/*.md" \ -ignore "**/*.html" \ -ignore "**/*.yaml" \ -ignore "**/Dockerfile" \ - -ignore "hack/tools/gomegacheck/**" \ -ignore "pkg/component/**/*.sh" \ -ignore "third_party/gopkg.in/yaml.v2/**" \ . 
diff --git a/vendor/github.com/gardener/gardener/hack/check-charts.sh b/vendor/github.com/gardener/gardener/hack/check-charts.sh index 8371b5aae..499990cd1 100755 --- a/vendor/github.com/gardener/gardener/hack/check-charts.sh +++ b/vendor/github.com/gardener/gardener/hack/check-charts.sh @@ -29,7 +29,7 @@ if [[ -d "$1" ]]; then echo "Checking whether all charts can be rendered" for chart_dir in $(find charts -type d -exec test -f '{}'/Chart.yaml \; -print -prune | sort); do [ -f "$chart_dir/values-test.yaml" ] && values_files="-f $chart_dir/values-test.yaml" || unset values_files - helm template $values_files "$chart_dir" 3>&1 1>/dev/null 2>&3 | (grep -v "found symbolic link in path" || true) + helm template $values_files "$chart_dir" > /dev/null 2> >(sed '/found symbolic link in path/d' >&2) done fi diff --git a/vendor/github.com/gardener/gardener/hack/check-docforge.sh b/vendor/github.com/gardener/gardener/hack/check-docforge.sh deleted file mode 100755 index 8186f74a8..000000000 --- a/vendor/github.com/gardener/gardener/hack/check-docforge.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -e - -docCommitHash="fa2e9f84851be81e85668986675db235bb43a6b5" - -echo "> Check Docforge Manifest" -repoPath=${1-"$(readlink -f "$(dirname "${0}")/..")"} -manifestPath=${2-"${repoPath}/.docforge/manifest.yaml"} -diffDirs=${3-".docforge/;docs/"} -repoName=${4-"gardener"} -useToken=${5-false} - -tmpDir=$(mktemp -d) -function cleanup { - rm -rf "$tmpDir" -} -trap cleanup EXIT ERR INT TERM - -curl https://raw.githubusercontent.com/gardener/documentation/${docCommitHash}/.ci/check-manifest --output "${tmpDir}/check-manifest-script.sh" && chmod +x "${tmpDir}/check-manifest-script.sh" -curl https://raw.githubusercontent.com/gardener/documentation/${docCommitHash}/.ci/check-manifest-config --output "${tmpDir}/manifest-config" -scriptPath="${tmpDir}/check-manifest-script.sh" -configPath="${tmpDir}/manifest-config" - -${scriptPath} --repo-path "${repoPath}" --repo-name "${repoName}" --use-token "${useToken}" --manifest-path "${manifestPath}" --diff-dirs "${diffDirs}" --config-path "${configPath}" diff --git a/vendor/github.com/gardener/gardener/hack/check-generate.sh b/vendor/github.com/gardener/gardener/hack/check-generate.sh index 59679519f..c966f7855 100755 --- a/vendor/github.com/gardener/gardener/hack/check-generate.sh +++ b/vendor/github.com/gardener/gardener/hack/check-generate.sh @@ -16,7 +16,7 @@ set -e -echo "> Generate / Vendor Check" +echo "> Generate" makefile="$1/Makefile" check_branch="__check" @@ -24,14 +24,13 @@ initialized_git=false stashed=false checked_out=false generated=false -vendored=false function delete-check-branch { git rev-parse --verify "$check_branch" &>/dev/null && git branch -q -D "$check_branch" || : } function cleanup { - if [[ "$generated" == true ]] || [[ "$vendored" == true ]]; then + if [[ "$generated" == true ]]; then if ! clean_err="$(make -f "$makefile" clean && git reset --hard -q && git clean -qdf)"; then echo "Could not clean: $clean_err" fi @@ -107,20 +106,19 @@ if which git &>/dev/null; then exit 1 fi - echo ">> make revendor" - vendored=true - if ! out=$(make -f "$makefile" revendor 2>&1); then - echo "Error during calling make revendor: $out" + echo ">> make tidy" + if ! out=$(make -f "$makefile" tidy 2>&1); then + echo "Error during calling make tidy: $out" exit 1 fi new_status="$(git status -s)" if [[ "$old_status" != "$new_status" ]]; then - echo "make revendor needs to be run:" + echo "make tidy needs to be run:" echo "$new_status" exit 1 fi else - echo "No git detected, cannot run vendor check" + echo "No git detected, cannot run make check-generate" fi exit 0 diff --git a/vendor/github.com/gardener/gardener/hack/check-imports.sh b/vendor/github.com/gardener/gardener/hack/check-imports.sh index c42828a7c..2ea80cd25 100755 --- a/vendor/github.com/gardener/gardener/hack/check-imports.sh +++ b/vendor/github.com/gardener/gardener/hack/check-imports.sh @@ -31,6 +31,24 @@ echo "> Check Imports" this_module=$(go list -m) +SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +VGOPATH="$VGOPATH" + +# Ensure that if GOPATH is set, the GOPATH/{bin,pkg} directory exists. This seems to be not always +# the case in certain environments like Prow. As we will create a symlink against the bin folder we +# need to make sure that the bin directory is present in the GOPATH. +if [ -n "$GOPATH" ] && [ ! -d "$GOPATH/bin" ]; then mkdir -p "$GOPATH/bin"; fi +if [ -n "$GOPATH" ] && [ ! 
-d "$GOPATH/pkg" ]; then mkdir -p "$GOPATH/pkg"; fi + +VIRTUAL_GOPATH="$(mktemp -d)" +trap 'rm -rf "$VIRTUAL_GOPATH"' EXIT + +# Setup virtual GOPATH so the codegen tools work as expected. +(cd "$SCRIPT_DIR/.."; go mod download && "$VGOPATH" -o "$VIRTUAL_GOPATH") + +export GOROOT="${GOROOT:-"$(go env GOROOT)"}" +export GOPATH="$VIRTUAL_GOPATH" + # We need to explicitly pass GO111MODULE=off to import-boss as it is significantly slower otherwise, # see https://github.com/kubernetes/code-generator/issues/100. export GO111MODULE=off diff --git a/vendor/github.com/gardener/gardener/hack/check-license-header.sh b/vendor/github.com/gardener/gardener/hack/check-license-header.sh index 84e3fb3cd..26724f582 100755 --- a/vendor/github.com/gardener/gardener/hack/check-license-header.sh +++ b/vendor/github.com/gardener/gardener/hack/check-license-header.sh @@ -22,13 +22,11 @@ missing_license_header_files="$(addlicense \ -ignore ".idea/**" \ -ignore ".vscode/**" \ -ignore "dev/**" \ - -ignore "vendor/**" \ -ignore "**/*.md" \ -ignore "**/*.html" \ -ignore "**/*.yaml" \ -ignore "**/Dockerfile" \ - -ignore "hack/tools/gomegacheck/**" \ - -ignore "pkg/component/**/*.sh" \ + -ignore "pkg/**/*.sh" \ -ignore "third_party/gopkg.in/yaml.v2/**" \ .)" || true diff --git a/vendor/github.com/gardener/gardener/hack/check-skaffold-deps-for-binary.sh b/vendor/github.com/gardener/gardener/hack/check-skaffold-deps-for-binary.sh new file mode 100755 index 000000000..eb8b75363 --- /dev/null +++ b/vendor/github.com/gardener/gardener/hack/check-skaffold-deps-for-binary.sh @@ -0,0 +1,122 @@ +#!/usr/bin/env bash +# +# Copyright 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +skaffold_file="" +binary_name="" +skaffold_config_name="" + +parse_flags() { + operation="$1" + shift + + while test $# -gt 1; do + case "$1" in + --skaffold-file) + shift; skaffold_file="$1" + ;; + --binary) + shift; binary_name="$1" + ;; + --skaffold-config) + shift; skaffold_config_name="$1" + ;; + *) + echo "Unknown argument: $1" + exit 1 + ;; + esac + shift + done +} + +parse_flags "$@" + +out_dir=$(mktemp -d) +function cleanup_output { + rm -rf "$out_dir" +} +trap cleanup_output EXIT + +repo_root="$(git rev-parse --show-toplevel)" +skaffold_yaml="$(cat "$repo_root/$skaffold_file")" + +path_current_skaffold_dependencies="${out_dir}/current-$skaffold_file-deps-$binary_name.txt" +path_actual_dependencies="${out_dir}/actual-$skaffold_file-deps-$binary_name.txt" + +echo "$skaffold_yaml" |\ + yq eval "select(.metadata.name == \"$skaffold_config_name\") | .build.artifacts[] | select(.ko.main == \"./cmd/$binary_name\") | .ko.dependencies.paths[]?" 
- |\ + sort -f |\ + uniq > "$path_current_skaffold_dependencies" + +module_name=$(go list -m) +module_prefix="$module_name/" +go list -f '{{ join .Deps "\n" }}' "./cmd/$binary_name" |\ + grep "$module_prefix" |\ + sed "s@$module_prefix@@g" |\ + sort -f |\ + uniq > "$path_actual_dependencies" + +# always add VERSION file +echo "VERSION" >> "$path_actual_dependencies" + +# sort dependencies +sort -fo "$path_current_skaffold_dependencies"{,} +sort -fo "$path_actual_dependencies"{,} + +case "$operation" in + check) + echo -n ">> Checking defined dependencies in Skaffold config '$skaffold_config_name' for '$binary_name' in '$skaffold_file'..." + if ! diff="$(diff "$path_current_skaffold_dependencies" "$path_actual_dependencies")"; then + echo + echo ">>> The following actual dependencies are missing (need to be added):" + echo "$diff" | grep '>' | awk '{print $2}' + echo + echo ">>> The following dependencies are not needed actually (need to be removed):" + echo "$diff" | grep '<' | awk '{print $2}' + echo + echo ">>> Run './hack/update-skaffold-deps.sh' to fix." + + exit 1 + else + echo " success." + fi + ;; + update) + echo -n ">> Updating dependencies in Skaffold config '$skaffold_config_name' for '$binary_name' in '$skaffold_file'..." + + yq eval -i "select(.metadata.name == \"$skaffold_config_name\") |= .build.artifacts[] |= select(.ko.main == \"./cmd/$binary_name\") |= .ko.dependencies.paths |= [$(cat "$path_actual_dependencies" | sed -e 's/^/"/' -e 's/$/"/' | tr '\n' ',' | sed 's/,$//')]" "$skaffold_file" + + if ! diff="$(diff "$path_current_skaffold_dependencies" "$path_actual_dependencies")"; then + echo + echo ">>> Added the following dependencies:" + echo "$diff" | grep '>' | awk '{print $2}' + echo + echo ">>> Removed the following dependencies:" + echo "$diff" | grep '<' | awk '{print $2}' + echo + + exit 1 + else + echo " already up to date." + fi + ;; + *) + echo "Unknown operation: $operation" + exit 1 + ;; +esac diff --git a/vendor/github.com/gardener/gardener/hack/check-skaffold-deps.sh b/vendor/github.com/gardener/gardener/hack/check-skaffold-deps.sh index ecdaf774a..48cf864ec 100755 --- a/vendor/github.com/gardener/gardener/hack/check-skaffold-deps.sh +++ b/vendor/github.com/gardener/gardener/hack/check-skaffold-deps.sh @@ -16,78 +16,36 @@ set -e -echo "> Check Skaffold Dependencies" +operation="${1:-check}" -check_successful=true +echo "> ${operation^} Skaffold Dependencies" -out_dir=$(mktemp -d) -function cleanup_output { - rm -rf "$out_dir" -} -trap cleanup_output EXIT - -function check() { - skaffold_file="$1" - binary_name="$2" - skaffold_config_name="$3" - - skaffold_yaml="$(cat "$(dirname "$0")/../$skaffold_file")" - - path_current_skaffold_dependencies="${out_dir}/current-$skaffold_file-deps-$binary_name.txt" - path_actual_dependencies="${out_dir}/actual-$skaffold_file-deps-$binary_name.txt" - - echo "$skaffold_yaml" |\ - yq eval "select(.metadata.name == \"$skaffold_config_name\") | .build.artifacts[] | select(.ko.main == \"./cmd/$binary_name\") | .ko.dependencies.paths[]?" 
- |\ - sort |\ - uniq > "$path_current_skaffold_dependencies" +success=true +repo_root="$(git rev-parse --show-toplevel)" - go list -f '{{ join .Deps "\n" }}' "./cmd/$binary_name" |\ - grep "github.com/gardener/gardener/" |\ - sed 's/github\.com\/gardener\/gardener\///g' |\ - sort |\ - uniq > "$path_actual_dependencies" - - # always add vendor directory and VERSION file - echo "vendor" >> "$path_actual_dependencies" - echo "VERSION" >> "$path_actual_dependencies" - - # sort dependencies - sort -o $path_current_skaffold_dependencies{,} - sort -o $path_actual_dependencies{,} - - echo -n ">> Checking defined dependencies in Skaffold config '$skaffold_config_name' for '$binary_name' in '$skaffold_file'..." - if ! diff="$(diff "$path_current_skaffold_dependencies" "$path_actual_dependencies")"; then - check_successful=false - - echo - echo ">>> The following actual dependencies are missing in $skaffold_file (need to be added):" - echo "$diff" | grep '>' | awk '{print $2}' - echo - echo ">>> The following dependencies defined in $skaffold_file are not needed actually (need to be removed):" - echo "$diff" | grep '<' | awk '{print $2}' - echo - else - echo " success." +function run() { + if ! "$repo_root"/hack/check-skaffold-deps-for-binary.sh "$operation" --skaffold-file "$1" --binary "$2" --skaffold-config "$3"; then + success=false fi } # skaffold.yaml -check "skaffold.yaml" "gardener-admission-controller" "controlplane" -check "skaffold.yaml" "gardener-apiserver" "controlplane" -check "skaffold.yaml" "gardener-controller-manager" "controlplane" -check "skaffold.yaml" "gardener-extension-provider-local" "provider-local" -check "skaffold.yaml" "gardener-resource-manager" "gardenlet" -check "skaffold.yaml" "gardener-scheduler" "controlplane" -check "skaffold.yaml" "gardenlet" "gardenlet" +run "skaffold.yaml" "gardener-admission-controller" "controlplane" +run "skaffold.yaml" "gardener-apiserver" "controlplane" +run "skaffold.yaml" "gardener-controller-manager" "controlplane" +run "skaffold.yaml" "gardener-extension-provider-local" "provider-local" +run "skaffold.yaml" "gardener-resource-manager" "gardenlet" +run "skaffold.yaml" "gardener-scheduler" "controlplane" +run "skaffold.yaml" "gardenlet" "gardenlet" # skaffold-operator.yaml -check "skaffold-operator.yaml" "gardener-operator" "gardener-operator" -check "skaffold-operator.yaml" "gardener-resource-manager" "gardener-operator" -check "skaffold-operator.yaml" "gardener-admission-controller" "gardener-operator" -check "skaffold-operator.yaml" "gardener-apiserver" "gardener-operator" -check "skaffold-operator.yaml" "gardener-controller-manager" "gardener-operator" -check "skaffold-operator.yaml" "gardener-scheduler" "gardener-operator" - -if [ "$check_successful" = false ] ; then +run "skaffold-operator.yaml" "gardener-operator" "gardener-operator" +run "skaffold-operator.yaml" "gardener-resource-manager" "gardener-operator" +run "skaffold-operator.yaml" "gardener-admission-controller" "gardener-operator" +run "skaffold-operator.yaml" "gardener-apiserver" "gardener-operator" +run "skaffold-operator.yaml" "gardener-controller-manager" "gardener-operator" +run "skaffold-operator.yaml" "gardener-scheduler" "gardener-operator" + +if ! 
$success ; then exit 1 fi diff --git a/vendor/github.com/gardener/gardener/hack/check.sh b/vendor/github.com/gardener/gardener/hack/check.sh index 2753e3711..2925af165 100755 --- a/vendor/github.com/gardener/gardener/hack/check.sh +++ b/vendor/github.com/gardener/gardener/hack/check.sh @@ -34,7 +34,7 @@ golangci-lint run $GOLANGCI_LINT_CONFIG_FILE --timeout 10m $@ echo "Executing gofmt/goimports" folders=() -for f in $@; do +for f in "$@"; do folders+=( "$(echo $f | sed 's/\.\.\.//')" ) done unformatted_files="$(goimports -l ${folders[*]})" diff --git a/vendor/github.com/gardener/gardener/hack/compare-k8s-api-groups.sh b/vendor/github.com/gardener/gardener/hack/compare-k8s-api-groups.sh new file mode 100755 index 000000000..59b0818cf --- /dev/null +++ b/vendor/github.com/gardener/gardener/hack/compare-k8s-api-groups.sh @@ -0,0 +1,90 @@ +#!/usr/bin/env bash +# +# Copyright 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +usage() { + echo "Usage:" + echo "> compare-k8s-apigroups.sh [ -h | ]" + echo + echo ">> For example: compare-k8s-apigroups.sh 1.26 1.27" + + exit 0 +} + +if [ "$1" == "-h" ] || [ "$#" -ne 2 ]; then + usage +fi + +versions=("$1" "$2") + +out_dir=$(mktemp -d) +function cleanup_output { + rm -rf "$out_dir" +} +trap cleanup_output EXIT + +base_dir="staging/src/k8s.io/client-go/informers" + +for version in "${versions[@]}"; do + rm -rf "${out_dir}/kubernetes-${version}" + rm -f "${out_dir}/k8s-apiGVRs-${version}.txt" + rm -f "${out_dir}/k8s-apiGVs-${version}.txt" + + git clone --depth 1 --filter=blob:none --sparse https://github.com/kubernetes/kubernetes -b "release-${version}" "${out_dir}/kubernetes-${version}" + pushd "${out_dir}/kubernetes-${version}" > /dev/null + git sparse-checkout set "$base_dir" + popd > /dev/null + + groupVersions=() + groupVersionResources=() + g="" + v="" + + while IFS= read -r line; do + if [[ $line =~ Group=([a-zA-Z0-9.-]+),[[:space:]]*Version=([a-zA-Z0-9.-]+) ]]; then + g="${BASH_REMATCH[1]}" + v="${BASH_REMATCH[2]}" + if [[ $g == "core" ]]; then + groupVersions+=("$v") + else + groupVersions+=("$g/$v") + fi + elif [[ $line =~ WithResource\(\"(.*)\"\) ]]; then + k="${BASH_REMATCH[1]}" + if [[ $g == "core" ]]; then + groupVersionResources+=("$v/$k") + else + groupVersionResources+=("$g/$v/$k") + fi + fi + done < "${out_dir}/kubernetes-${version}/${base_dir}/generic.go" + + echo "${groupVersions[@]}" | tr ' ' '\n' | sort | uniq > "${out_dir}/k8s-apiGVs-${version}.txt" + echo "${groupVersionResources[@]}" | tr ' ' '\n' | sort | uniq > "${out_dir}/k8s-apiGVRs-${version}.txt" +done + +echo +echo "Kubernetes API group versions added in $2 compared to $1:" +diff "${out_dir}/k8s-apiGVs-$1.txt" "${out_dir}/k8s-apiGVs-$2.txt" | grep '>' | awk '{print $2}' +echo +echo "Kubernetes API GVRs added in $2 compared to $1:" +diff "${out_dir}/k8s-apiGVRs-$1.txt" 
"${out_dir}/k8s-apiGVRs-$2.txt" | grep '>' | awk '{print $2}' +echo +echo "Kubernetes API group versions removed in $2 compared to $1:" +diff "${out_dir}/k8s-apiGVs-$1.txt" "${out_dir}/k8s-apiGVs-$2.txt" | grep '<' | awk '{print $2}' +echo +echo "Kubernetes API GVRs removed in $2 compared to $1:" \ No newline at end of file diff --git a/vendor/github.com/gardener/gardener/hack/compute-k8s-controllers.sh b/vendor/github.com/gardener/gardener/hack/compute-k8s-controllers.sh new file mode 100755 index 000000000..559a136c5 --- /dev/null +++ b/vendor/github.com/gardener/gardener/hack/compute-k8s-controllers.sh @@ -0,0 +1,175 @@ +#!/usr/bin/env bash +# +# Copyright 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +usage() { + echo "Usage:" + echo "> compute-k8s-controllers.sh [ -h | ]" + echo + echo ">> For example: compute-k8s-controllers.sh 1.26 1.27" + + exit 0 +} + +if [ "$1" == "-h" ] || [ "$#" -ne 2 ]; then + usage +fi + +versions=("$1" "$2") + +out_dir=$(mktemp -d) +function cleanup_output { + rm -rf "$out_dir" +} +trap cleanup_output EXIT + +# Define the path map +declare -A path_map=( + ["attachdetach"]="pkg/controller/volume/attachdetach/attach_detach_controller.go" + ["bootstrapsigner"]="pkg/controller/bootstrap/bootstrapsigner.go" + ["cloud-node-lifecycle"]="staging/src/k8s.io/cloud-provider/controllers/nodelifecycle/node_lifecycle_controller.go" + ["clusterrole-aggregation"]="pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go" + ["cronjob"]="pkg/controller/cronjob/cronjob_controllerv2.go" + ["csrapproving"]="pkg/controller/certificates/approver/sarapprove.go" + ["csrcleaner"]="pkg/controller/certificates/cleaner/cleaner.go" + ["csrsigning"]="pkg/controller/certificates/signer/signer.go" + ["daemonset"]="pkg/controller/daemon/daemon_controller.go" + ["deployment"]="pkg/controller/deployment/deployment_controller.go" + ["disruption"]="pkg/controller/disruption/disruption.go" + ["endpoint"]="pkg/controller/endpoint/endpoints_controller.go" + ["endpointslice"]="pkg/controller/endpointslice/endpointslice_controller.go" + ["endpointslicemirroring"]="pkg/controller/endpointslicemirroring/endpointslicemirroring_controller.go" + ["ephemeral-volume"]="pkg/controller/volume/ephemeral/controller.go" + ["garbagecollector"]="pkg/controller/garbagecollector/garbagecollector.go" + ["horizontalpodautoscaling"]="pkg/controller/podautoscaler/horizontal.go" + ["job"]="pkg/controller/job/job_controller.go" + ["legacy-service-account-token-cleaner"]="pkg/controller/serviceaccount/legacy_serviceaccount_token_cleaner.go" + ["namespace"]="pkg/controller/namespace/namespace_controller.go" + ["nodeipam"]="pkg/controller/nodeipam/node_ipam_controller.go" + ["nodelifecycle"]="pkg/controller/nodelifecycle/node_lifecycle_controller.go" + 
["persistentvolume-binder"]="pkg/controller/volume/persistentvolume/pv_controller_base.go" + ["persistentvolume-expander"]="pkg/controller/volume/expand/expand_controller.go" + ["podgc"]="pkg/controller/podgc/gc_controller.go" + ["pv-protection"]="pkg/controller/volume/pvprotection/pv_protection_controller.go" + ["pvc-protection"]="pkg/controller/volume/pvcprotection/pvc_protection_controller.go" + ["replicaset"]="pkg/controller/replicaset/replica_set.go" + ["replicationcontroller"]="pkg/controller/replication/replication_controller.go" + ["resource-claim-controller"]="pkg/controller/resourceclaim/controller.go" + ["resourcequota"]="pkg/controller/resourcequota/resource_quota_controller.go" + ["root-ca-cert-publisher"]="pkg/controller/certificates/rootcacertpublisher/publisher.go" + ["route"]="staging/src/k8s.io/cloud-provider/controllers/route/route_controller.go" + ["service"]="staging/src/k8s.io/cloud-provider/controllers/service/controller.go" + ["serviceaccount"]="pkg/controller/serviceaccount/serviceaccounts_controller.go" + ["serviceaccount-token"]="pkg/controller/serviceaccount/tokens_controller.go" + ["statefulset"]="pkg/controller/statefulset/stateful_set.go" + ["storage-version-gc"]="pkg/controller/storageversiongc/gc_controller.go" + ["tokencleaner"]="pkg/controller/bootstrap/tokencleaner.go" + ["ttl"]="pkg/controller/ttl/ttl_controller.go" + ["ttl-after-finished"]="pkg/controller/ttlafterfinished/ttlafterfinished_controller.go" +) + +for version in "${versions[@]}"; do + rm -rf "${out_dir}/kubernetes-${version}" + rm -f "${out_dir}/k8s-controllers-${version}.txt" + + git clone --depth 1 --filter=blob:none --sparse https://github.com/kubernetes/kubernetes -b "release-${version}" "${out_dir}/kubernetes-${version}" + pushd "${out_dir}/kubernetes-${version}" > /dev/null + git sparse-checkout set "cmd/kube-controller-manager" "pkg/controller" "staging/src/k8s.io/cloud-provider/controllers" + popd > /dev/null + + if [ "$version" \< "1.26" ]; then + names=$(grep -o 'controllers\["[^"]*' "${out_dir}/kubernetes-${version}/cmd/kube-controller-manager/app/controllermanager.go" | awk -F '"' '{print $2}') + # This is a special controller which is not initialized normally, see https://github.com/kubernetes/kubernetes/blob/99151c39b7d4595632f7745ba7fb4dea4356f7fd/cmd/kube-controller-manager/app/controllermanager.go#L405-L411 + names+=" serviceaccount-token" + elif [ "$version" \< "1.28" ]; then + names=$(grep -o 'register("[^"]*' "${out_dir}/kubernetes-${version}/cmd/kube-controller-manager/app/controllermanager.go" | awk -F '"' '{print $2}') + # This is a special controller which is not initialized normally, see https://github.com/kubernetes/kubernetes/blob/99151c39b7d4595632f7745ba7fb4dea4356f7fd/cmd/kube-controller-manager/app/controllermanager.go#L405-L411 + names+=" serviceaccount-token" + else + names=$(grep -E 'func KCMControllerAliases\(\) map\[string\]string \{' "${out_dir}/kubernetes-${version}/cmd/kube-controller-manager/names/controller_names.go" -A 200 | awk -F '[" :]+' '/^ \"[a-zA-Z0-9-]+\"/ {print $2}') + fi + + for name in $names; do + if [ ! "${path_map[$name]}" ]; then + echo "No path mapping found for $name", The controller could have been removed or the path might have changed. + echo "Please enhance the map in the script with the path for this controller." 
+ exit 1 + fi + done + + unset api_group_controllers + declare -A api_group_controllers + + for controller in $names; do + file_path="${out_dir}/kubernetes-${version}/${path_map[$controller]}" + if [ -f "$file_path" ]; then + # Find lines containing 'k8s.io/api/' in the file, and extract content after 'k8s.io/api/' up to + # the next double quote. This will be the API groups used for this controller. + api_groups=$(grep -o 'k8s\.io/api/[^"]*' "$file_path" | awk -F 'k8s.io/api/' '{print $2}') + for api_group in $api_groups + do + api_group=$(echo "$api_group" | tr -d '[:space:]' | sed 's/^core\/v1$/v1/' | sed 's/apiserverinternal/internal/') + if [ -n "$api_group" ]; then + api_group_controllers["$api_group"]+="$controller " + fi + done + else + echo "The file $file_path cannot be found. Please enhance the map in the script with the correct path for this controller." + exit 1 + fi + done + + for api_group in "${!api_group_controllers[@]}"; do + echo "$api_group:$(echo "${api_group_controllers[$api_group]}" | tr ' ' '\n' | sort | tr '\n' ' ')" >> "${out_dir}/k8s-controllers-${version}.txt" + done + + sort -o "${out_dir}/k8s-controllers-${version}.txt" "${out_dir}/k8s-controllers-${version}.txt" +done + +echo +echo "kube-controller-manager controllers added in $2 compared to $1:" +IFS=$'\n' read -r -d '' -a added_lines < <(diff "${out_dir}/k8s-controllers-$1.txt" "${out_dir}/k8s-controllers-$2.txt" | grep '^>' | sed 's/^> //' && printf '\0') +for added_line in "${added_lines[@]}"; do + api_group=$(echo "$added_line" | awk -F ': ' '{print $1}') + controllers=$(echo "$added_line" | awk -F ': ' '{print $2}' | tr ' ' '\n') + + # Find the corresponding line in the other file + old_line=$(grep "^$api_group: " "${out_dir}/k8s-controllers-$1.txt" | awk -F ': ' '{print $2}' | tr ' ' '\n') + + added_controllers=$(comm -23 <(echo "$controllers" | sort) <(echo "$old_line" | sort) | tr '\n' ' ') + + if [ -n "$added_controllers" ]; then + echo "Added Controllers for API Group [$api_group]: $added_controllers" + fi +done + +echo +echo "kube-controller-manager controllers removed in $2 compared to $1:" +IFS=$'\n' read -r -d '' -a removed_lines < <(diff "${out_dir}/k8s-controllers-$1.txt" "${out_dir}/k8s-controllers-$2.txt" | grep '^<' | sed 's/^< //' && printf '\0') +for removed_line in "${removed_lines[@]}"; do + api_group=$(echo "$removed_line" | awk -F ': ' '{print $1}') + controllers=$(echo "$removed_line" | awk -F ': ' '{print $2}' | tr ' ' '\n') + + # Find the corresponding line in the other file + new_line=$(grep "^$api_group: " "${out_dir}/k8s-controllers-$2.txt" | awk -F ': ' '{print $2}' | tr ' ' '\n') + + removed_controllers=$(comm -23 <(echo "$controllers" | sort) <(echo "$new_line" | sort) | tr '\n' ' ') + + if [ -n "$removed_controllers" ]; then + echo "Removed Controllers for API Group [$api_group]: $removed_controllers" + fi +done diff --git a/vendor/github.com/gardener/gardener/hack/gardener-extensions-down.sh b/vendor/github.com/gardener/gardener/hack/gardener-extensions-down.sh index d7b16579f..55e79052c 100755 --- a/vendor/github.com/gardener/gardener/hack/gardener-extensions-down.sh +++ b/vendor/github.com/gardener/gardener/hack/gardener-extensions-down.sh @@ -60,10 +60,11 @@ if [[ "$remaining_seeds" != "" ]]; then echo "No clean up of kind cluster because of remaining seeds: ${remaining_seeds//$'\n'/,}" else echo "Cleaning up admission controllers" - "$SCRIPT_DIR"/../example/provider-extensions/garden/configure-admission.sh "$PATH_GARDEN_KUBECONFIG" delete + 
"$SCRIPT_DIR"/../example/provider-extensions/garden/configure-admission.sh "$PATH_GARDEN_KUBECONFIG" delete --ignore-not-found echo "Cleaning up kind cluster" kubectl --kubeconfig="$PATH_GARDEN_KUBECONFIG" delete validatingwebhookconfiguration/gardener-admission-controller --ignore-not-found - kubectl --kubeconfig="$PATH_GARDEN_KUBECONFIG" annotate project local garden confirmation.gardener.cloud/deletion=true + kubectl --kubeconfig="$PATH_GARDEN_KUBECONFIG" annotate project garden confirmation.gardener.cloud/deletion=true + kubectl --kubeconfig="$PATH_GARDEN_KUBECONFIG" annotate -f "$SCRIPT_DIR"/../example/provider-extensions/garden/project/project.yaml confirmation.gardener.cloud/deletion=true skaffold --kubeconfig="$PATH_GARDEN_KUBECONFIG" delete -m extensions-env -p extensions skaffold --kubeconfig="$PATH_GARDEN_KUBECONFIG" delete -m etcd,controlplane -p extensions kubectl --kubeconfig="$PATH_GARDEN_KUBECONFIG" delete ns garden gardener-system-seed-lease --ignore-not-found diff --git a/vendor/github.com/gardener/gardener/hack/generate-crds.sh b/vendor/github.com/gardener/gardener/hack/generate-crds.sh index b119d9a4c..1655ef6e3 100755 --- a/vendor/github.com/gardener/gardener/hack/generate-crds.sh +++ b/vendor/github.com/gardener/gardener/hack/generate-crds.sh @@ -19,13 +19,16 @@ set -o nounset set -o pipefail # Usage: -# generate-crds.sh [ ...] +# generate-crds.sh [] [ ...] # Generate manifests for all CRDs to the current working directory. # Useful for development purposes. # -# File name prefix for manifest files (e.g. '10-crd-') -# -l (Optional) If -l argument is given then the generated CRDs will have label gardener.cloud/deletion-protected: "true" -# List of groups to generate (generate all if unset) +# -p File name prefix for manifest files (e.g. '10-crd-') +# -l (Optional) If this argument is given then the generated CRDs will have label gardener.cloud/deletion-protected: "true" +# -k (Optional) If this argument is given then the generated CRDs will have annotation resources.gardener.cloud/keep-object: "true" +# -r (Optional) If this argument is given then the generated CRDs will have annotation api-approved.kubernetes.io: "" +# --allow-dangerous-types (Optional) If this argument is given then the CRD generation will tolerate issues related to dangerous types. +# List of groups to generate (generate all if unset) if ! command -v controller-gen &> /dev/null ; then >&2 echo "controller-gen not available" @@ -33,10 +36,31 @@ if ! command -v controller-gen &> /dev/null ; then fi output_dir="$(pwd)" -file_name_prefix="$1" +output_dir_temp="$(mktemp -d)" add_deletion_protection_label=false +add_keep_object_annotation=false +k8s_io_api_approval_reason="unapproved, temporarily squatting" crd_options="" +SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +VGOPATH="$VGOPATH" + +# Ensure that if GOPATH is set, the GOPATH/{bin,pkg} directory exists. This seems to be not always +# the case in certain environments like Prow. As we will create a symlink against the bin folder we +# need to make sure that the bin directory is present in the GOPATH. +if [ -n "$GOPATH" ] && [ ! -d "$GOPATH/bin" ]; then mkdir -p "$GOPATH/bin"; fi +if [ -n "$GOPATH" ] && [ ! -d "$GOPATH/pkg" ]; then mkdir -p "$GOPATH/pkg"; fi + +VIRTUAL_GOPATH="$(mktemp -d)" +trap 'rm -rf "$VIRTUAL_GOPATH"' EXIT + +# Setup virtual GOPATH so the codegen tools work as expected. 
+(cd "$SCRIPT_DIR/.."; go mod download && "$VGOPATH" -o "$VIRTUAL_GOPATH") + +export GOROOT="${GOROOT:-"$(go env GOROOT)"}" +export GOPATH="$VIRTUAL_GOPATH" +export GO111MODULE=off + get_group_package () { case "$1" in "extensions.gardener.cloud") @@ -51,12 +75,18 @@ get_group_package () { "druid.gardener.cloud") echo "github.com/gardener/etcd-druid/api/v1alpha1" ;; - "autoscaling.k8s.io") + "hvpaautoscaling.k8s.io") echo "github.com/gardener/hvpa-controller/api/v1alpha1" ;; "fluentbit.fluent.io") echo "github.com/fluent/fluent-operator/v2/apis/fluentbit/v1alpha2" ;; + "autoscaling.k8s.io") + echo "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" + ;; + "machine.sapcloud.io") + echo "github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1" + ;; *) >&2 echo "unknown group $1" return 1 @@ -68,8 +98,10 @@ generate_all_groups () { generate_group resources.gardener.cloud generate_group operator.gardener.cloud generate_group druid.gardener.cloud + generate_group hvpaautoscaling.k8s.io generate_group autoscaling.k8s.io generate_group fluentbit.fluent.io + generate_group machine.sapcloud.io } generate_group () { @@ -85,18 +117,40 @@ generate_group () { exit 1 fi - # clean all generated files for this group to account for changed prefix or removed resources - if ls "$output_dir"/*${group}_*.yaml >/dev/null 2>&1; then - rm "$output_dir"/*${group}_*.yaml - fi + generate="controller-gen crd"$crd_options" paths="$package_path" output:crd:dir="$output_dir_temp" output:stdout" - controller-gen crd"$crd_options" paths="$package_path" output:crd:dir="$output_dir" output:stdout + if [[ "$group" == "druid.gardener.cloud" ]]; then + # /scale subresource is intentionally removed from this CRD, although it is specified in the original CRD from + # etcd-druid, due to adverse interaction with VPA. + # See https://github.com/gardener/gardener/pull/6850 and https://github.com/gardener/gardener/pull/8560#discussion_r1347470394 + # TODO(shreyas-s-rao): Remove this workaround as soon as the scale subresource is supported properly. + etcd_druid_dir="$(go list -f '{{ .Dir }}' "github.com/gardener/etcd-druid")" + etcd_api_types_file="${etcd_druid_dir}/api/v1alpha1/types_etcd.go" + # Create a local copy outside the mod cache path in order to patch the types file via sed. + etcd_api_types_backup="$(mktemp -d)/types_etcd.go" + cp "$etcd_api_types_file" "$etcd_api_types_backup" + chmod +w "$etcd_api_types_file" "$etcd_druid_dir/api/v1alpha1/" + trap 'cp "$etcd_api_types_backup" "$etcd_api_types_file" && chmod -w "$etcd_druid_dir/api/v1alpha1/"' EXIT + sed -i '/\/\/ +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector/d' "$etcd_api_types_file" + $generate + elif [[ "$group" == "autoscaling.k8s.io" ]]; then + # See https://github.com/kubernetes/autoscaler/blame/master/vertical-pod-autoscaler/hack/generate-crd-yaml.sh#L43-L45 + generator_output="$(mktemp -d)/controller-gen.log" + # As go list does not work with symlinks we need to manually construct the package paths to correctly + # generate v1beta2 CRDs. 
+ package_path="${package_path};${package_path}beta2;" + generate="controller-gen crd"$crd_options" paths="$package_path" output:crd:dir="$output_dir_temp" output:stdout" + $generate &> "$generator_output" ||: + grep -v -e 'map keys must be strings, not int' -e 'not all generators ran successfully' -e 'usage' "$generator_output" && { echo "Failed to generate CRD YAMLs."; exit 1; } + else + $generate + fi + local relevant_files=("$@") while IFS= read -r crd; do crd_out="$output_dir/$file_name_prefix$(basename $crd)" - if [ "$crd" != "$crd_out" ]; then - mv "$crd" "$crd_out" - fi + mv "$crd" "$crd_out" + relevant_files+=("$(basename "$crd_out")") if $add_deletion_protection_label; then if grep -q "clusters.extensions.gardener.cloud" "$crd_out"; then @@ -106,42 +160,79 @@ generate_group () { fi fi + if $add_keep_object_annotation; then + sed -i '/^ annotations:.*/a\ resources.gardener.cloud/keep-object: "true"' "$crd_out" + fi + # TODO(plkokanov): this is needed to add the `api-approved.kubernetes.io` annotaiton to resource from the *.k8s.io api group generated by controller-gen # Currently there is an issue open to do that automatically: https://github.com/kubernetes-sigs/controller-tools/issues/656 if [[ ${group} =~ .*\.k8s\.io ]]; then - sed -i '/^ annotations:.*/a\ api-approved.kubernetes.io: unapproved, temporarily squatting' "$crd_out" + sed -i "/^ annotations:.*/a\ api-approved.kubernetes.io: $k8s_io_api_approval_reason" "$crd_out" fi - done < <(ls "$output_dir/${group}"_*.yaml) -} + done < <(ls "$output_dir_temp/${group/hvpa/}"_*.yaml) -if [ -n "${2:-}" ]; then - if [ "${2}" == "-l" ]; then - add_deletion_protection_label=true - if [ -n "${3:-}" ]; then - while [ -n "${3:-}" ] ; do - generate_group "$3" - shift - done - else - generate_all_groups - fi - elif [ "${2}" == "-allow-dangerous-types" ]; then - crd_options=":allowDangerousTypes=true" - if [ -n "${3:-}" ]; then - while [ -n "${3:-}" ] ; do - generate_group "$3" - shift - done - else - generate_all_groups + # garbage collection - clean all generated files for this group to account for changed prefix or removed resources + local pattern=".*${group}_.*\.yaml" + if [[ "$group" == "autoscaling.k8s.io" ]]; then + pattern=".*${group}_v.*\.yaml" + fi + + while IFS= read -r file; do + file_name=$(basename "$file") + delete_no_longer_needed_file=true + + for relevant_file_name in "${relevant_files[@]}"; do + if [[ $file_name == "$relevant_file_name" ]] || [[ ! 
$file_name =~ $pattern ]]; then + delete_no_longer_needed_file=false + break + fi + done + + if $delete_no_longer_needed_file; then + rm "$file" fi - else - while [ -n "${2:-}" ] ; do - generate_group "$2" + done < <(ls "$output_dir") +} + +parse_flags() { + while test $# -gt 0; do + case "$1" in + -p) + file_name_prefix="$2" shift - done - fi + shift + ;; + -r) + k8s_io_api_approval_reason="$2" + shift + shift + ;; + -l) + add_deletion_protection_label=true + shift + ;; + -k) + add_keep_object_annotation=true + shift + ;; + --allow-dangerous-types) + crd_options=":allowDangerousTypes=true" + shift + ;; + *) + args+=("$1") + shift + ;; + esac + done +} + +parse_flags "$@" + +if [ -n "$args" ]; then + for group in "${args[@]}"; do + generate_group "$group" + done else generate_all_groups fi - diff --git a/vendor/github.com/gardener/gardener/hack/generate-logcheck-symlinks.sh b/vendor/github.com/gardener/gardener/hack/generate-logcheck-symlinks.sh new file mode 100755 index 000000000..e079f03ca --- /dev/null +++ b/vendor/github.com/gardener/gardener/hack/generate-logcheck-symlinks.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +# +# Copyright 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +# Create symlinks to local mod chache for logr and controller-runtime log. + +SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +LOGCHECK_DIR="$LOGCHECK_DIR" + +cd "$SCRIPT_DIR"/.. +echo $LOGCHECK_DIR +LOGR_DIR=$(go list -f '{{ .Dir }}' github.com/go-logr/logr) +CONTROLLER_RUNTIME_LOGR_DIR=$(go list -f '{{ .Dir }}' sigs.k8s.io/controller-runtime/pkg/log) + +if [ ! -h "./$LOGCHECK_DIR/pkg/logcheck/testdata/src/github.com/go-logr/logr" ]; then + ln -s "$LOGR_DIR" "./$LOGCHECK_DIR/pkg/logcheck/testdata/src/github.com/go-logr/logr" +fi +if [ ! 
-h "./$LOGCHECK_DIR/pkg/logcheck/testdata/src/sigs.k8s.io/controller-runtime/pkg/log" ]; then + ln -s "$CONTROLLER_RUNTIME_LOGR_DIR" "./$LOGCHECK_DIR/pkg/logcheck/testdata/src/sigs.k8s.io/controller-runtime/pkg/log" +fi diff --git a/vendor/github.com/gardener/gardener/hack/generate.sh b/vendor/github.com/gardener/gardener/hack/generate.sh index bfd431d37..d236964c6 100755 --- a/vendor/github.com/gardener/gardener/hack/generate.sh +++ b/vendor/github.com/gardener/gardener/hack/generate.sh @@ -16,7 +16,7 @@ set -e -WHAT="protobuf codegen manifests logcheck gomegacheck monitoring-docs" +WHAT="protobuf codegen manifests logcheck monitoring-docs" CODEGEN_GROUPS="" MANIFESTS_DIRS="" MODE="" @@ -78,12 +78,12 @@ run_target() { $REPO_ROOT/hack/update-protobuf.sh ;; codegen) - local mode="${MODE:-sequential}" + local mode="${MODE:-sequential}" $REPO_ROOT/hack/update-codegen.sh --groups "$CODEGEN_GROUPS" --mode "$mode" ;; manifests) local which=() - local mode="${MODE:-parallel}" + local mode="${MODE:-parallel}" if [[ -z "$MANIFESTS_DIRS" ]]; then which=("${DEFAULT_MANIFESTS_DIRS[@]}") @@ -105,14 +105,11 @@ run_target() { logcheck) cd "$REPO_ROOT/$LOGCHECK_DIR" && go generate ./... ;; - gomegacheck) - cd "$REPO_ROOT/$GOMEGACHECK_DIR" && go generate ./... - ;; monitoring-docs) $REPO_ROOT/hack/generate-monitoring-docs.sh ;; *) - printf "ERROR: Unknown target: $target. Available targets are 'protobuf', 'codegen', 'manifests', 'logcheck', 'gomegacheck', 'monitoring-docs'.\n\n" + printf "ERROR: Unknown target: $target. Available targets are 'protobuf', 'codegen', 'manifests', 'logcheck', 'monitoring-docs'.\n\n" ;; esac } diff --git a/vendor/github.com/gardener/gardener/hack/hook-me.sh b/vendor/github.com/gardener/gardener/hack/hook-me.sh index 0bc38f0a8..3d72c55a2 100755 --- a/vendor/github.com/gardener/gardener/hack/hook-me.sh +++ b/vendor/github.com/gardener/gardener/hack/hook-me.sh @@ -257,16 +257,16 @@ extendedKeyUsage = clientAuth EOF # Create a certificate authority - openssl genrsa -out ca.key 2048 + openssl genrsa -out ca.key 3072 openssl req -x509 -new -nodes -key ca.key -days 100000 -out ca.crt -subj "/CN=quic-tunnel-ca" # Create a server certiticate - openssl genrsa -out tls.key 2048 + openssl genrsa -out tls.key 3072 openssl req -new -key tls.key -out server.csr -subj "/CN=quic-tunnel-server" -config server.conf openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out tls.crt -days 100000 -extensions v3_req -extfile server.conf # Create a client certiticate - openssl genrsa -out client.key 2048 + openssl genrsa -out client.key 3072 openssl req -new -key client.key -out client.csr -subj "/CN=quic-tunnel-client" -config client.conf openssl x509 -req -in client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out client.crt -days 100000 -extensions v3_req -extfile client.conf diff --git a/vendor/github.com/gardener/gardener/hack/install.sh b/vendor/github.com/gardener/gardener/hack/install.sh index 718d1bfd7..25282b2e7 100755 --- a/vendor/github.com/gardener/gardener/hack/install.sh +++ b/vendor/github.com/gardener/gardener/hack/install.sh @@ -21,5 +21,5 @@ echo "> Install" LD_FLAGS="${LD_FLAGS:-$($(dirname $0)/get-build-ld-flags.sh)}" CGO_ENABLED=0 GOOS=$(go env GOOS) GOARCH=$(go env GOARCH) GO111MODULE=on \ - go install -mod=vendor -ldflags "$LD_FLAGS" \ + go install -ldflags "$LD_FLAGS" \ $@ diff --git a/vendor/github.com/gardener/gardener/hack/kind-up.sh b/vendor/github.com/gardener/gardener/hack/kind-up.sh index 4ae61df22..23397b052 100755 --- 
a/vendor/github.com/gardener/gardener/hack/kind-up.sh +++ b/vendor/github.com/gardener/gardener/hack/kind-up.sh @@ -177,9 +177,9 @@ kind create cluster \ # adjust Kind's CRI default OCI runtime spec for new containers to include the cgroup namespace # this is required for nesting kubelets on cgroupsv2, as the kindest-node entrypoint script assumes an existing cgroupns when the host kernel uses cgroupsv2 # See containerd CRI: https://github.com/containerd/containerd/commit/687469d3cee18bf0e12defa5c6d0c7b9139a2dbd -if [ -f "/sys/fs/cgroup/cgroup.controllers" ]; then +if [ -f "/sys/fs/cgroup/cgroup.controllers" ] || [ "$(uname -s)" == "Darwin" ]; then echo "Host uses cgroupsv2" - cat << 'EOF' > adjust_cri_base.sh + cat << 'EOF' > "$(dirname "$0")/../dev/adjust_cri_base.sh" #!/bin/bash if [ -f /etc/containerd/cri-base.json ]; then key=$(cat /etc/containerd/cri-base.json | jq '.linux.namespaces | map(select(.type == "cgroup"))[0]') @@ -201,7 +201,7 @@ EOF echo "Adjusting containerd config for kind node $node_name" # copy script to the kind's docker container and execute it - docker cp adjust_cri_base.sh "$node_name":/etc/containerd/adjust_cri_base.sh + docker cp "$(dirname "$0")/../dev/adjust_cri_base.sh" "$node_name":/etc/containerd/adjust_cri_base.sh docker exec "$node_name" bash -c "chmod +x /etc/containerd/adjust_cri_base.sh && /etc/containerd/adjust_cri_base.sh && systemctl restart containerd" done fi diff --git a/vendor/github.com/gardener/gardener/hack/test-cover.sh b/vendor/github.com/gardener/gardener/hack/test-cover.sh index bbe19d0e1..c67df801f 100755 --- a/vendor/github.com/gardener/gardener/hack/test-cover.sh +++ b/vendor/github.com/gardener/gardener/hack/test-cover.sh @@ -27,7 +27,7 @@ COVERPROFILE_HTML="$REPO_ROOT/test.coverage.html" trap "rm -rf \"$COVERPROFILE_TMP\"" EXIT ERR INT TERM -GO111MODULE=on go test -cover -coverprofile "$COVERPROFILE_TMP" -race -timeout=2m -mod=vendor $@ | grep -v 'no test files' +GO111MODULE=on go test -cover -coverprofile "$COVERPROFILE_TMP" -race -timeout=2m $@ | grep -v 'no test files' cat "$COVERPROFILE_TMP" | grep -vE "\.pb\.go|zz_generated" > "$COVERPROFILE" go tool cover -html="$COVERPROFILE" -o="$COVERPROFILE_HTML" diff --git a/vendor/github.com/gardener/gardener/hack/test-e2e-local.sh b/vendor/github.com/gardener/gardener/hack/test-e2e-local.sh index 120e50c18..2d7f6d687 100755 --- a/vendor/github.com/gardener/gardener/hack/test-e2e-local.sh +++ b/vendor/github.com/gardener/gardener/hack/test-e2e-local.sh @@ -29,6 +29,9 @@ ginkgo_flags= if [ -n "${CI:-}" -a -n "${ARTIFACTS:-}" ]; then mkdir -p "$ARTIFACTS" ginkgo_flags="--output-dir=$ARTIFACTS --junit-report=junit.xml" + if [ "${JOB_TYPE:-}" != "periodic" ]; then + ginkgo_flags+=" --fail-fast" + fi fi # If we are not running the gardener-operator tests then we have to make the shoot domains accessible. 
diff --git a/vendor/github.com/gardener/gardener/hack/test-integration.sh b/vendor/github.com/gardener/gardener/hack/test-integration.sh index 5e3b70754..c36566f42 100755 --- a/vendor/github.com/gardener/gardener/hack/test-integration.sh +++ b/vendor/github.com/gardener/gardener/hack/test-integration.sh @@ -36,4 +36,4 @@ else timeout_flag=-timeout=5m fi -GO111MODULE=on go test ${timeout_flag:-} -mod=vendor $@ $test_flags | grep -v 'no test files' +GO111MODULE=on go test ${timeout_flag:-} $@ $test_flags | grep -v 'no test files' diff --git a/vendor/github.com/gardener/gardener/hack/test.sh b/vendor/github.com/gardener/gardener/hack/test.sh index fd194f28c..9daa5e27b 100755 --- a/vendor/github.com/gardener/gardener/hack/test.sh +++ b/vendor/github.com/gardener/gardener/hack/test.sh @@ -38,4 +38,4 @@ else timeout_flag=-timeout=2m fi -GO111MODULE=on go test -race ${timeout_flag:-} -mod=vendor $@ $test_flags | grep -v 'no test files' +GO111MODULE=on go test -race ${timeout_flag:-} "$@" $test_flags | grep -v 'no test files' diff --git a/vendor/github.com/gardener/gardener/hack/tools.go b/vendor/github.com/gardener/gardener/hack/tools.go index 45fdc98de..677963f87 100755 --- a/vendor/github.com/gardener/gardener/hack/tools.go +++ b/vendor/github.com/gardener/gardener/hack/tools.go @@ -21,6 +21,7 @@ package tools import ( _ "github.com/ahmetb/gen-crd-api-reference-docs" _ "github.com/bronze1man/yaml2json" + _ "github.com/ironcore-dev/vgopath" _ "github.com/onsi/ginkgo/v2/ginkgo" _ "go.uber.org/mock/mockgen" _ "golang.org/x/tools/cmd/goimports" diff --git a/vendor/github.com/gardener/gardener/hack/tools.mk b/vendor/github.com/gardener/gardener/hack/tools.mk index 166cd60cf..b699684ad 100755 --- a/vendor/github.com/gardener/gardener/hack/tools.mk +++ b/vendor/github.com/gardener/gardener/hack/tools.mk @@ -23,20 +23,18 @@ ifeq ($(strip $(shell go list -m 2>/dev/null)),github.com/gardener/gardener) TOOLS_PKG_PATH := ./hack/tools else # dependency on github.com/gardener/gardener/hack/tools is optional and only needed if other projects want to reuse -# install-promtool.sh, logcheck, or gomegacheck. If they don't use it and the project doesn't depend on the package, +# install-promtool.sh, or logcheck. If they don't use it and the project doesn't depend on the package, # silence the error to minimize confusion. 
TOOLS_PKG_PATH := $(shell go list -tags tools -f '{{ .Dir }}' github.com/gardener/gardener/hack/tools 2>/dev/null) endif TOOLS_BIN_DIR := $(TOOLS_DIR)/bin CONTROLLER_GEN := $(TOOLS_BIN_DIR)/controller-gen -DOCFORGE := $(TOOLS_BIN_DIR)/docforge GEN_CRD_API_REFERENCE_DOCS := $(TOOLS_BIN_DIR)/gen-crd-api-reference-docs GINKGO := $(TOOLS_BIN_DIR)/ginkgo GOIMPORTS := $(TOOLS_BIN_DIR)/goimports GOIMPORTSREVISER := $(TOOLS_BIN_DIR)/goimports-reviser GOLANGCI_LINT := $(TOOLS_BIN_DIR)/golangci-lint -GOMEGACHECK := $(TOOLS_BIN_DIR)/gomegacheck.so # plugin binary GO_ADD_LICENSE := $(TOOLS_BIN_DIR)/addlicense GO_APIDIFF := $(TOOLS_BIN_DIR)/go-apidiff GO_VULN_CHECK := $(TOOLS_BIN_DIR)/govulncheck @@ -56,21 +54,22 @@ SETUP_ENVTEST := $(TOOLS_BIN_DIR)/setup-envtest SKAFFOLD := $(TOOLS_BIN_DIR)/skaffold YAML2JSON := $(TOOLS_BIN_DIR)/yaml2json YQ := $(TOOLS_BIN_DIR)/yq +VGOPATH := $(TOOLS_BIN_DIR)/vgopath # default tool versions -DOCFORGE_VERSION ?= v0.34.0 -GOLANGCI_LINT_VERSION ?= v1.54.2 +GOLANGCI_LINT_VERSION ?= v1.55.1 GO_APIDIFF_VERSION ?= v0.6.1 GO_ADD_LICENSE_VERSION ?= v1.1.1 -GOIMPORTSREVISER_VERSION ?= v3.4.5 +GOIMPORTSREVISER_VERSION ?= v3.5.6 GO_VULN_CHECK_VERSION ?= latest -HELM_VERSION ?= v3.12.3 +HELM_VERSION ?= v3.13.1 KIND_VERSION ?= v0.20.0 -KUBECTL_VERSION ?= v1.28.2 -PROMTOOL_VERSION ?= 2.34.0 -PROTOC_VERSION ?= 24.1 -SKAFFOLD_VERSION ?= v2.7.0 -YQ_VERSION ?= v4.35.1 +KUBECTL_VERSION ?= v1.28.3 +PROMTOOL_VERSION ?= 2.47.2 +PROTOC_VERSION ?= 24.4 +SKAFFOLD_VERSION ?= v2.8.0 +YQ_VERSION ?= v4.35.2 +VGOPATH_VERSION ?= v0.1.3 # tool versions from go.mod CONTROLLER_GEN_VERSION ?= $(call version_gomod,sigs.k8s.io/controller-tools) @@ -124,7 +123,7 @@ ifeq ($(shell if [ -d $(TOOLS_BIN_SOURCE_DIR) ]; then echo "found"; fi),found) endif .PHONY: create-tools-bin -create-tools-bin: $(CONTROLLER_GEN) $(DOCFORGE) $(GEN_CRD_API_REFERENCE_DOCS) $(GINKGO) $(GOIMPORTS) $(GOIMPORTSREVISER) $(GO_ADD_LICENSE) $(GO_APIDIFF) $(GO_VULN_CHECK) $(GO_TO_PROTOBUF) $(HELM) $(IMPORT_BOSS) $(KIND) $(KUBECTL) $(MOCKGEN) $(OPENAPI_GEN) $(PROMTOOL) $(PROTOC) $(PROTOC_GEN_GOGO) $(SETUP_ENVTEST) $(SKAFFOLD) $(YAML2JSON) $(YQ) +create-tools-bin: $(CONTROLLER_GEN) $(GEN_CRD_API_REFERENCE_DOCS) $(GINKGO) $(GOIMPORTS) $(GOIMPORTSREVISER) $(GO_ADD_LICENSE) $(GO_APIDIFF) $(GO_VULN_CHECK) $(GO_TO_PROTOBUF) $(HELM) $(IMPORT_BOSS) $(KIND) $(KUBECTL) $(MOCKGEN) $(OPENAPI_GEN) $(PROMTOOL) $(PROTOC) $(PROTOC_GEN_GOGO) $(SETUP_ENVTEST) $(SKAFFOLD) $(YAML2JSON) $(YQ) $(VGOPATH) ######################################### # Tools # @@ -133,10 +132,6 @@ create-tools-bin: $(CONTROLLER_GEN) $(DOCFORGE) $(GEN_CRD_API_REFERENCE_DOCS) $( $(CONTROLLER_GEN): $(call tool_version_file,$(CONTROLLER_GEN),$(CONTROLLER_GEN_VERSION)) go build -o $(CONTROLLER_GEN) sigs.k8s.io/controller-tools/cmd/controller-gen -$(DOCFORGE): $(call tool_version_file,$(DOCFORGE),$(DOCFORGE_VERSION)) - curl -L -o $(DOCFORGE) https://github.com/gardener/docforge/releases/download/$(DOCFORGE_VERSION)/docforge-$(shell uname -s | tr '[:upper:]' '[:lower:]')-$(shell uname -m | sed 's/x86_64/amd64/') - chmod +x $(DOCFORGE) - $(GEN_CRD_API_REFERENCE_DOCS): $(call tool_version_file,$(GEN_CRD_API_REFERENCE_DOCS),$(GEN_CRD_API_REFERENCE_DOCS_VERSION)) go build -o $(GEN_CRD_API_REFERENCE_DOCS) github.com/ahmetb/gen-crd-api-reference-docs @@ -154,14 +149,6 @@ $(GOLANGCI_LINT): $(call tool_version_file,$(GOLANGCI_LINT),$(GOLANGCI_LINT_VERS @# see https://github.com/golangci/golangci-lint/issues/1276 GOBIN=$(abspath $(TOOLS_BIN_DIR)) CGO_ENABLED=1 go install 
github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION) -ifeq ($(strip $(shell go list -m 2>/dev/null)),github.com/gardener/gardener) -$(GOMEGACHECK): $(TOOLS_PKG_PATH)/gomegacheck/go.* $(shell find $(TOOLS_PKG_PATH)/gomegacheck -type f -name '*.go') - cd $(TOOLS_PKG_PATH)/gomegacheck; CGO_ENABLED=1 go build -o $(abspath $(GOMEGACHECK)) -buildmode=plugin ./plugin -else -$(GOMEGACHECK): go.mod - CGO_ENABLED=1 go build -o $(GOMEGACHECK) -buildmode=plugin github.com/gardener/gardener/hack/tools/gomegacheck/plugin -endif - $(GO_ADD_LICENSE): $(call tool_version_file,$(GO_ADD_LICENSE),$(GO_ADD_LICENSE_VERSION)) GOBIN=$(abspath $(TOOLS_BIN_DIR)) go install github.com/google/addlicense@$(GO_ADD_LICENSE_VERSION) @@ -232,3 +219,6 @@ $(YAML2JSON): $(call tool_version_file,$(YAML2JSON),$(YAML2JSON_VERSION)) $(YQ): $(call tool_version_file,$(YQ),$(YQ_VERSION)) curl -L -o $(YQ) https://github.com/mikefarah/yq/releases/download/$(YQ_VERSION)/yq_$(shell uname -s | tr '[:upper:]' '[:lower:]')_$(shell uname -m | sed 's/x86_64/amd64/;s/aarch64/arm64/') chmod +x $(YQ) + +$(VGOPATH): $(call tool_version_file,$(VGOPATH),$(VGOPATH_VERSION)) + go build -o $(VGOPATH) github.com/ironcore-dev/vgopath diff --git a/vendor/github.com/gardener/gardener/hack/update-codegen.sh b/vendor/github.com/gardener/gardener/hack/update-codegen.sh index db592cb29..4d404a194 100755 --- a/vendor/github.com/gardener/gardener/hack/update-codegen.sh +++ b/vendor/github.com/gardener/gardener/hack/update-codegen.sh @@ -39,15 +39,26 @@ AVAILABLE_CODEGEN_OPTIONS=( "shootdnsrewriting_groups" "provider_local_groups" "extensions_config_groups" + "nodeagent_groups" ) -# Friendly reminder if workspace location is not in $GOPATH -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" -if [ "${SCRIPT_DIR}" != "$(realpath $GOPATH)/src/github.com/gardener/gardener/hack" ]; then - echo "'hack/update-codegen.sh' script does not work correctly if your workspace is outside GOPATH" - echo "Please check https://github.com/gardener/gardener/blob/master/docs/development/local_setup.md#get-the-sources" - exit 1 -fi +SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +VGOPATH="$VGOPATH" + +# Ensure that if GOPATH is set, the GOPATH/{bin,pkg} directory exists. This seems to be not always +# the case in certain environments like Prow. As we will create a symlink against the bin folder we +# need to make sure that the bin directory is present in the GOPATH. +if [ -n "$GOPATH" ] && [ ! -d "$GOPATH/bin" ]; then mkdir -p "$GOPATH/bin"; fi +if [ -n "$GOPATH" ] && [ ! -d "$GOPATH/pkg" ]; then mkdir -p "$GOPATH/pkg"; fi + +VIRTUAL_GOPATH="$(mktemp -d)" +trap 'rm -rf "$VIRTUAL_GOPATH"' EXIT + +# Setup virtual GOPATH so the codegen tools work as expected. +(cd "$SCRIPT_DIR/.."; go mod download && "$VGOPATH" -o "$VIRTUAL_GOPATH") + +export GOROOT="${GOROOT:-"$(go env GOROOT)"}" +export GOPATH="$VIRTUAL_GOPATH" # We need to explicitly pass GO111MODULE=off to k8s.io/code-generator as it is significantly slower otherwise, # see https://github.com/kubernetes/code-generator/issues/100. 
@@ -379,6 +390,30 @@ resourcemanager_groups() { } export -f resourcemanager_groups +# Componentconfig for node-agent + +nodeagent_groups() { + echo "Generating API groups for pkg/nodeagent/apis/config" + + bash "${PROJECT_ROOT}"/hack/generate-internal-groups.sh \ + deepcopy,defaulter \ + github.com/gardener/gardener/pkg/client/componentconfig \ + github.com/gardener/gardener/pkg/nodeagent/apis \ + github.com/gardener/gardener/pkg/nodeagent/apis \ + "config:v1alpha1" \ + -h "${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt" + + bash "${PROJECT_ROOT}"/hack/generate-internal-groups.sh \ + conversion \ + github.com/gardener/gardener/pkg/client/componentconfig \ + github.com/gardener/gardener/pkg/nodeagent/apis \ + github.com/gardener/gardener/pkg/nodeagent/apis \ + "config:v1alpha1" \ + --extra-peer-dirs=github.com/gardener/gardener/pkg/nodeagent/apis/config,github.com/gardener/gardener/pkg/nodeagent/apis/config/v1alpha1,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/conversion,k8s.io/apimachinery/pkg/runtime,k8s.io/component-base/config,k8s.io/component-base/config/v1alpha1 \ + -h "${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt" +} +export -f nodeagent_groups + # Componentconfig for admission plugins shoottolerationrestriction_groups() { @@ -511,7 +546,7 @@ else break fi done - + if $valid; then valid_options+=("$option") else diff --git a/vendor/github.com/gardener/gardener/hack/update-protobuf.sh b/vendor/github.com/gardener/gardener/hack/update-protobuf.sh index c81a5ac13..e0583c301 100755 --- a/vendor/github.com/gardener/gardener/hack/update-protobuf.sh +++ b/vendor/github.com/gardener/gardener/hack/update-protobuf.sh @@ -18,13 +18,23 @@ set -o errexit set -o nounset set -o pipefail -# Friendly reminder if workspace location is not in $GOPATH -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" -if [ "${SCRIPT_DIR}" != "$(realpath $GOPATH)/src/github.com/gardener/gardener/hack" ]; then - echo "'hack/update-protobuf.sh' script does not work correctly if your workspace is outside GOPATH" - echo "Please check https://github.com/gardener/gardener/blob/master/docs/development/local_setup.md#get-the-sources" - exit 1 -fi +SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +VGOPATH="$VGOPATH" + +# Ensure that if GOPATH is set, the GOPATH/{bin,pkg} directory exists. This seems to be not always +# the case in certain environments like Prow. As we will create a symlink against the bin folder we +# need to make sure that the bin directory is present in the GOPATH. +if [ -n "$GOPATH" ] && [ ! -d "$GOPATH/bin" ]; then mkdir -p "$GOPATH/bin"; fi +if [ -n "$GOPATH" ] && [ ! -d "$GOPATH/pkg" ]; then mkdir -p "$GOPATH/pkg"; fi + +VIRTUAL_GOPATH="$(mktemp -d)" +trap 'rm -rf "$VIRTUAL_GOPATH"' EXIT + +# Setup virtual GOPATH so the codegen tools work as expected. +(cd "$SCRIPT_DIR/.."; go mod download && "$VGOPATH" -o "$VIRTUAL_GOPATH") + +export GOROOT="${GOROOT:-"$(go env GOROOT)"}" +export GOPATH="$VIRTUAL_GOPATH" # We need to explicitly pass GO111MODULE=off to k8s.io/code-generator as it is significantly slower otherwise, # see https://github.com/kubernetes/code-generator/issues/100. 
@@ -53,5 +63,4 @@ read -ra PACKAGES <<< $(echo ${APIROOTS}) go-to-protobuf \ --packages="$(IFS=, ; echo "${PACKAGES[*]}")" \ --apimachinery-packages='-k8s.io/apimachinery/pkg/util/intstr,-k8s.io/apimachinery/pkg/api/resource,-k8s.io/apimachinery/pkg/runtime/schema,-k8s.io/apimachinery/pkg/runtime,-k8s.io/apimachinery/pkg/apis/meta/v1,-k8s.io/apimachinery/pkg/apis/meta/v1beta1,-k8s.io/api/core/v1,-k8s.io/api/rbac/v1,-k8s.io/api/autoscaling/v1,-k8s.io/api/networking/v1' \ - --go-header-file=${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt \ - --proto-import=${PROJECT_ROOT}/vendor + --go-header-file=${PROJECT_ROOT}/hack/LICENSE_BOILERPLATE.txt diff --git a/vendor/github.com/gardener/gardener/hack/update-skaffold-deps.sh b/vendor/github.com/gardener/gardener/hack/update-skaffold-deps.sh new file mode 100755 index 000000000..ec09bfd3b --- /dev/null +++ b/vendor/github.com/gardener/gardener/hack/update-skaffold-deps.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# +# Copyright 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +repo_root="$(git rev-parse --show-toplevel)" +$repo_root/hack/check-skaffold-deps.sh update diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types.go index ac7692b80..212ecdedf 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/types.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types.go @@ -14,7 +14,9 @@ package core -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) const ( // GardenerSeedLeaseNamespace is the namespace in which Gardenlet will report Seeds' diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_cloudprofile.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_cloudprofile.go index b77e77eb3..3c582e078 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/types_cloudprofile.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_cloudprofile.go @@ -98,6 +98,11 @@ type MachineImage struct { Name string // Versions contains versions, expiration dates and container runtimes of the machine image Versions []MachineImageVersion + // UpdateStrategy is the update strategy to use for the machine image. Possible values are: + // - patch: update to the latest patch version of the current minor version. + // - minor: update to the latest minor and patch version. + // - major: always update to the overall latest version (default). + UpdateStrategy *MachineImageUpdateStrategy } // MachineImageVersion is an expirable version with list of supported container runtimes and interfaces @@ -212,3 +217,17 @@ const ( // and will eventually expire. 
ClassificationDeprecated VersionClassification = "deprecated" ) + +// MachineImageUpdateStrategy is the update strategy to use for a machine image +type MachineImageUpdateStrategy string + +const ( + // UpdateStrategyPatch indicates that auto-updates are performed to the latest patch version of the current minor version. + // When using an expired version during the maintenance window, force updates to the latest patch of the next (not necessarily consecutive) minor when using an expired version. + UpdateStrategyPatch MachineImageUpdateStrategy = "patch" + // UpdateStrategyMinor indicates that auto-updates are performed to the latest patch and minor version of the current major version. + // When using an expired version during the maintenance window, force updates to the latest minor and patch of the next (not necessarily consecutive) major version. + UpdateStrategyMinor MachineImageUpdateStrategy = "minor" + // UpdateStrategyMajor indicates that auto-updates are performed always to the overall latest version. + UpdateStrategyMajor MachineImageUpdateStrategy = "major" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerregistration.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerregistration.go index 6192201fb..0cce61bd9 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerregistration.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_controllerregistration.go @@ -94,7 +94,7 @@ type ControllerRegistrationDeployment struct { // considered for a deployment. // An empty list means that all seeds are selected. SeedSelector *metav1.LabelSelector - // DeploymentRefs holds references to `ControllerDeployments`. Only one element is support now. + // DeploymentRefs holds references to `ControllerDeployments`. Only one element is supported currently. DeploymentRefs []DeploymentRef } diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_seed.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_seed.go index ffea2cbe4..86a1caac0 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/types_seed.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_seed.go @@ -278,36 +278,12 @@ type SeedSettingVerticalPodAutoscaler struct { // SeedSettingDependencyWatchdog controls the dependency-watchdog settings for the seed. type SeedSettingDependencyWatchdog struct { - // Endpoint controls the endpoint settings for the dependency-watchdog for the seed. - // Deprecated: This field is deprecated and will be removed in a future version of Gardener. Use `Weeder` instead. - Endpoint *SeedSettingDependencyWatchdogEndpoint - // Probe controls the probe settings for the dependency-watchdog for the seed. - // Deprecated: This field is deprecated and will be removed in a future version of Gardener. Use `Prober` instead. - Probe *SeedSettingDependencyWatchdogProbe // Weeder controls the weeder settings for the dependency-watchdog for the seed. Weeder *SeedSettingDependencyWatchdogWeeder // Prober controls the prober settings for the dependency-watchdog for the seed. Prober *SeedSettingDependencyWatchdogProber } -// SeedSettingDependencyWatchdogEndpoint controls the endpoint settings for the dependency-watchdog for the seed. -// Deprecated: This type is deprecated and will be removed in a future version of Gardener. Use type `SeedSettingDependencyWatchdogWeeder` instead. 
-type SeedSettingDependencyWatchdogEndpoint struct { - // Enabled controls whether the endpoint controller of the dependency-watchdog should be enabled. This controller - // helps to alleviate the delay where control plane components remain unavailable by finding the respective pods in - // CrashLoopBackoff status and restarting them once their dependants become ready and available again. - Enabled bool -} - -// SeedSettingDependencyWatchdogProbe controls the probe settings for the dependency-watchdog for the seed. -// Deprecated: This type is deprecated and will be removed in a future version of Gardener. Use type `SeedSettingDependencyWatchdogProber` instead. -type SeedSettingDependencyWatchdogProbe struct { - // Enabled controls whether the probe controller of the dependency-watchdog should be enabled. This controller - // scales down the kube-controller-manager, machine-controller-manager and cluster-autoscaler of shoot clusters in case their respective kube-apiserver is not - // reachable via its external ingress in order to avoid melt-down situations. - Enabled bool -} - // SeedSettingDependencyWatchdogWeeder controls the weeder settings for the dependency-watchdog for the seed. type SeedSettingDependencyWatchdogWeeder struct { // Enabled controls whether the weeder of the dependency-watchdog should be enabled. This controller diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go index 035913257..d7d922162 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go @@ -479,6 +479,10 @@ type ClusterAutoscaler struct { MaxGracefulTerminationSeconds *int32 // IgnoreTaints specifies a list of taint keys to ignore in node templates when considering to scale a node group. IgnoreTaints []string + // NewPodScaleUpDelay specifies how long CA should ignore newly created pods before they have to be considered for scale-up. + NewPodScaleUpDelay *metav1.Duration + // MaxEmptyBulkDelete specifies the maximum number of empty nodes that can be deleted at the same time (default: 10). + MaxEmptyBulkDelete *int32 } // ExpanderMode is type used for Expander values diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/types_constants.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/types_constants.go index 67a53c6b1..52884c2cc 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/types_constants.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/constants/types_constants.go @@ -530,16 +530,6 @@ const ( // LabelExtensionProjectRole is a constant for a label value for extension project roles LabelExtensionProjectRole = "extension-project-role" - // LabelAPIServerExposure is a constant for label key which gardener can add to various objects related - // to kube-apiserver exposure. - // Deprecated: This label key is deprecated and will be removed after Gardener v1.80 has been released. - // TODO(rfranzke): Drop this after v1.80 has been released. - LabelAPIServerExposure = "core.gardener.cloud/apiserver-exposure" - // LabelAPIServerExposureGardenerManaged is a constant for label value which gardener sets on the label key - // "core.gardener.cloud/apiserver-exposure" to indicate that it's responsible for apiserver exposure (via SNI). - // Deprecated: This label key is deprecated and will be removed after Gardener v1.80 has been released. 
- // TODO(rfranzke): Drop this after v1.80 has been released. - LabelAPIServerExposureGardenerManaged = "gardener-managed" // LabelExposureClassHandlerName is the label key for exposure class handler names. LabelExposureClassHandlerName = "handler.exposureclass.gardener.cloud/name" @@ -563,6 +553,13 @@ const ( // DefaultIngressGatewayAppLabelValue is the ingress gateway value for the app label. DefaultIngressGatewayAppLabelValue = "istio-ingressgateway" + // DataTypeSecret is a constant for a value of the 'Type' field in 'GardenerResourceData' structs describing that + // the data is a secret. + DataTypeSecret = "secret" + // DataTypeMachineState is a constant for a value of the 'Type' field in 'GardenerResourceData' structs describing + // that the data is machine state. + DataTypeMachineState = "machine-state" + // DefaultSchedulerName is the name of the default scheduler. DefaultSchedulerName = "default-scheduler" // SchedulingPurpose is a constant for the key in a label describing the purpose of the scheduler related object. @@ -845,7 +842,10 @@ const ( TaintNodeCriticalComponentsNotReady = "node.gardener.cloud/critical-components-not-ready" // LabelNodeCriticalComponent is the label key for marking node-critical component pods. LabelNodeCriticalComponent = "node.gardener.cloud/critical-component" - // AnnotationWaitForCSINode is the annotation key for csi-driver-node pods, - // indicating they use the driver specified in the value. + // AnnotationPrefixWaitForCSINode is the annotation key for csi-driver-node pods, indicating they use the driver + // specified in the value. AnnotationPrefixWaitForCSINode = "node.gardener.cloud/wait-for-csi-node-" + + // GardenPurposeMachineClass is a constant for the 'machineclass' value in a label. + GardenPurposeMachineClass = "machineclass" ) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults.go index 009b717bf..2d23f2687 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults.go @@ -91,7 +91,13 @@ func SetDefaults_SeedNetworks(obj *SeedNetworks) { } // SetDefaults_SeedSettingDependencyWatchdog sets defaults for SeedSettingDependencyWatchdog objects. -func SetDefaults_SeedSettingDependencyWatchdog(_ *SeedSettingDependencyWatchdog) { +func SetDefaults_SeedSettingDependencyWatchdog(obj *SeedSettingDependencyWatchdog) { + if obj.Weeder == nil { + obj.Weeder = &SeedSettingDependencyWatchdogWeeder{Enabled: true} + } + if obj.Prober == nil { + obj.Prober = &SeedSettingDependencyWatchdogProber{Enabled: true} + } } // SetDefaults_Shoot sets default values for Shoot objects. diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults_cloudprofile.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults_cloudprofile.go index 5b9bd84a2..a744a7db6 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults_cloudprofile.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/defaults_cloudprofile.go @@ -20,6 +20,14 @@ import ( v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" ) +// SetDefaults_MachineImage sets default values for MachineImage objects. 
+func SetDefaults_MachineImage(obj *MachineImage) { + if obj.UpdateStrategy == nil { + updateStrategyMajor := UpdateStrategyMajor + obj.UpdateStrategy = &updateStrategyMajor + } +} + // SetDefaults_MachineImageVersion sets default values for MachineImageVersion objects. func SetDefaults_MachineImageVersion(obj *MachineImageVersion) { if len(obj.CRI) == 0 { diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.pb.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.pb.go index 7b21ca40d..86f454346 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.pb.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.pb.go @@ -3553,66 +3553,10 @@ func (m *SeedSettingDependencyWatchdog) XXX_DiscardUnknown() { var xxx_messageInfo_SeedSettingDependencyWatchdog proto.InternalMessageInfo -func (m *SeedSettingDependencyWatchdogEndpoint) Reset() { *m = SeedSettingDependencyWatchdogEndpoint{} } -func (*SeedSettingDependencyWatchdogEndpoint) ProtoMessage() {} -func (*SeedSettingDependencyWatchdogEndpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{125} -} -func (m *SeedSettingDependencyWatchdogEndpoint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SeedSettingDependencyWatchdogEndpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SeedSettingDependencyWatchdogEndpoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_SeedSettingDependencyWatchdogEndpoint.Merge(m, src) -} -func (m *SeedSettingDependencyWatchdogEndpoint) XXX_Size() int { - return m.Size() -} -func (m *SeedSettingDependencyWatchdogEndpoint) XXX_DiscardUnknown() { - xxx_messageInfo_SeedSettingDependencyWatchdogEndpoint.DiscardUnknown(m) -} - -var xxx_messageInfo_SeedSettingDependencyWatchdogEndpoint proto.InternalMessageInfo - -func (m *SeedSettingDependencyWatchdogProbe) Reset() { *m = SeedSettingDependencyWatchdogProbe{} } -func (*SeedSettingDependencyWatchdogProbe) ProtoMessage() {} -func (*SeedSettingDependencyWatchdogProbe) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{126} -} -func (m *SeedSettingDependencyWatchdogProbe) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SeedSettingDependencyWatchdogProbe) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SeedSettingDependencyWatchdogProbe) XXX_Merge(src proto.Message) { - xxx_messageInfo_SeedSettingDependencyWatchdogProbe.Merge(m, src) -} -func (m *SeedSettingDependencyWatchdogProbe) XXX_Size() int { - return m.Size() -} -func (m *SeedSettingDependencyWatchdogProbe) XXX_DiscardUnknown() { - xxx_messageInfo_SeedSettingDependencyWatchdogProbe.DiscardUnknown(m) -} - -var xxx_messageInfo_SeedSettingDependencyWatchdogProbe proto.InternalMessageInfo - func (m *SeedSettingDependencyWatchdogProber) Reset() { *m = SeedSettingDependencyWatchdogProber{} } func (*SeedSettingDependencyWatchdogProber) ProtoMessage() {} func (*SeedSettingDependencyWatchdogProber) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{127} + return fileDescriptor_ca37af0df9a5bbd2, []int{125} } func (m *SeedSettingDependencyWatchdogProber) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3640,7 +3584,7 @@ var 
xxx_messageInfo_SeedSettingDependencyWatchdogProber proto.InternalMessageInf func (m *SeedSettingDependencyWatchdogWeeder) Reset() { *m = SeedSettingDependencyWatchdogWeeder{} } func (*SeedSettingDependencyWatchdogWeeder) ProtoMessage() {} func (*SeedSettingDependencyWatchdogWeeder) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{128} + return fileDescriptor_ca37af0df9a5bbd2, []int{126} } func (m *SeedSettingDependencyWatchdogWeeder) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3668,7 +3612,7 @@ var xxx_messageInfo_SeedSettingDependencyWatchdogWeeder proto.InternalMessageInf func (m *SeedSettingExcessCapacityReservation) Reset() { *m = SeedSettingExcessCapacityReservation{} } func (*SeedSettingExcessCapacityReservation) ProtoMessage() {} func (*SeedSettingExcessCapacityReservation) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{129} + return fileDescriptor_ca37af0df9a5bbd2, []int{127} } func (m *SeedSettingExcessCapacityReservation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3698,7 +3642,7 @@ func (m *SeedSettingExcessCapacityReservationConfig) Reset() { } func (*SeedSettingExcessCapacityReservationConfig) ProtoMessage() {} func (*SeedSettingExcessCapacityReservationConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{130} + return fileDescriptor_ca37af0df9a5bbd2, []int{128} } func (m *SeedSettingExcessCapacityReservationConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3726,7 +3670,7 @@ var xxx_messageInfo_SeedSettingExcessCapacityReservationConfig proto.InternalMes func (m *SeedSettingLoadBalancerServices) Reset() { *m = SeedSettingLoadBalancerServices{} } func (*SeedSettingLoadBalancerServices) ProtoMessage() {} func (*SeedSettingLoadBalancerServices) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{131} + return fileDescriptor_ca37af0df9a5bbd2, []int{129} } func (m *SeedSettingLoadBalancerServices) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3754,7 +3698,7 @@ var xxx_messageInfo_SeedSettingLoadBalancerServices proto.InternalMessageInfo func (m *SeedSettingLoadBalancerServicesZones) Reset() { *m = SeedSettingLoadBalancerServicesZones{} } func (*SeedSettingLoadBalancerServicesZones) ProtoMessage() {} func (*SeedSettingLoadBalancerServicesZones) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{132} + return fileDescriptor_ca37af0df9a5bbd2, []int{130} } func (m *SeedSettingLoadBalancerServicesZones) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3782,7 +3726,7 @@ var xxx_messageInfo_SeedSettingLoadBalancerServicesZones proto.InternalMessageIn func (m *SeedSettingScheduling) Reset() { *m = SeedSettingScheduling{} } func (*SeedSettingScheduling) ProtoMessage() {} func (*SeedSettingScheduling) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{133} + return fileDescriptor_ca37af0df9a5bbd2, []int{131} } func (m *SeedSettingScheduling) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3810,7 +3754,7 @@ var xxx_messageInfo_SeedSettingScheduling proto.InternalMessageInfo func (m *SeedSettingTopologyAwareRouting) Reset() { *m = SeedSettingTopologyAwareRouting{} } func (*SeedSettingTopologyAwareRouting) ProtoMessage() {} func (*SeedSettingTopologyAwareRouting) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{134} + return fileDescriptor_ca37af0df9a5bbd2, []int{132} } func (m *SeedSettingTopologyAwareRouting) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3838,7 +3782,7 @@ var xxx_messageInfo_SeedSettingTopologyAwareRouting proto.InternalMessageInfo func (m *SeedSettingVerticalPodAutoscaler) Reset() { *m = SeedSettingVerticalPodAutoscaler{} } func (*SeedSettingVerticalPodAutoscaler) ProtoMessage() {} func (*SeedSettingVerticalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{135} + return fileDescriptor_ca37af0df9a5bbd2, []int{133} } func (m *SeedSettingVerticalPodAutoscaler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3866,7 +3810,7 @@ var xxx_messageInfo_SeedSettingVerticalPodAutoscaler proto.InternalMessageInfo func (m *SeedSettings) Reset() { *m = SeedSettings{} } func (*SeedSettings) ProtoMessage() {} func (*SeedSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{136} + return fileDescriptor_ca37af0df9a5bbd2, []int{134} } func (m *SeedSettings) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3894,7 +3838,7 @@ var xxx_messageInfo_SeedSettings proto.InternalMessageInfo func (m *SeedSpec) Reset() { *m = SeedSpec{} } func (*SeedSpec) ProtoMessage() {} func (*SeedSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{137} + return fileDescriptor_ca37af0df9a5bbd2, []int{135} } func (m *SeedSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3922,7 +3866,7 @@ var xxx_messageInfo_SeedSpec proto.InternalMessageInfo func (m *SeedStatus) Reset() { *m = SeedStatus{} } func (*SeedStatus) ProtoMessage() {} func (*SeedStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{138} + return fileDescriptor_ca37af0df9a5bbd2, []int{136} } func (m *SeedStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3950,7 +3894,7 @@ var xxx_messageInfo_SeedStatus proto.InternalMessageInfo func (m *SeedTaint) Reset() { *m = SeedTaint{} } func (*SeedTaint) ProtoMessage() {} func (*SeedTaint) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{139} + return fileDescriptor_ca37af0df9a5bbd2, []int{137} } func (m *SeedTaint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3978,7 +3922,7 @@ var xxx_messageInfo_SeedTaint proto.InternalMessageInfo func (m *SeedTemplate) Reset() { *m = SeedTemplate{} } func (*SeedTemplate) ProtoMessage() {} func (*SeedTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{140} + return fileDescriptor_ca37af0df9a5bbd2, []int{138} } func (m *SeedTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4006,7 +3950,7 @@ var xxx_messageInfo_SeedTemplate proto.InternalMessageInfo func (m *SeedVolume) Reset() { *m = SeedVolume{} } func (*SeedVolume) ProtoMessage() {} func (*SeedVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{141} + return fileDescriptor_ca37af0df9a5bbd2, []int{139} } func (m *SeedVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4034,7 +3978,7 @@ var xxx_messageInfo_SeedVolume proto.InternalMessageInfo func (m *SeedVolumeProvider) Reset() { *m = SeedVolumeProvider{} } func (*SeedVolumeProvider) ProtoMessage() {} func (*SeedVolumeProvider) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{142} + return fileDescriptor_ca37af0df9a5bbd2, []int{140} } func (m *SeedVolumeProvider) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4062,7 +4006,7 @@ var xxx_messageInfo_SeedVolumeProvider proto.InternalMessageInfo func (m 
*ServiceAccountConfig) Reset() { *m = ServiceAccountConfig{} } func (*ServiceAccountConfig) ProtoMessage() {} func (*ServiceAccountConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{143} + return fileDescriptor_ca37af0df9a5bbd2, []int{141} } func (m *ServiceAccountConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4090,7 +4034,7 @@ var xxx_messageInfo_ServiceAccountConfig proto.InternalMessageInfo func (m *ServiceAccountKeyRotation) Reset() { *m = ServiceAccountKeyRotation{} } func (*ServiceAccountKeyRotation) ProtoMessage() {} func (*ServiceAccountKeyRotation) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{144} + return fileDescriptor_ca37af0df9a5bbd2, []int{142} } func (m *ServiceAccountKeyRotation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4118,7 +4062,7 @@ var xxx_messageInfo_ServiceAccountKeyRotation proto.InternalMessageInfo func (m *Shoot) Reset() { *m = Shoot{} } func (*Shoot) ProtoMessage() {} func (*Shoot) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{145} + return fileDescriptor_ca37af0df9a5bbd2, []int{143} } func (m *Shoot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4146,7 +4090,7 @@ var xxx_messageInfo_Shoot proto.InternalMessageInfo func (m *ShootAdvertisedAddress) Reset() { *m = ShootAdvertisedAddress{} } func (*ShootAdvertisedAddress) ProtoMessage() {} func (*ShootAdvertisedAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{146} + return fileDescriptor_ca37af0df9a5bbd2, []int{144} } func (m *ShootAdvertisedAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4174,7 +4118,7 @@ var xxx_messageInfo_ShootAdvertisedAddress proto.InternalMessageInfo func (m *ShootCredentials) Reset() { *m = ShootCredentials{} } func (*ShootCredentials) ProtoMessage() {} func (*ShootCredentials) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{147} + return fileDescriptor_ca37af0df9a5bbd2, []int{145} } func (m *ShootCredentials) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4202,7 +4146,7 @@ var xxx_messageInfo_ShootCredentials proto.InternalMessageInfo func (m *ShootCredentialsRotation) Reset() { *m = ShootCredentialsRotation{} } func (*ShootCredentialsRotation) ProtoMessage() {} func (*ShootCredentialsRotation) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{148} + return fileDescriptor_ca37af0df9a5bbd2, []int{146} } func (m *ShootCredentialsRotation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4230,7 +4174,7 @@ var xxx_messageInfo_ShootCredentialsRotation proto.InternalMessageInfo func (m *ShootKubeconfigRotation) Reset() { *m = ShootKubeconfigRotation{} } func (*ShootKubeconfigRotation) ProtoMessage() {} func (*ShootKubeconfigRotation) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{149} + return fileDescriptor_ca37af0df9a5bbd2, []int{147} } func (m *ShootKubeconfigRotation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4258,7 +4202,7 @@ var xxx_messageInfo_ShootKubeconfigRotation proto.InternalMessageInfo func (m *ShootList) Reset() { *m = ShootList{} } func (*ShootList) ProtoMessage() {} func (*ShootList) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{150} + return fileDescriptor_ca37af0df9a5bbd2, []int{148} } func (m *ShootList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4286,7 +4230,7 @@ var xxx_messageInfo_ShootList 
proto.InternalMessageInfo func (m *ShootMachineImage) Reset() { *m = ShootMachineImage{} } func (*ShootMachineImage) ProtoMessage() {} func (*ShootMachineImage) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{151} + return fileDescriptor_ca37af0df9a5bbd2, []int{149} } func (m *ShootMachineImage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4314,7 +4258,7 @@ var xxx_messageInfo_ShootMachineImage proto.InternalMessageInfo func (m *ShootNetworks) Reset() { *m = ShootNetworks{} } func (*ShootNetworks) ProtoMessage() {} func (*ShootNetworks) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{152} + return fileDescriptor_ca37af0df9a5bbd2, []int{150} } func (m *ShootNetworks) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4342,7 +4286,7 @@ var xxx_messageInfo_ShootNetworks proto.InternalMessageInfo func (m *ShootSSHKeypairRotation) Reset() { *m = ShootSSHKeypairRotation{} } func (*ShootSSHKeypairRotation) ProtoMessage() {} func (*ShootSSHKeypairRotation) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{153} + return fileDescriptor_ca37af0df9a5bbd2, []int{151} } func (m *ShootSSHKeypairRotation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4370,7 +4314,7 @@ var xxx_messageInfo_ShootSSHKeypairRotation proto.InternalMessageInfo func (m *ShootSpec) Reset() { *m = ShootSpec{} } func (*ShootSpec) ProtoMessage() {} func (*ShootSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{154} + return fileDescriptor_ca37af0df9a5bbd2, []int{152} } func (m *ShootSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4398,7 +4342,7 @@ var xxx_messageInfo_ShootSpec proto.InternalMessageInfo func (m *ShootState) Reset() { *m = ShootState{} } func (*ShootState) ProtoMessage() {} func (*ShootState) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{155} + return fileDescriptor_ca37af0df9a5bbd2, []int{153} } func (m *ShootState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4426,7 +4370,7 @@ var xxx_messageInfo_ShootState proto.InternalMessageInfo func (m *ShootStateList) Reset() { *m = ShootStateList{} } func (*ShootStateList) ProtoMessage() {} func (*ShootStateList) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{156} + return fileDescriptor_ca37af0df9a5bbd2, []int{154} } func (m *ShootStateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4454,7 +4398,7 @@ var xxx_messageInfo_ShootStateList proto.InternalMessageInfo func (m *ShootStateSpec) Reset() { *m = ShootStateSpec{} } func (*ShootStateSpec) ProtoMessage() {} func (*ShootStateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{157} + return fileDescriptor_ca37af0df9a5bbd2, []int{155} } func (m *ShootStateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4482,7 +4426,7 @@ var xxx_messageInfo_ShootStateSpec proto.InternalMessageInfo func (m *ShootStatus) Reset() { *m = ShootStatus{} } func (*ShootStatus) ProtoMessage() {} func (*ShootStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{158} + return fileDescriptor_ca37af0df9a5bbd2, []int{156} } func (m *ShootStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4510,7 +4454,7 @@ var xxx_messageInfo_ShootStatus proto.InternalMessageInfo func (m *ShootTemplate) Reset() { *m = ShootTemplate{} } func (*ShootTemplate) ProtoMessage() {} func (*ShootTemplate) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{159} + return fileDescriptor_ca37af0df9a5bbd2, []int{157} } func (m *ShootTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4538,7 +4482,7 @@ var xxx_messageInfo_ShootTemplate proto.InternalMessageInfo func (m *SystemComponents) Reset() { *m = SystemComponents{} } func (*SystemComponents) ProtoMessage() {} func (*SystemComponents) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{160} + return fileDescriptor_ca37af0df9a5bbd2, []int{158} } func (m *SystemComponents) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4566,7 +4510,7 @@ var xxx_messageInfo_SystemComponents proto.InternalMessageInfo func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} func (*Toleration) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{161} + return fileDescriptor_ca37af0df9a5bbd2, []int{159} } func (m *Toleration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4594,7 +4538,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo func (m *VerticalPodAutoscaler) Reset() { *m = VerticalPodAutoscaler{} } func (*VerticalPodAutoscaler) ProtoMessage() {} func (*VerticalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{162} + return fileDescriptor_ca37af0df9a5bbd2, []int{160} } func (m *VerticalPodAutoscaler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4622,7 +4566,7 @@ var xxx_messageInfo_VerticalPodAutoscaler proto.InternalMessageInfo func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{163} + return fileDescriptor_ca37af0df9a5bbd2, []int{161} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4650,7 +4594,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeType) Reset() { *m = VolumeType{} } func (*VolumeType) ProtoMessage() {} func (*VolumeType) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{164} + return fileDescriptor_ca37af0df9a5bbd2, []int{162} } func (m *VolumeType) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4678,7 +4622,7 @@ var xxx_messageInfo_VolumeType proto.InternalMessageInfo func (m *WatchCacheSizes) Reset() { *m = WatchCacheSizes{} } func (*WatchCacheSizes) ProtoMessage() {} func (*WatchCacheSizes) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{165} + return fileDescriptor_ca37af0df9a5bbd2, []int{163} } func (m *WatchCacheSizes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4706,7 +4650,7 @@ var xxx_messageInfo_WatchCacheSizes proto.InternalMessageInfo func (m *Worker) Reset() { *m = Worker{} } func (*Worker) ProtoMessage() {} func (*Worker) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{166} + return fileDescriptor_ca37af0df9a5bbd2, []int{164} } func (m *Worker) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4734,7 +4678,7 @@ var xxx_messageInfo_Worker proto.InternalMessageInfo func (m *WorkerKubernetes) Reset() { *m = WorkerKubernetes{} } func (*WorkerKubernetes) ProtoMessage() {} func (*WorkerKubernetes) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{167} + return fileDescriptor_ca37af0df9a5bbd2, []int{165} } func (m *WorkerKubernetes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4762,7 +4706,7 @@ var 
xxx_messageInfo_WorkerKubernetes proto.InternalMessageInfo func (m *WorkerSystemComponents) Reset() { *m = WorkerSystemComponents{} } func (*WorkerSystemComponents) ProtoMessage() {} func (*WorkerSystemComponents) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{168} + return fileDescriptor_ca37af0df9a5bbd2, []int{166} } func (m *WorkerSystemComponents) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4790,7 +4734,7 @@ var xxx_messageInfo_WorkerSystemComponents proto.InternalMessageInfo func (m *WorkersSettings) Reset() { *m = WorkersSettings{} } func (*WorkersSettings) ProtoMessage() {} func (*WorkersSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_ca37af0df9a5bbd2, []int{169} + return fileDescriptor_ca37af0df9a5bbd2, []int{167} } func (m *WorkersSettings) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4951,8 +4895,6 @@ func init() { proto.RegisterType((*SeedProvider)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedProvider") proto.RegisterType((*SeedSelector)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSelector") proto.RegisterType((*SeedSettingDependencyWatchdog)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettingDependencyWatchdog") - proto.RegisterType((*SeedSettingDependencyWatchdogEndpoint)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettingDependencyWatchdogEndpoint") - proto.RegisterType((*SeedSettingDependencyWatchdogProbe)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettingDependencyWatchdogProbe") proto.RegisterType((*SeedSettingDependencyWatchdogProber)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettingDependencyWatchdogProber") proto.RegisterType((*SeedSettingDependencyWatchdogWeeder)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettingDependencyWatchdogWeeder") proto.RegisterType((*SeedSettingExcessCapacityReservation)(nil), "github.com.gardener.gardener.pkg.apis.core.v1beta1.SeedSettingExcessCapacityReservation") @@ -5012,754 +4954,755 @@ func init() { } var fileDescriptor_ca37af0df9a5bbd2 = []byte{ - // 11952 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x7d, 0x6c, 0x24, 0xc9, - 0x75, 0x18, 0xae, 0x1e, 0x7e, 0x0d, 0x1f, 0x3f, 0x96, 0x5b, 0xfb, 0x71, 0x5c, 0xee, 0xdd, 0xce, - 0xaa, 0xef, 0xa4, 0xdf, 0x9d, 0x4f, 0xe6, 0xfa, 0xce, 0x92, 0x4e, 0xb7, 0xd2, 0xe9, 0x44, 0xce, - 0x70, 0x77, 0x47, 0x4b, 0x72, 0x79, 0x35, 0xdc, 0xbb, 0xd3, 0xc9, 0xbf, 0xb3, 0x9a, 0xd3, 0xc5, - 0x61, 0x1f, 0x7b, 0xba, 0xe7, 0xba, 0x7b, 0xb8, 0x9c, 0x3b, 0x29, 0xb2, 0x64, 0x4b, 0xb1, 0xce, - 0x56, 0x60, 0x08, 0x70, 0x14, 0x49, 0x0e, 0x2c, 0xc3, 0x70, 0x9c, 0xc4, 0x81, 0x63, 0x38, 0x70, - 0x00, 0xdb, 0x08, 0x60, 0x08, 0x48, 0x2c, 0x19, 0x96, 0x21, 0x48, 0x09, 0x22, 0x21, 0x31, 0x1d, - 0x31, 0x8a, 0x1c, 0x20, 0x81, 0x11, 0xc0, 0x08, 0x82, 0x6c, 0x0c, 0x27, 0xa8, 0xaf, 0xee, 0xea, - 0xaf, 0x21, 0xd9, 0x43, 0x52, 0x3a, 0xd8, 0x7f, 0x91, 0x53, 0x1f, 0xef, 0x55, 0x55, 0x57, 0xbd, - 0x7a, 0xef, 0xd5, 0xfb, 0x80, 0xc5, 0x96, 0x15, 0x6c, 0x75, 0x37, 0xe6, 0x9b, 0x6e, 0xfb, 0x5a, - 0xcb, 0xf0, 0x4c, 0xe2, 0x10, 0x2f, 0xfa, 0xa7, 0xb3, 0xdd, 0xba, 0x66, 0x74, 0x2c, 0xff, 0x5a, - 0xd3, 0xf5, 0xc8, 0xb5, 0x9d, 0x27, 0x36, 0x48, 0x60, 0x3c, 0x71, 0xad, 0x45, 0xeb, 0x8c, 0x80, - 0x98, 0xf3, 0x1d, 0xcf, 0x0d, 0x5c, 0xf4, 0x64, 0x04, 0x63, 0x5e, 0x76, 0x8d, 0xfe, 0xe9, 0x6c, - 0xb7, 0xe6, 0x29, 0x8c, 0x79, 0x0a, 0x63, 0x5e, 0xc0, 0x98, 0xfb, 0x51, 0x15, 0xaf, 0xdb, 
0x72, - 0xaf, 0x31, 0x50, 0x1b, 0xdd, 0x4d, 0xf6, 0x8b, 0xfd, 0x60, 0xff, 0x71, 0x14, 0x73, 0x8f, 0x6d, - 0xbf, 0xc7, 0x9f, 0xb7, 0x5c, 0x3a, 0x98, 0x6b, 0x46, 0x37, 0x70, 0xfd, 0xa6, 0x61, 0x5b, 0x4e, - 0xeb, 0xda, 0x4e, 0x6a, 0x34, 0x73, 0xba, 0xd2, 0x54, 0x0c, 0xbb, 0x6f, 0x1b, 0x6f, 0xc3, 0x68, - 0x66, 0xb5, 0x79, 0x67, 0xd4, 0xa6, 0x6d, 0x34, 0xb7, 0x2c, 0x87, 0x78, 0x3d, 0xb9, 0x20, 0xd7, - 0x3c, 0xe2, 0xbb, 0x5d, 0xaf, 0x49, 0x8e, 0xd4, 0xcb, 0xbf, 0xd6, 0x26, 0x81, 0x91, 0x85, 0xeb, - 0x5a, 0x5e, 0x2f, 0xaf, 0xeb, 0x04, 0x56, 0x3b, 0x8d, 0xe6, 0xdd, 0x07, 0x75, 0xf0, 0x9b, 0x5b, - 0xa4, 0x6d, 0xa4, 0xfa, 0xfd, 0x78, 0x5e, 0xbf, 0x6e, 0x60, 0xd9, 0xd7, 0x2c, 0x27, 0xf0, 0x03, - 0x2f, 0xd9, 0x49, 0x7f, 0x43, 0x83, 0x99, 0x85, 0xb5, 0x7a, 0x83, 0x78, 0x3b, 0xc4, 0x5b, 0x76, - 0x5b, 0x2d, 0xcb, 0x69, 0xa1, 0xc7, 0x61, 0x7c, 0x87, 0x78, 0x1b, 0xae, 0x6f, 0x05, 0xbd, 0x59, - 0xed, 0xaa, 0xf6, 0xe8, 0xc8, 0xe2, 0xd4, 0xfe, 0x5e, 0x65, 0xfc, 0x79, 0x59, 0x88, 0xa3, 0x7a, - 0x54, 0x87, 0x73, 0x5b, 0x41, 0xd0, 0x59, 0x68, 0x36, 0x89, 0xef, 0x87, 0x2d, 0x66, 0x4b, 0xac, - 0xdb, 0x03, 0xfb, 0x7b, 0x95, 0x73, 0xb7, 0xd6, 0xd7, 0xd7, 0x12, 0xd5, 0x38, 0xab, 0x8f, 0xfe, - 0xdb, 0x1a, 0x9c, 0x0d, 0x07, 0x83, 0xc9, 0xab, 0x5d, 0xe2, 0x07, 0x3e, 0xc2, 0x70, 0xb1, 0x6d, - 0xec, 0xae, 0xba, 0xce, 0x4a, 0x37, 0x30, 0x02, 0xcb, 0x69, 0xd5, 0x9d, 0x4d, 0xdb, 0x6a, 0x6d, - 0x05, 0x62, 0x68, 0x73, 0xfb, 0x7b, 0x95, 0x8b, 0x2b, 0x99, 0x2d, 0x70, 0x4e, 0x4f, 0x3a, 0xe8, - 0xb6, 0xb1, 0x9b, 0x02, 0xa8, 0x0c, 0x7a, 0x25, 0x5d, 0x8d, 0xb3, 0xfa, 0xe8, 0x4f, 0xc2, 0xc8, - 0x82, 0x69, 0xba, 0x0e, 0x7a, 0x0c, 0xc6, 0x88, 0x63, 0x6c, 0xd8, 0xc4, 0x64, 0x03, 0x2b, 0x2f, - 0x9e, 0xf9, 0xea, 0x5e, 0xe5, 0x2d, 0xfb, 0x7b, 0x95, 0xb1, 0x25, 0x5e, 0x8c, 0x65, 0xbd, 0xfe, - 0x8b, 0x25, 0x18, 0x65, 0x9d, 0x7c, 0xf4, 0x39, 0x0d, 0xce, 0x6d, 0x77, 0x37, 0x88, 0xe7, 0x90, - 0x80, 0xf8, 0x35, 0xc3, 0xdf, 0xda, 0x70, 0x0d, 0x8f, 0x83, 0x98, 0x78, 0xf2, 0xe6, 0xfc, 0xd1, - 0xcf, 0xdf, 0xfc, 0xed, 0x34, 0x38, 0x3e, 0xa7, 0x8c, 0x0a, 0x9c, 0x85, 0x1c, 0xed, 0xc0, 0xa4, - 0xd3, 0xb2, 0x9c, 0xdd, 0xba, 0xd3, 0xf2, 0x88, 0xef, 0xb3, 0x75, 0x99, 0x78, 0xf2, 0x03, 0x45, - 0x06, 0xb3, 0xaa, 0xc0, 0x59, 0x9c, 0xd9, 0xdf, 0xab, 0x4c, 0xaa, 0x25, 0x38, 0x86, 0x47, 0xff, - 0x6b, 0x0d, 0xce, 0x2c, 0x98, 0x6d, 0xcb, 0xf7, 0x2d, 0xd7, 0x59, 0xb3, 0xbb, 0x2d, 0xcb, 0x41, - 0x57, 0x61, 0xd8, 0x31, 0xda, 0x84, 0x2d, 0xc8, 0xf8, 0xe2, 0xa4, 0x58, 0xd3, 0xe1, 0x55, 0xa3, - 0x4d, 0x30, 0xab, 0x41, 0xcf, 0xc1, 0x68, 0xd3, 0x75, 0x36, 0xad, 0x96, 0x18, 0xe7, 0x8f, 0xce, - 0xf3, 0x93, 0x30, 0xaf, 0x9e, 0x04, 0x36, 0x3c, 0x71, 0x82, 0xe6, 0xb1, 0x71, 0x6f, 0x69, 0x37, - 0x20, 0x0e, 0x45, 0xb3, 0x08, 0xfb, 0x7b, 0x95, 0xd1, 0x2a, 0x03, 0x80, 0x05, 0x20, 0xf4, 0x28, - 0x94, 0x4d, 0xcb, 0xe7, 0x1f, 0x73, 0x88, 0x7d, 0xcc, 0xc9, 0xfd, 0xbd, 0x4a, 0xb9, 0x26, 0xca, - 0x70, 0x58, 0x8b, 0x96, 0xe1, 0x3c, 0x5d, 0x41, 0xde, 0xaf, 0x41, 0x9a, 0x1e, 0x09, 0xe8, 0xd0, - 0x66, 0x87, 0xd9, 0x70, 0x67, 0xf7, 0xf7, 0x2a, 0xe7, 0x6f, 0x67, 0xd4, 0xe3, 0xcc, 0x5e, 0xfa, - 0x0d, 0x28, 0x2f, 0xd8, 0xc4, 0xa3, 0x1b, 0x0c, 0x5d, 0x87, 0x69, 0xd2, 0x36, 0x2c, 0x1b, 0x93, - 0x26, 0xb1, 0x76, 0x88, 0xe7, 0xcf, 0x6a, 0x57, 0x87, 0x1e, 0x1d, 0x5f, 0x44, 0xfb, 0x7b, 0x95, - 0xe9, 0xa5, 0x58, 0x0d, 0x4e, 0xb4, 0xd4, 0x3f, 0xa1, 0xc1, 0xc4, 0x42, 0xd7, 0xb4, 0x02, 0x3e, - 0x2f, 0xe4, 0xc1, 0x84, 0x41, 0x7f, 0xae, 0xb9, 0xb6, 0xd5, 0xec, 0x89, 0xcd, 0xf5, 0x6c, 0x91, - 0xef, 0xb9, 0x10, 0x81, 0x59, 0x3c, 0xb3, 0xbf, 0x57, 0x99, 0x50, 0x0a, 0xb0, 0x8a, 0x44, 0xdf, - 0x02, 0xb5, 0x0e, 
0x7d, 0x08, 0x26, 0xf9, 0x74, 0x57, 0x8c, 0x0e, 0x26, 0x9b, 0x62, 0x0c, 0x0f, - 0x2b, 0xdf, 0x4a, 0x22, 0x9a, 0xbf, 0xb3, 0xf1, 0x0a, 0x69, 0x06, 0x98, 0x6c, 0x12, 0x8f, 0x38, - 0x4d, 0xc2, 0xb7, 0x4d, 0x55, 0xe9, 0x8c, 0x63, 0xa0, 0xf4, 0x3f, 0xa3, 0x44, 0x6c, 0xc7, 0xb0, - 0x6c, 0x63, 0xc3, 0xb2, 0xad, 0xa0, 0xf7, 0x92, 0xeb, 0x90, 0x43, 0xec, 0x9b, 0xbb, 0xf0, 0x40, - 0xd7, 0x31, 0x78, 0x3f, 0x9b, 0xac, 0xf0, 0x9d, 0xb2, 0xde, 0xeb, 0x10, 0xba, 0xe1, 0xe9, 0x4a, - 0x5f, 0xde, 0xdf, 0xab, 0x3c, 0x70, 0x37, 0xbb, 0x09, 0xce, 0xeb, 0x4b, 0xe9, 0x95, 0x52, 0xf5, - 0xbc, 0x6b, 0x77, 0xdb, 0x02, 0xea, 0x10, 0x83, 0xca, 0xe8, 0xd5, 0xdd, 0xcc, 0x16, 0x38, 0xa7, - 0xa7, 0xfe, 0xd5, 0x12, 0x4c, 0x2e, 0x1a, 0xcd, 0xed, 0x6e, 0x67, 0xb1, 0xdb, 0xdc, 0x26, 0x01, - 0xfa, 0x08, 0x94, 0xe9, 0x85, 0x63, 0x1a, 0x81, 0x21, 0x56, 0xf2, 0xc7, 0x72, 0x77, 0x3d, 0xfb, - 0x88, 0xb4, 0x75, 0xb4, 0xb6, 0x2b, 0x24, 0x30, 0x16, 0x91, 0x58, 0x13, 0x88, 0xca, 0x70, 0x08, - 0x15, 0x6d, 0xc2, 0xb0, 0xdf, 0x21, 0x4d, 0x71, 0xa6, 0x6a, 0x45, 0xf6, 0x8a, 0x3a, 0xe2, 0x46, - 0x87, 0x34, 0xa3, 0xaf, 0x40, 0x7f, 0x61, 0x06, 0x1f, 0x39, 0x30, 0xea, 0x07, 0x46, 0xd0, 0xf5, - 0xd9, 0x41, 0x9b, 0x78, 0xf2, 0xc6, 0xc0, 0x98, 0x18, 0xb4, 0xc5, 0x69, 0x81, 0x6b, 0x94, 0xff, - 0xc6, 0x02, 0x8b, 0xfe, 0xef, 0x35, 0x98, 0x51, 0x9b, 0x2f, 0x5b, 0x7e, 0x80, 0x7e, 0x22, 0xb5, - 0x9c, 0xf3, 0x87, 0x5b, 0x4e, 0xda, 0x9b, 0x2d, 0xe6, 0x8c, 0x40, 0x57, 0x96, 0x25, 0xca, 0x52, - 0x12, 0x18, 0xb1, 0x02, 0xd2, 0xe6, 0xdb, 0xaa, 0x20, 0x1d, 0x55, 0x87, 0xbc, 0x38, 0x25, 0x90, - 0x8d, 0xd4, 0x29, 0x58, 0xcc, 0xa1, 0xeb, 0x1f, 0x81, 0xf3, 0x6a, 0xab, 0x35, 0xcf, 0xdd, 0xb1, - 0x4c, 0xe2, 0xd1, 0x93, 0x10, 0xf4, 0x3a, 0xa9, 0x93, 0x40, 0x77, 0x16, 0x66, 0x35, 0xe8, 0xed, - 0x30, 0xea, 0x91, 0x96, 0xe5, 0x3a, 0xec, 0x6b, 0x8f, 0x47, 0x6b, 0x87, 0x59, 0x29, 0x16, 0xb5, - 0xfa, 0xff, 0x2c, 0xc5, 0xd7, 0x8e, 0x7e, 0x46, 0xb4, 0x03, 0xe5, 0x8e, 0x40, 0x25, 0xd6, 0xee, - 0xd6, 0xa0, 0x13, 0x94, 0x43, 0x8f, 0x56, 0x55, 0x96, 0xe0, 0x10, 0x17, 0xb2, 0x60, 0x5a, 0xfe, - 0x5f, 0x1d, 0x80, 0xfc, 0x33, 0x72, 0xba, 0x16, 0x03, 0x84, 0x13, 0x80, 0xd1, 0x3a, 0x8c, 0xfb, - 0x8c, 0x48, 0x53, 0xc2, 0x35, 0x94, 0x4f, 0xb8, 0x1a, 0xb2, 0x91, 0x20, 0x5c, 0x67, 0xc5, 0xf0, - 0xc7, 0xc3, 0x0a, 0x1c, 0x01, 0xa2, 0x97, 0x8c, 0x4f, 0x88, 0xa9, 0x5c, 0x17, 0xec, 0x92, 0x69, - 0x88, 0x32, 0x1c, 0xd6, 0xea, 0x5f, 0x1e, 0x06, 0x94, 0xde, 0xe2, 0xea, 0x0a, 0xf0, 0x12, 0xb1, - 0xfe, 0x83, 0xac, 0x80, 0x38, 0x2d, 0x09, 0xc0, 0xe8, 0x35, 0x98, 0xb2, 0x0d, 0x3f, 0xb8, 0xd3, - 0xa1, 0xdc, 0xa3, 0xdc, 0x28, 0x13, 0x4f, 0x2e, 0x14, 0xf9, 0xd2, 0xcb, 0x2a, 0xa0, 0xc5, 0xb3, - 0xfb, 0x7b, 0x95, 0xa9, 0x58, 0x11, 0x8e, 0xa3, 0x42, 0xaf, 0xc0, 0x38, 0x2d, 0x58, 0xf2, 0x3c, - 0xd7, 0x13, 0xab, 0xff, 0x4c, 0x51, 0xbc, 0x0c, 0x08, 0xe7, 0x66, 0xc3, 0x9f, 0x38, 0x02, 0x8f, - 0x3e, 0x08, 0xc8, 0xdd, 0xf0, 0x29, 0x03, 0x6a, 0xde, 0xe4, 0xac, 0x32, 0x9d, 0x2c, 0xfd, 0x3a, - 0x43, 0x8b, 0x73, 0xe2, 0x6b, 0xa2, 0x3b, 0xa9, 0x16, 0x38, 0xa3, 0x17, 0xda, 0x06, 0x14, 0xb2, - 0xdb, 0xe1, 0x06, 0x98, 0x1d, 0x39, 0xfc, 0xf6, 0xb9, 0x48, 0x91, 0xdd, 0x4c, 0x81, 0xc0, 0x19, - 0x60, 0xf5, 0x7f, 0x5d, 0x82, 0x09, 0xbe, 0x45, 0x96, 0x9c, 0xc0, 0xeb, 0x9d, 0xc2, 0x05, 0x41, - 0x62, 0x17, 0x44, 0xb5, 0xf8, 0x99, 0x67, 0x03, 0xce, 0xbd, 0x1f, 0xda, 0x89, 0xfb, 0x61, 0x69, - 0x50, 0x44, 0xfd, 0xaf, 0x87, 0x7f, 0xa7, 0xc1, 0x19, 0xa5, 0xf5, 0x29, 0xdc, 0x0e, 0x66, 0xfc, - 0x76, 0x78, 0x76, 0xc0, 0xf9, 0xe5, 0x5c, 0x0e, 0x6e, 0x6c, 0x5a, 0x8c, 0x70, 0x3f, 0x09, 0xb0, - 0xc1, 0xc8, 0xc9, 0x6a, 0xc4, 0x27, 0x85, 
0x9f, 0x7c, 0x31, 0xac, 0xc1, 0x4a, 0xab, 0x18, 0xcd, - 0x2a, 0xf5, 0xa5, 0x59, 0xff, 0x65, 0x08, 0xce, 0xa6, 0x96, 0x3d, 0x4d, 0x47, 0xb4, 0x1f, 0x10, - 0x1d, 0x29, 0xfd, 0x20, 0xe8, 0xc8, 0x50, 0x21, 0x3a, 0x72, 0xe8, 0x7b, 0x02, 0x79, 0x80, 0xda, - 0x56, 0x8b, 0x77, 0x6b, 0x04, 0x86, 0x17, 0xac, 0x5b, 0x6d, 0x22, 0x28, 0xce, 0x8f, 0x1c, 0x6e, - 0xcb, 0xd2, 0x1e, 0x9c, 0xf0, 0xac, 0xa4, 0x20, 0xe1, 0x0c, 0xe8, 0xfa, 0x37, 0x87, 0x01, 0xaa, - 0x0b, 0xd8, 0x0d, 0xf8, 0x60, 0x9f, 0x85, 0x91, 0xce, 0x96, 0xe1, 0xcb, 0xfd, 0xf4, 0x98, 0xdc, - 0x8c, 0x6b, 0xb4, 0xf0, 0xfe, 0x5e, 0x65, 0xb6, 0xea, 0x11, 0x93, 0x38, 0x81, 0x65, 0xd8, 0xbe, - 0xec, 0xc4, 0xea, 0x30, 0xef, 0x47, 0xe7, 0x40, 0x97, 0xb1, 0xea, 0xb6, 0x3b, 0x36, 0xa1, 0xb5, - 0x6c, 0x0e, 0xa5, 0x62, 0x73, 0x58, 0x4e, 0x41, 0xc2, 0x19, 0xd0, 0x25, 0xce, 0xba, 0x63, 0x05, - 0x96, 0x11, 0xe2, 0x1c, 0x2a, 0x8e, 0x33, 0x0e, 0x09, 0x67, 0x40, 0x47, 0x6f, 0x68, 0x30, 0x17, - 0x2f, 0xbe, 0x61, 0x39, 0x96, 0xbf, 0x45, 0x4c, 0x86, 0x7c, 0xf8, 0xc8, 0xc8, 0xaf, 0xec, 0xef, - 0x55, 0xe6, 0x96, 0x73, 0x21, 0xe2, 0x3e, 0xd8, 0xd0, 0x67, 0x35, 0xb8, 0x9c, 0x58, 0x17, 0xcf, - 0x6a, 0xb5, 0x88, 0x27, 0x46, 0x73, 0xf4, 0x2d, 0x54, 0xd9, 0xdf, 0xab, 0x5c, 0x5e, 0xce, 0x07, - 0x89, 0xfb, 0xe1, 0xd3, 0xbf, 0xa2, 0xc1, 0x50, 0x15, 0xd7, 0xd1, 0xe3, 0x31, 0x21, 0xee, 0x01, - 0x55, 0x88, 0xbb, 0xbf, 0x57, 0x19, 0xab, 0xe2, 0xba, 0x22, 0xcf, 0x7d, 0x56, 0x83, 0xb3, 0x4d, - 0xd7, 0x09, 0x0c, 0x3a, 0x2e, 0xcc, 0x39, 0x1d, 0x49, 0x55, 0x0b, 0xc9, 0x2f, 0xd5, 0x04, 0xb0, - 0xc5, 0x4b, 0x62, 0x00, 0x67, 0x93, 0x35, 0x3e, 0x4e, 0x63, 0xd6, 0xbf, 0xad, 0xc1, 0x64, 0xd5, - 0x76, 0xbb, 0xe6, 0x9a, 0xe7, 0x6e, 0x5a, 0x36, 0x79, 0x73, 0x08, 0x6d, 0xea, 0x88, 0xf3, 0x2e, - 0x65, 0x26, 0x44, 0xa9, 0x0d, 0xdf, 0x24, 0x42, 0x94, 0x3a, 0xe4, 0x9c, 0x7b, 0xf2, 0x17, 0xc7, - 0xe2, 0x33, 0x63, 0x37, 0xe5, 0xa3, 0x50, 0x6e, 0x1a, 0x8b, 0x5d, 0xc7, 0xb4, 0x43, 0x29, 0x8a, - 0x8e, 0xb2, 0xba, 0xc0, 0xcb, 0x70, 0x58, 0x8b, 0x5e, 0x03, 0x88, 0x14, 0x6a, 0xe2, 0x33, 0xdc, - 0x18, 0x4c, 0x89, 0xd7, 0x20, 0x41, 0x60, 0x39, 0x2d, 0x3f, 0xfa, 0xf4, 0x51, 0x1d, 0x56, 0xb0, - 0xa1, 0x8f, 0xc1, 0x94, 0x58, 0xe4, 0x7a, 0xdb, 0x68, 0x09, 0x7d, 0x43, 0xc1, 0x95, 0x5a, 0x51, - 0x00, 0x2d, 0x5e, 0x10, 0x88, 0xa7, 0xd4, 0x52, 0x1f, 0xc7, 0xb1, 0xa1, 0x1e, 0x4c, 0xb6, 0x55, - 0x1d, 0xca, 0x70, 0x71, 0x76, 0x46, 0xd1, 0xa7, 0x2c, 0x9e, 0x17, 0xc8, 0x27, 0x63, 0xda, 0x97, - 0x18, 0xaa, 0x0c, 0x51, 0x70, 0xe4, 0xa4, 0x44, 0x41, 0x02, 0x63, 0x5c, 0x18, 0xf6, 0x67, 0x47, - 0xd9, 0x04, 0xaf, 0x17, 0x99, 0x20, 0x97, 0xab, 0x23, 0x0d, 0x31, 0xff, 0xed, 0x63, 0x09, 0x1b, - 0xed, 0xc0, 0x24, 0xbd, 0xd5, 0x1b, 0xc4, 0x26, 0xcd, 0xc0, 0xf5, 0x66, 0xc7, 0x8a, 0x6b, 0x60, - 0x1b, 0x0a, 0x1c, 0xae, 0x4a, 0x53, 0x4b, 0x70, 0x0c, 0x4f, 0xa8, 0x2b, 0x28, 0xe7, 0xea, 0x0a, - 0xba, 0x30, 0xb1, 0xa3, 0xe8, 0xb4, 0xc6, 0xd9, 0x22, 0xbc, 0xbf, 0xc8, 0xc0, 0x22, 0x05, 0xd7, - 0xe2, 0x39, 0x81, 0x68, 0x42, 0x55, 0x86, 0xa9, 0x78, 0xf4, 0xbd, 0x32, 0x9c, 0xad, 0xda, 0x5d, - 0x3f, 0x20, 0xde, 0x82, 0x78, 0x24, 0x22, 0x1e, 0xfa, 0xa4, 0x06, 0x17, 0xd9, 0xbf, 0x35, 0xf7, - 0x9e, 0x53, 0x23, 0xb6, 0xd1, 0x5b, 0xd8, 0xa4, 0x2d, 0x4c, 0xf3, 0x68, 0x14, 0xa8, 0xd6, 0x15, - 0x5c, 0x24, 0x53, 0xce, 0x35, 0x32, 0x21, 0xe2, 0x1c, 0x4c, 0xe8, 0xe7, 0x34, 0xb8, 0x94, 0x51, - 0x55, 0x23, 0x36, 0x09, 0x24, 0xe7, 0x72, 0xd4, 0x71, 0x3c, 0xb4, 0xbf, 0x57, 0xb9, 0xd4, 0xc8, - 0x03, 0x8a, 0xf3, 0xf1, 0xa1, 0xbf, 0xa7, 0xc1, 0x5c, 0x46, 0xed, 0x0d, 0xc3, 0xb2, 0xbb, 0x9e, - 0x64, 0x6a, 0x8e, 0x3a, 0x1c, 0xc6, 0x5b, 0x34, 0x72, 0xa1, 0xe2, 
0x3e, 0x18, 0xd1, 0xc7, 0xe1, - 0x42, 0x58, 0x7b, 0xd7, 0x71, 0x08, 0x31, 0x63, 0x2c, 0xce, 0x51, 0x87, 0x72, 0x69, 0x7f, 0xaf, - 0x72, 0xa1, 0x91, 0x05, 0x10, 0x67, 0xe3, 0x41, 0x2d, 0x78, 0x28, 0xaa, 0x08, 0x2c, 0xdb, 0x7a, - 0x8d, 0x73, 0x61, 0x5b, 0x1e, 0xf1, 0xb7, 0x5c, 0xdb, 0x64, 0xc4, 0x42, 0x5b, 0x7c, 0xeb, 0xfe, - 0x5e, 0xe5, 0xa1, 0x46, 0xbf, 0x86, 0xb8, 0x3f, 0x1c, 0x64, 0xc2, 0xa4, 0xdf, 0x34, 0x9c, 0xba, - 0x13, 0x10, 0x6f, 0xc7, 0xb0, 0x67, 0x47, 0x0b, 0x4d, 0x90, 0x1f, 0x51, 0x05, 0x0e, 0x8e, 0x41, - 0x45, 0xef, 0x81, 0x32, 0xd9, 0xed, 0x18, 0x8e, 0x49, 0x38, 0x59, 0x18, 0x5f, 0x7c, 0x90, 0x5e, - 0x46, 0x4b, 0xa2, 0xec, 0xfe, 0x5e, 0x65, 0x52, 0xfe, 0xbf, 0xe2, 0x9a, 0x04, 0x87, 0xad, 0xd1, - 0x47, 0xe1, 0x3c, 0x7b, 0x0f, 0x33, 0x09, 0x23, 0x72, 0xbe, 0x64, 0x74, 0xcb, 0x85, 0xc6, 0xc9, - 0xde, 0x36, 0x56, 0x32, 0xe0, 0xe1, 0x4c, 0x2c, 0xf4, 0x33, 0xb4, 0x8d, 0xdd, 0x9b, 0x9e, 0xd1, - 0x24, 0x9b, 0x5d, 0x7b, 0x9d, 0x78, 0x6d, 0xcb, 0xe1, 0xb2, 0x04, 0x69, 0xba, 0x8e, 0x49, 0x49, - 0x89, 0xf6, 0xe8, 0x08, 0xff, 0x0c, 0x2b, 0xfd, 0x1a, 0xe2, 0xfe, 0x70, 0xd0, 0x3b, 0x61, 0xd2, - 0x6a, 0x39, 0xae, 0x47, 0xd6, 0x0d, 0xcb, 0x09, 0xfc, 0x59, 0x60, 0x6a, 0x77, 0xb6, 0xac, 0x75, - 0xa5, 0x1c, 0xc7, 0x5a, 0xe9, 0x7b, 0x43, 0x30, 0x5e, 0x75, 0x1d, 0xd3, 0x62, 0x62, 0xcc, 0x13, - 0x31, 0x9d, 0xe9, 0x43, 0x2a, 0x1d, 0xbc, 0xbf, 0x57, 0x99, 0x0a, 0x1b, 0x2a, 0x84, 0xf1, 0xe9, - 0x50, 0x51, 0xc1, 0x05, 0xe3, 0xb7, 0xc6, 0x35, 0x0c, 0xf7, 0xf7, 0x2a, 0x67, 0xc2, 0x6e, 0x71, - 0xa5, 0x03, 0xda, 0xe1, 0xf2, 0xc7, 0xba, 0x67, 0x38, 0xbe, 0x35, 0x80, 0xfc, 0x11, 0x4a, 0x96, - 0xcb, 0x29, 0x68, 0x38, 0x03, 0x03, 0x7a, 0x05, 0xa6, 0x69, 0xe9, 0xdd, 0x8e, 0x69, 0x04, 0xa4, - 0xa0, 0xd8, 0x71, 0x51, 0xe0, 0x9c, 0x5e, 0x8e, 0x41, 0xc2, 0x09, 0xc8, 0x5c, 0xc7, 0x6c, 0xf8, - 0xae, 0xc3, 0x8e, 0x5b, 0x4c, 0xc7, 0x4c, 0x4b, 0xb1, 0xa8, 0x45, 0x8f, 0xc1, 0x58, 0x9b, 0xf8, - 0xbe, 0xd1, 0x22, 0xec, 0xfc, 0x8c, 0x47, 0x97, 0xe4, 0x0a, 0x2f, 0xc6, 0xb2, 0x1e, 0xbd, 0x03, - 0x46, 0x9a, 0xae, 0x49, 0xfc, 0xd9, 0x31, 0xf6, 0x85, 0xa9, 0xf4, 0x35, 0x52, 0xa5, 0x05, 0xf7, - 0xf7, 0x2a, 0xe3, 0x4c, 0x0e, 0xa7, 0xbf, 0x30, 0x6f, 0xa4, 0xff, 0x32, 0xe5, 0x59, 0x13, 0x4c, - 0xfa, 0x21, 0x74, 0xe3, 0xa7, 0xa7, 0x66, 0xd6, 0x3f, 0x4f, 0x05, 0x06, 0xd7, 0x09, 0x3c, 0xd7, - 0x5e, 0xb3, 0x0d, 0x87, 0xa0, 0x4f, 0x6b, 0x30, 0xb3, 0x65, 0xb5, 0xb6, 0xd4, 0xc7, 0x2d, 0x71, - 0xb1, 0x15, 0xe2, 0xed, 0x6f, 0x25, 0x60, 0x2d, 0x9e, 0xdf, 0xdf, 0xab, 0xcc, 0x24, 0x4b, 0x71, - 0x0a, 0xa7, 0xfe, 0x99, 0x12, 0x9c, 0x17, 0x23, 0xb3, 0xe9, 0x4d, 0xd3, 0xb1, 0xdd, 0x5e, 0x9b, - 0x38, 0xa7, 0xf1, 0x0e, 0x25, 0xbf, 0x50, 0x29, 0xf7, 0x0b, 0xb5, 0x53, 0x5f, 0x68, 0xa8, 0xc8, - 0x17, 0x0a, 0x37, 0xf2, 0x01, 0x5f, 0xe9, 0xcf, 0x35, 0x98, 0xcd, 0x5a, 0x8b, 0x53, 0x90, 0x81, - 0xda, 0x71, 0x19, 0xe8, 0x56, 0x51, 0xa1, 0x36, 0x39, 0xf4, 0x1c, 0x59, 0xe8, 0xfb, 0x25, 0xb8, - 0x18, 0x35, 0xaf, 0x3b, 0x7e, 0x60, 0xd8, 0x36, 0x57, 0xf3, 0x9c, 0xfc, 0x77, 0xef, 0xc4, 0x44, - 0xd9, 0xd5, 0xc1, 0xa6, 0xaa, 0x8e, 0x3d, 0x57, 0xd3, 0xbc, 0x9b, 0xd0, 0x34, 0xaf, 0x1d, 0x23, - 0xce, 0xfe, 0x4a, 0xe7, 0xff, 0xa6, 0xc1, 0x5c, 0x76, 0xc7, 0x53, 0xd8, 0x54, 0x6e, 0x7c, 0x53, - 0x7d, 0xf0, 0xf8, 0x66, 0x9d, 0xb3, 0xad, 0x7e, 0xbb, 0x94, 0x37, 0x5b, 0x26, 0x6c, 0x6f, 0xc2, - 0x19, 0x2a, 0x05, 0xf9, 0x81, 0x50, 0x89, 0x1e, 0xcd, 0x56, 0x40, 0xea, 0x88, 0xce, 0xe0, 0x38, - 0x0c, 0x9c, 0x04, 0x8a, 0x56, 0x61, 0x8c, 0x8a, 0x3e, 0x14, 0x7e, 0xe9, 0xf0, 0xf0, 0xc3, 0xdb, - 0xa8, 0xc1, 0xfb, 0x62, 0x09, 0x04, 0xfd, 0x04, 0x4c, 0x99, 0xe1, 0x89, 0x3a, 0xe0, 0xa1, 
0x30, - 0x09, 0x95, 0x29, 0xaf, 0x6b, 0x6a, 0x6f, 0x1c, 0x07, 0xa6, 0xff, 0x95, 0x06, 0x0f, 0xf6, 0xdb, - 0x5b, 0xe8, 0x55, 0x80, 0xa6, 0x64, 0x2f, 0xb8, 0xa9, 0x48, 0x41, 0xf5, 0x76, 0xc8, 0xa4, 0x44, - 0x07, 0x34, 0x2c, 0xf2, 0xb1, 0x82, 0x24, 0xe3, 0xfd, 0xb1, 0x74, 0x42, 0xef, 0x8f, 0xfa, 0x7f, - 0xd7, 0x54, 0x52, 0xa4, 0x7e, 0xdb, 0x37, 0x1b, 0x29, 0x52, 0xc7, 0x9e, 0xab, 0x5f, 0xfb, 0x56, - 0x09, 0xae, 0x66, 0x77, 0x51, 0xee, 0xde, 0x0f, 0xc0, 0x68, 0x87, 0xdb, 0xf3, 0x0c, 0xb1, 0xbb, - 0xf1, 0x51, 0x4a, 0x59, 0xb8, 0xb5, 0xcd, 0xfd, 0xbd, 0xca, 0x5c, 0x16, 0xa1, 0x17, 0x76, 0x3a, - 0xa2, 0x1f, 0xb2, 0x12, 0x5a, 0x06, 0xce, 0xfd, 0xfd, 0xf8, 0x21, 0x89, 0x8b, 0xb1, 0x41, 0xec, - 0x43, 0x2b, 0x16, 0x3e, 0xa1, 0xc1, 0x74, 0x6c, 0x47, 0xfb, 0xb3, 0x23, 0x6c, 0x8f, 0x16, 0x7a, - 0xfa, 0x89, 0x1d, 0x95, 0xe8, 0xe6, 0x8e, 0x15, 0xfb, 0x38, 0x81, 0x30, 0x41, 0x66, 0xd5, 0x55, - 0x7d, 0xd3, 0x91, 0x59, 0x75, 0xf0, 0x39, 0x64, 0xf6, 0x97, 0x4a, 0x79, 0xb3, 0x65, 0x64, 0xf6, - 0x1e, 0x8c, 0x4b, 0x4b, 0x57, 0x49, 0x2e, 0x6e, 0x0c, 0x3a, 0x26, 0x0e, 0x2e, 0x32, 0x7b, 0x90, - 0x25, 0x3e, 0x8e, 0x70, 0xa1, 0x9f, 0xd1, 0x00, 0xa2, 0x0f, 0x23, 0x0e, 0xd5, 0xfa, 0xf1, 0x2d, - 0x87, 0xc2, 0xd6, 0x4c, 0xd3, 0x23, 0xad, 0x6c, 0x0a, 0x05, 0xaf, 0xfe, 0xbf, 0x87, 0x00, 0xa5, - 0xc7, 0x4e, 0xd9, 0xcd, 0x6d, 0xcb, 0x31, 0x93, 0x02, 0xc1, 0x6d, 0xcb, 0x31, 0x31, 0xab, 0x39, - 0x04, 0x43, 0xfa, 0x0c, 0x9c, 0x69, 0xd9, 0xee, 0x86, 0x61, 0xdb, 0x3d, 0x61, 0xfa, 0x29, 0x8c, - 0x08, 0xcf, 0xd1, 0x8b, 0xe9, 0x66, 0xbc, 0x0a, 0x27, 0xdb, 0xa2, 0x0e, 0xcc, 0x78, 0x54, 0x94, - 0x6d, 0x5a, 0x36, 0x13, 0x9d, 0xdc, 0x6e, 0x50, 0x50, 0x57, 0xc2, 0xd8, 0x7b, 0x9c, 0x80, 0x85, - 0x53, 0xd0, 0xd1, 0xdb, 0x60, 0xac, 0xe3, 0x59, 0x6d, 0xc3, 0xeb, 0x31, 0xe1, 0xac, 0xbc, 0x38, - 0x41, 0x6f, 0xb8, 0x35, 0x5e, 0x84, 0x65, 0x1d, 0xfa, 0x28, 0x8c, 0xdb, 0xd6, 0x26, 0x69, 0xf6, - 0x9a, 0x36, 0x11, 0xca, 0x8d, 0x3b, 0xc7, 0xb3, 0x65, 0x96, 0x25, 0x58, 0xf1, 0xa4, 0x2a, 0x7f, - 0xe2, 0x08, 0x21, 0xaa, 0xc3, 0xb9, 0x7b, 0xae, 0xb7, 0x4d, 0x3c, 0x9b, 0xf8, 0x7e, 0xa3, 0xdb, - 0xe9, 0xb8, 0x5e, 0x40, 0x4c, 0xa6, 0x02, 0x29, 0x73, 0xfb, 0xd6, 0x17, 0xd2, 0xd5, 0x38, 0xab, - 0x8f, 0xfe, 0x46, 0x09, 0x2e, 0xf7, 0x19, 0x04, 0xc2, 0xf4, 0x6c, 0x88, 0x35, 0x12, 0x3b, 0xe1, - 0x9d, 0x7c, 0x3f, 0x8b, 0xc2, 0xfb, 0x7b, 0x95, 0x87, 0xfb, 0x00, 0x68, 0xd0, 0xad, 0x48, 0x5a, - 0x3d, 0x1c, 0x81, 0x41, 0x75, 0x18, 0x35, 0x23, 0x8d, 0xe0, 0xf8, 0xe2, 0x13, 0x94, 0x5a, 0x73, - 0x9d, 0xdd, 0x61, 0xa1, 0x09, 0x00, 0x68, 0x19, 0xc6, 0xf8, 0x43, 0x2c, 0x11, 0x94, 0xff, 0x49, - 0x26, 0x1e, 0xf3, 0xa2, 0xc3, 0x02, 0x93, 0x20, 0xf4, 0xff, 0xa5, 0xc1, 0x58, 0xd5, 0xf5, 0x48, - 0x6d, 0xb5, 0x81, 0x7a, 0x30, 0xa1, 0x98, 0xe0, 0x0b, 0x2a, 0x58, 0x90, 0x2c, 0x30, 0x88, 0x0b, - 0x11, 0x34, 0x69, 0x2e, 0x1a, 0x16, 0x60, 0x15, 0x17, 0x7a, 0x95, 0xae, 0xf9, 0x3d, 0xcf, 0x0a, - 0x28, 0xe2, 0x41, 0xde, 0xaf, 0x38, 0x62, 0x2c, 0x61, 0xf1, 0x1d, 0x15, 0xfe, 0xc4, 0x11, 0x16, - 0x7d, 0x8d, 0x52, 0x80, 0xe4, 0x30, 0xd1, 0x75, 0x18, 0x6e, 0xbb, 0xa6, 0xfc, 0xee, 0x6f, 0x97, - 0xe7, 0x7b, 0xc5, 0x35, 0xe9, 0xda, 0x5e, 0x4c, 0xf7, 0x60, 0x5a, 0x36, 0xd6, 0x47, 0x5f, 0x85, - 0x99, 0x24, 0x7e, 0x74, 0x1d, 0xa6, 0x9b, 0x6e, 0xbb, 0xed, 0x3a, 0x8d, 0xee, 0xe6, 0xa6, 0xb5, - 0x4b, 0x62, 0x76, 0xbc, 0xd5, 0x58, 0x0d, 0x4e, 0xb4, 0xd4, 0xbf, 0xa4, 0xc1, 0x10, 0xfd, 0x2e, - 0x3a, 0x8c, 0x9a, 0x6e, 0xdb, 0xb0, 0x1c, 0x31, 0x2a, 0x66, 0xb3, 0x5c, 0x63, 0x25, 0x58, 0xd4, - 0xa0, 0x0e, 0x8c, 0x4b, 0xa6, 0x69, 0x20, 0x5b, 0x92, 0xda, 0x6a, 0x23, 0xb4, 0xbf, 0x0b, 0x29, - 0xb9, 0x2c, 0xf1, 
0x71, 0x84, 0x44, 0x37, 0xe0, 0x6c, 0x6d, 0xb5, 0x51, 0x77, 0x9a, 0x76, 0xd7, - 0x24, 0x4b, 0xbb, 0xec, 0x0f, 0xa5, 0x25, 0x16, 0x2f, 0x11, 0xf3, 0x64, 0xb4, 0x44, 0x34, 0xc2, - 0xb2, 0x8e, 0x36, 0x23, 0xbc, 0x87, 0x30, 0xb6, 0x65, 0xcd, 0x04, 0x10, 0x2c, 0xeb, 0xf4, 0x6f, - 0x97, 0x60, 0x42, 0x19, 0x10, 0xb2, 0x61, 0x8c, 0x4f, 0x57, 0xda, 0xba, 0x2d, 0x15, 0x9c, 0x62, - 0x7c, 0xd4, 0x1c, 0x3b, 0x5f, 0x50, 0x1f, 0x4b, 0x14, 0x2a, 0x5d, 0x2c, 0xf5, 0xa1, 0x8b, 0xf3, - 0x00, 0x7e, 0x64, 0xf9, 0xcd, 0x8f, 0x24, 0xbb, 0x7a, 0x14, 0x7b, 0x6f, 0xa5, 0x05, 0x7a, 0x50, - 0xdc, 0x20, 0xdc, 0x98, 0xa3, 0x9c, 0xb8, 0x3d, 0x36, 0x61, 0xe4, 0x35, 0xd7, 0x21, 0xbe, 0x78, - 0xc3, 0x3a, 0xa6, 0x09, 0x8e, 0x53, 0xfe, 0xe0, 0x25, 0x0a, 0x17, 0x73, 0xf0, 0xfa, 0xaf, 0x68, - 0x00, 0x35, 0x23, 0x30, 0xf8, 0x93, 0xcb, 0x21, 0xec, 0xa5, 0x1f, 0x8c, 0x5d, 0x7c, 0xe5, 0x94, - 0x0d, 0xe9, 0xb0, 0x6f, 0xbd, 0x26, 0xa7, 0x1f, 0x32, 0xd4, 0x1c, 0x7a, 0xc3, 0x7a, 0x8d, 0x60, - 0x56, 0x8f, 0x1e, 0x87, 0x71, 0xe2, 0x34, 0xbd, 0x5e, 0x87, 0x12, 0xef, 0x61, 0xb6, 0xaa, 0xec, - 0x84, 0x2e, 0xc9, 0x42, 0x1c, 0xd5, 0xeb, 0x4f, 0x40, 0x5c, 0x2a, 0x3a, 0x78, 0x94, 0xfa, 0x77, - 0x87, 0xe1, 0xd2, 0xd2, 0x7a, 0xb5, 0x26, 0xe0, 0x59, 0xae, 0x73, 0x9b, 0xf4, 0xfe, 0xd6, 0x3c, - 0xe5, 0x6f, 0xcd, 0x53, 0x8e, 0xd1, 0x3c, 0xe5, 0xbe, 0x06, 0x33, 0x4b, 0xbb, 0x1d, 0xcb, 0x63, - 0x76, 0xfa, 0xc4, 0xa3, 0x62, 0x2c, 0x7a, 0x0c, 0xc6, 0x76, 0xf8, 0xbf, 0x62, 0x73, 0x85, 0xaa, - 0x02, 0xd1, 0x02, 0xcb, 0x7a, 0xb4, 0x09, 0xd3, 0x84, 0x75, 0x67, 0xfc, 0xaa, 0x11, 0x14, 0xd9, - 0x40, 0xdc, 0x0d, 0x24, 0x06, 0x05, 0x27, 0xa0, 0xa2, 0x06, 0x4c, 0x37, 0x6d, 0xc3, 0xf7, 0xad, - 0x4d, 0xab, 0x19, 0x59, 0xa0, 0x8d, 0x2f, 0x3e, 0xce, 0xae, 0x9e, 0x58, 0xcd, 0xfd, 0xbd, 0xca, - 0x05, 0x31, 0xce, 0x78, 0x05, 0x4e, 0x80, 0xd0, 0xbf, 0x50, 0x82, 0xa9, 0xa5, 0xdd, 0x8e, 0xeb, - 0x77, 0x3d, 0xc2, 0x9a, 0x9e, 0x82, 0x04, 0xfe, 0x18, 0x8c, 0x6d, 0x19, 0x8e, 0x69, 0x13, 0x4f, - 0x50, 0x9f, 0x70, 0x6d, 0x6f, 0xf1, 0x62, 0x2c, 0xeb, 0xd1, 0xeb, 0x00, 0x7e, 0x73, 0x8b, 0x98, - 0x5d, 0xc6, 0xc1, 0xf0, 0x43, 0x72, 0xbb, 0x08, 0x0d, 0x8d, 0xcd, 0xb1, 0x11, 0x82, 0x14, 0x94, - 0x3d, 0xfc, 0x8d, 0x15, 0x74, 0xfa, 0x77, 0x34, 0x38, 0x1b, 0xeb, 0x77, 0x0a, 0x82, 0xe5, 0x66, - 0x5c, 0xb0, 0x5c, 0x18, 0x78, 0xae, 0x39, 0xf2, 0xe4, 0xcf, 0x96, 0xe0, 0x81, 0x9c, 0x35, 0x49, - 0x99, 0x2b, 0x68, 0xa7, 0x64, 0xae, 0xd0, 0x85, 0x89, 0xc0, 0xb5, 0x85, 0xa1, 0xa4, 0x5c, 0x81, - 0x42, 0xc6, 0x08, 0xeb, 0x21, 0x98, 0xc8, 0x18, 0x21, 0x2a, 0xf3, 0xb1, 0x8a, 0x47, 0xff, 0x8a, - 0x06, 0xe3, 0xa1, 0xfe, 0xea, 0x87, 0xea, 0x0d, 0xe9, 0xf0, 0x9e, 0x6b, 0xfa, 0x1f, 0x97, 0xe0, - 0x62, 0x08, 0x5b, 0xca, 0x09, 0x8d, 0x80, 0xd2, 0x8d, 0x83, 0x85, 0xe0, 0x07, 0xc5, 0x3d, 0xac, - 0xf0, 0x02, 0x0a, 0xa7, 0x40, 0xf9, 0xa6, 0xae, 0xd7, 0x71, 0x7d, 0xc9, 0x0e, 0x70, 0xbe, 0x89, - 0x17, 0x61, 0x59, 0x87, 0x56, 0x61, 0xc4, 0xa7, 0xf8, 0xc4, 0x6d, 0x72, 0xc4, 0xd5, 0x60, 0x1c, - 0x0d, 0x1b, 0x2f, 0xe6, 0x60, 0xd0, 0xeb, 0xaa, 0x4a, 0x63, 0xa4, 0xb8, 0x9a, 0x85, 0xce, 0xc4, - 0x94, 0x2b, 0x92, 0xe1, 0xcd, 0x91, 0xa5, 0xd6, 0xd0, 0x97, 0x61, 0x46, 0x58, 0x3c, 0xf0, 0x6d, - 0xe3, 0x34, 0x09, 0x7a, 0x4f, 0x6c, 0x67, 0x3c, 0x92, 0x78, 0x45, 0x3e, 0x9f, 0x6c, 0x1f, 0xed, - 0x18, 0xdd, 0x87, 0xf2, 0x4d, 0x31, 0x48, 0x34, 0x07, 0x25, 0x4b, 0x7e, 0x0b, 0x10, 0x30, 0x4a, - 0xf5, 0x1a, 0x2e, 0x59, 0x66, 0xc8, 0x0f, 0x95, 0x72, 0xb9, 0x36, 0xe5, 0x5a, 0x1a, 0xea, 0x7f, - 0x2d, 0xe9, 0xdf, 0x2b, 0xc1, 0x79, 0x89, 0x55, 0xce, 0xb1, 0x26, 0xde, 0xe0, 0x0e, 0xe0, 0x0d, - 0x0f, 0x56, 0x8a, 0xdc, 0x81, 0x61, 0x46, 
0x00, 0x0b, 0xbd, 0xcd, 0x85, 0x00, 0xe9, 0x70, 0x30, - 0x03, 0x84, 0x3e, 0x0a, 0xa3, 0xb6, 0xb1, 0x41, 0x6c, 0x69, 0x69, 0x56, 0x48, 0x85, 0x94, 0x35, - 0x5d, 0xae, 0xd9, 0xf4, 0xb9, 0x35, 0x7d, 0xf8, 0x64, 0xc3, 0x0b, 0xb1, 0xc0, 0x39, 0xf7, 0x34, - 0x4c, 0x28, 0xcd, 0xd0, 0x0c, 0x0c, 0x6d, 0x13, 0xfe, 0x36, 0x3b, 0x8e, 0xe9, 0xbf, 0xe8, 0x3c, - 0x8c, 0xec, 0x18, 0x76, 0x57, 0x2c, 0x09, 0xe6, 0x3f, 0xae, 0x97, 0xde, 0xa3, 0xe9, 0xbf, 0xa9, - 0xc1, 0xc4, 0x2d, 0x6b, 0x83, 0x78, 0xdc, 0x6c, 0x81, 0x89, 0x42, 0x31, 0xc7, 0xe1, 0x89, 0x2c, - 0xa7, 0x61, 0xb4, 0x0b, 0xe3, 0xe2, 0xa6, 0x09, 0xad, 0x5a, 0x6f, 0x16, 0x7b, 0x04, 0x0e, 0x51, - 0x0b, 0x0a, 0xae, 0x3a, 0x2a, 0x49, 0x0c, 0x38, 0x42, 0xa6, 0xbf, 0x0e, 0xe7, 0x32, 0x3a, 0xa1, - 0x0a, 0x3b, 0xbe, 0x5e, 0x20, 0xb6, 0x85, 0x3c, 0x8f, 0x5e, 0x80, 0x79, 0x39, 0xba, 0x04, 0x43, - 0xc4, 0x31, 0xc5, 0x9e, 0x18, 0xdb, 0xdf, 0xab, 0x0c, 0x2d, 0x39, 0x26, 0xa6, 0x65, 0x94, 0x4c, - 0xd9, 0x6e, 0x8c, 0x27, 0x61, 0x64, 0x6a, 0x59, 0x94, 0xe1, 0xb0, 0x96, 0x3d, 0xdb, 0x27, 0x5f, - 0xa8, 0x29, 0x77, 0x3a, 0xb3, 0x99, 0x38, 0x3d, 0x83, 0x3c, 0x8c, 0x27, 0x4f, 0xe2, 0xe2, 0xac, - 0x58, 0x90, 0xd4, 0x99, 0xc6, 0x29, 0xbc, 0xfa, 0xef, 0x0d, 0xc3, 0x43, 0xb7, 0x5c, 0xcf, 0x7a, - 0xcd, 0x75, 0x02, 0xc3, 0x5e, 0x73, 0xcd, 0xc8, 0x40, 0x4d, 0x10, 0xe5, 0x4f, 0x69, 0xf0, 0x40, - 0xb3, 0xd3, 0xe5, 0xdc, 0xad, 0xb4, 0x1b, 0x5a, 0x23, 0x9e, 0xe5, 0x16, 0xb5, 0x53, 0x63, 0xae, - 0xa9, 0xd5, 0xb5, 0xbb, 0x59, 0x20, 0x71, 0x1e, 0x2e, 0x66, 0x2e, 0x67, 0xba, 0xf7, 0x1c, 0x36, - 0xb8, 0x46, 0xc0, 0x56, 0xf3, 0xb5, 0xe8, 0x23, 0x14, 0x34, 0x97, 0xab, 0x65, 0x42, 0xc4, 0x39, - 0x98, 0xd0, 0xc7, 0xe1, 0x82, 0xc5, 0x07, 0x87, 0x89, 0x61, 0x5a, 0x0e, 0xf1, 0x7d, 0x66, 0x34, - 0x36, 0x88, 0x3d, 0x58, 0x3d, 0x0b, 0x20, 0xce, 0xc6, 0x83, 0x5e, 0x06, 0xf0, 0x7b, 0x4e, 0x53, - 0xac, 0xff, 0x48, 0x21, 0xac, 0x9c, 0x09, 0x0c, 0xa1, 0x60, 0x05, 0x22, 0x95, 0x70, 0x83, 0x70, - 0x53, 0x8e, 0x32, 0xdb, 0x32, 0x26, 0xe1, 0x46, 0x7b, 0x28, 0xaa, 0xd7, 0xff, 0x99, 0x06, 0x63, - 0xc2, 0xfd, 0x1d, 0xbd, 0x3d, 0xa1, 0xe5, 0x09, 0x69, 0x4f, 0x42, 0xd3, 0xd3, 0x63, 0x4f, 0x7d, - 0x42, 0xc3, 0x27, 0x58, 0x89, 0x42, 0x6a, 0x02, 0x81, 0x38, 0x52, 0x17, 0xc6, 0x9e, 0xfc, 0xa4, - 0x0a, 0x51, 0x41, 0xa6, 0x7f, 0x59, 0x83, 0xb3, 0xa9, 0x5e, 0x87, 0xe0, 0x17, 0x4e, 0xd1, 0x8a, - 0xe6, 0x5b, 0xc3, 0x30, 0xcd, 0x8c, 0xe5, 0x1c, 0xc3, 0xe6, 0x0a, 0x98, 0x53, 0x10, 0x50, 0x1e, - 0x87, 0x71, 0xab, 0xdd, 0xee, 0x06, 0x94, 0x54, 0x0b, 0x1d, 0x3a, 0xfb, 0xe6, 0x75, 0x59, 0x88, - 0xa3, 0x7a, 0xe4, 0x88, 0xab, 0x90, 0x13, 0xf1, 0xe5, 0x62, 0x5f, 0x4e, 0x9d, 0xe0, 0x3c, 0xbd, - 0xb6, 0xf8, 0x7d, 0x95, 0x75, 0x53, 0x7e, 0x5a, 0x03, 0xf0, 0x03, 0xcf, 0x72, 0x5a, 0xb4, 0x50, - 0x5c, 0x97, 0xf8, 0x18, 0xd0, 0x36, 0x42, 0xa0, 0x1c, 0x79, 0xb8, 0x46, 0x51, 0x05, 0x56, 0x30, - 0xa3, 0x05, 0xc1, 0x25, 0x70, 0x8a, 0xff, 0xa3, 0x09, 0x7e, 0xe8, 0xa1, 0x74, 0x74, 0x17, 0xe1, - 0x12, 0x19, 0xb1, 0x11, 0x73, 0x4f, 0xc1, 0x78, 0x88, 0xef, 0xa0, 0x5b, 0x77, 0x52, 0xb9, 0x75, - 0xe7, 0x9e, 0x81, 0x33, 0x89, 0xe1, 0x1e, 0xe9, 0xd2, 0xfe, 0x0f, 0x1a, 0xa0, 0xf8, 0xec, 0x4f, - 0x41, 0xb4, 0x6b, 0xc5, 0x45, 0xbb, 0xc5, 0xc1, 0x3f, 0x59, 0x8e, 0x6c, 0xf7, 0x8d, 0x29, 0x60, - 0xd1, 0x41, 0xc2, 0xe8, 0x2b, 0xe2, 0xe2, 0xa2, 0xf7, 0x6c, 0xe4, 0x61, 0x20, 0x4e, 0xee, 0x00, - 0xf7, 0xec, 0xed, 0x04, 0xac, 0xe8, 0x9e, 0x4d, 0xd6, 0xe0, 0x14, 0x5e, 0xf4, 0x19, 0x0d, 0x66, - 0x8c, 0x78, 0x74, 0x10, 0xb9, 0x32, 0x85, 0xbc, 0x4f, 0x13, 0x91, 0x46, 0xa2, 0xb1, 0x24, 0x2a, - 0x7c, 0x9c, 0x42, 0x8b, 0xde, 0x09, 0x93, 0x46, 0xc7, 0x5a, 0xe8, 
0x9a, 0x16, 0x15, 0x0d, 0x64, - 0x68, 0x07, 0x26, 0xae, 0x2e, 0xac, 0xd5, 0xc3, 0x72, 0x1c, 0x6b, 0x15, 0x86, 0xe1, 0x10, 0x0b, - 0x39, 0x3c, 0x60, 0x18, 0x0e, 0xb1, 0x86, 0x51, 0x18, 0x0e, 0xb1, 0x74, 0x2a, 0x12, 0xe4, 0x00, - 0xb8, 0x96, 0xd9, 0x14, 0x28, 0xf9, 0xab, 0x5d, 0x21, 0x09, 0xf9, 0x4e, 0xbd, 0x56, 0x15, 0x18, - 0xd9, 0xed, 0x17, 0xfd, 0xc6, 0x0a, 0x06, 0xf4, 0x79, 0x0d, 0xa6, 0x04, 0xed, 0x16, 0x38, 0xc7, - 0xd8, 0x27, 0x7a, 0xa9, 0xe8, 0x7e, 0x49, 0xec, 0xc9, 0x79, 0xac, 0x02, 0xe7, 0x74, 0x27, 0x74, - 0x50, 0x89, 0xd5, 0xe1, 0xf8, 0x38, 0xd0, 0xdf, 0xd7, 0xe0, 0xbc, 0x4f, 0xbc, 0x1d, 0xab, 0x49, - 0x16, 0x9a, 0x4d, 0xb7, 0xeb, 0xc8, 0xef, 0x50, 0x2e, 0x1e, 0xb5, 0xa0, 0x91, 0x01, 0x8f, 0x5b, - 0x46, 0x67, 0xd5, 0xe0, 0x4c, 0xfc, 0x94, 0x2d, 0x3b, 0x73, 0xcf, 0x08, 0x9a, 0x5b, 0x55, 0xa3, - 0xb9, 0xc5, 0x74, 0xe5, 0xdc, 0x18, 0xba, 0xe0, 0xbe, 0x7e, 0x21, 0x0e, 0x8a, 0xbf, 0x3a, 0x27, - 0x0a, 0x71, 0x12, 0x21, 0x72, 0xa1, 0xec, 0x89, 0x90, 0x4b, 0xb3, 0x50, 0x9c, 0xa5, 0x48, 0xc5, - 0x6f, 0xe2, 0x8c, 0xbd, 0xfc, 0x85, 0x43, 0x24, 0xa8, 0x05, 0x0f, 0x71, 0xd1, 0x66, 0xc1, 0x71, - 0x9d, 0x5e, 0xdb, 0xed, 0xfa, 0x0b, 0xdd, 0x60, 0x8b, 0x38, 0x81, 0xd4, 0x55, 0x4e, 0xb0, 0x6b, - 0x94, 0xd9, 0x83, 0x2f, 0xf5, 0x6b, 0x88, 0xfb, 0xc3, 0x41, 0x2f, 0x42, 0x99, 0xec, 0x10, 0x27, - 0x58, 0x5f, 0x5f, 0x9e, 0x9d, 0x2c, 0xc4, 0xed, 0xb1, 0x29, 0x2c, 0x09, 0x18, 0x38, 0x84, 0x86, - 0xb6, 0x61, 0xcc, 0xe6, 0x31, 0xb3, 0x66, 0xa7, 0x8a, 0x13, 0xc5, 0x64, 0xfc, 0x2d, 0x2e, 0xff, - 0x89, 0x1f, 0x58, 0x62, 0x40, 0x1d, 0xb8, 0x6a, 0x92, 0x4d, 0xa3, 0x6b, 0x07, 0xab, 0x6e, 0x40, - 0x59, 0xda, 0x5e, 0xa4, 0x9f, 0x92, 0x26, 0xf4, 0xd3, 0xcc, 0xc1, 0xf8, 0x91, 0xfd, 0xbd, 0xca, - 0xd5, 0xda, 0x01, 0x6d, 0xf1, 0x81, 0xd0, 0x50, 0x0f, 0x1e, 0x16, 0x6d, 0xee, 0x3a, 0x1e, 0x31, - 0x9a, 0x5b, 0x74, 0x95, 0xd3, 0x48, 0xcf, 0x30, 0xa4, 0xff, 0xdf, 0xfe, 0x5e, 0xe5, 0xe1, 0xda, - 0xc1, 0xcd, 0xf1, 0x61, 0x60, 0xce, 0x7d, 0x00, 0x50, 0xfa, 0x9c, 0x1f, 0x74, 0x61, 0x97, 0xd5, - 0x0b, 0xfb, 0x8b, 0x23, 0x70, 0x99, 0x92, 0x8f, 0x88, 0x4d, 0x5d, 0x31, 0x1c, 0xa3, 0xf5, 0xc3, - 0x79, 0xb5, 0xfd, 0xa6, 0x06, 0x0f, 0x6c, 0x65, 0x8b, 0x90, 0x82, 0x51, 0x7e, 0xae, 0x90, 0xa8, - 0xdf, 0x4f, 0x2a, 0xe5, 0x27, 0xab, 0x6f, 0x13, 0x9c, 0x37, 0x28, 0xf4, 0x01, 0x98, 0x71, 0x5c, - 0x93, 0x54, 0xeb, 0x35, 0xbc, 0x62, 0xf8, 0xdb, 0x0d, 0xf9, 0xf2, 0x37, 0xc2, 0x6d, 0x4e, 0x56, - 0x13, 0x75, 0x38, 0xd5, 0x1a, 0xed, 0x00, 0xea, 0xb8, 0xe6, 0xd2, 0x8e, 0xd5, 0x94, 0x6f, 0x4e, - 0xc5, 0xed, 0x5c, 0xd8, 0xc3, 0xd6, 0x5a, 0x0a, 0x1a, 0xce, 0xc0, 0xc0, 0x64, 0x60, 0x3a, 0x98, - 0x15, 0xd7, 0xb1, 0x02, 0xd7, 0x63, 0x7e, 0x24, 0x03, 0x89, 0x82, 0x4c, 0x06, 0x5e, 0xcd, 0x84, - 0x88, 0x73, 0x30, 0xe9, 0xff, 0x43, 0x83, 0x33, 0x74, 0x5b, 0xac, 0x79, 0xee, 0x6e, 0xef, 0x87, - 0x71, 0x43, 0x3e, 0x26, 0x8c, 0x20, 0xb8, 0xee, 0xe6, 0x82, 0x62, 0x00, 0x31, 0xce, 0xc6, 0x1c, - 0xd9, 0x3c, 0xa8, 0xea, 0xab, 0xa1, 0x7c, 0xf5, 0x95, 0xfe, 0xf9, 0x12, 0x67, 0x31, 0xa5, 0xfa, - 0xe8, 0x87, 0xf2, 0x1c, 0x3e, 0x05, 0x53, 0xb4, 0x6c, 0xc5, 0xd8, 0x5d, 0xab, 0x3d, 0xef, 0xda, - 0xd2, 0x95, 0x87, 0x99, 0xe7, 0xde, 0x56, 0x2b, 0x70, 0xbc, 0x1d, 0xba, 0x0e, 0x63, 0x1d, 0xee, - 0x30, 0x2c, 0x84, 0x9b, 0xab, 0xdc, 0x52, 0x80, 0x15, 0xdd, 0xdf, 0xab, 0x9c, 0x8d, 0x1e, 0x4b, - 0x44, 0x21, 0x96, 0x1d, 0xf4, 0xcf, 0x5e, 0x00, 0x06, 0xdc, 0x26, 0xc1, 0x0f, 0xe3, 0x9a, 0x3c, - 0x01, 0x13, 0xcd, 0x4e, 0xb7, 0x7a, 0xa3, 0xf1, 0x5c, 0xd7, 0x65, 0x42, 0x2b, 0x8b, 0x6d, 0x48, - 0x79, 0xce, 0xea, 0xda, 0x5d, 0x59, 0x8c, 0xd5, 0x36, 0x94, 0x3a, 0x34, 0x3b, 0x5d, 0x41, 
0x6f, - 0xd7, 0x54, 0x1b, 0x55, 0x46, 0x1d, 0xaa, 0x6b, 0x77, 0x63, 0x75, 0x38, 0xd5, 0x1a, 0x7d, 0x1c, - 0x26, 0x89, 0x38, 0xb8, 0xb7, 0x0c, 0xcf, 0x14, 0x74, 0xa1, 0x5e, 0x74, 0xf2, 0xe1, 0xd2, 0x4a, - 0x6a, 0xc0, 0x59, 0xf5, 0x25, 0x05, 0x05, 0x8e, 0x21, 0x44, 0x1f, 0x86, 0x4b, 0xf2, 0x37, 0xfd, - 0xca, 0xae, 0x99, 0x24, 0x14, 0x23, 0xdc, 0x47, 0x73, 0x29, 0xaf, 0x11, 0xce, 0xef, 0x8f, 0x7e, - 0x43, 0x83, 0x8b, 0x61, 0xad, 0xe5, 0x58, 0xed, 0x6e, 0x1b, 0x93, 0xa6, 0x6d, 0x58, 0x6d, 0xc1, - 0xa0, 0xbf, 0x70, 0x6c, 0x13, 0x8d, 0x83, 0xe7, 0xc4, 0x2a, 0xbb, 0x0e, 0xe7, 0x0c, 0x09, 0x7d, - 0x59, 0x83, 0xab, 0xb2, 0x6a, 0xcd, 0x23, 0xbe, 0xdf, 0xf5, 0x48, 0xe4, 0x48, 0x26, 0x96, 0x64, - 0xac, 0x10, 0xed, 0x64, 0x9c, 0xca, 0xd2, 0x01, 0xb0, 0xf1, 0x81, 0xd8, 0xd5, 0xed, 0xd2, 0x70, - 0x37, 0x03, 0xc1, 0xd1, 0x9f, 0xd4, 0x76, 0xa1, 0x28, 0x70, 0x0c, 0x21, 0xfa, 0xe7, 0x1a, 0x3c, - 0xa0, 0x16, 0xa8, 0xbb, 0x85, 0xb3, 0xf2, 0x2f, 0x1e, 0xdb, 0x60, 0x12, 0xf0, 0xb9, 0x2e, 0x38, - 0xa7, 0x12, 0xe7, 0x8d, 0x8a, 0x92, 0xed, 0x36, 0xdb, 0x98, 0x9c, 0xdd, 0x1f, 0xe1, 0x64, 0x9b, - 0xef, 0x55, 0x1f, 0xcb, 0x3a, 0x2a, 0xe8, 0x76, 0x5c, 0x73, 0xcd, 0x32, 0xfd, 0x65, 0xab, 0x6d, - 0x05, 0x8c, 0x29, 0x1f, 0xe2, 0xcb, 0xb1, 0xe6, 0x9a, 0x6b, 0xf5, 0x1a, 0x2f, 0xc7, 0xb1, 0x56, - 0xcc, 0x25, 0xda, 0x6a, 0x1b, 0x2d, 0xb2, 0xd6, 0xb5, 0xed, 0x35, 0xcf, 0x65, 0x0a, 0xc3, 0x1a, - 0x31, 0x4c, 0xdb, 0x72, 0x48, 0x41, 0x26, 0x9c, 0x1d, 0xb7, 0x7a, 0x1e, 0x50, 0x9c, 0x8f, 0x0f, - 0xcd, 0x03, 0x6c, 0x1a, 0x96, 0xdd, 0xb8, 0x67, 0x74, 0xee, 0x38, 0x8c, 0x53, 0x2f, 0x73, 0x11, - 0xf6, 0x46, 0x58, 0x8a, 0x95, 0x16, 0x74, 0x37, 0x51, 0x2a, 0x88, 0x09, 0x0f, 0xc5, 0xc3, 0xb8, - 0xea, 0xe3, 0xd8, 0x4d, 0x12, 0x20, 0x5f, 0xbe, 0xdb, 0x0a, 0x0a, 0x1c, 0x43, 0x88, 0x3e, 0xa5, - 0xc1, 0xb4, 0xdf, 0xf3, 0x03, 0xd2, 0x0e, 0xc7, 0x70, 0xe6, 0xb8, 0xc7, 0xc0, 0x54, 0xa9, 0x8d, - 0x18, 0x12, 0x9c, 0x40, 0x8a, 0x0c, 0xb8, 0xcc, 0x56, 0xf5, 0x66, 0xf5, 0x96, 0xd5, 0xda, 0x0a, - 0x1d, 0x9d, 0xd7, 0x88, 0xd7, 0x24, 0x4e, 0x30, 0x3b, 0xc3, 0xf6, 0x0d, 0x33, 0xa5, 0xa9, 0xe7, - 0x37, 0xc3, 0xfd, 0x60, 0xa0, 0x97, 0x61, 0x4e, 0x54, 0x2f, 0xbb, 0xf7, 0x52, 0x18, 0xce, 0x32, - 0x0c, 0xcc, 0x74, 0xa8, 0x9e, 0xdb, 0x0a, 0xf7, 0x81, 0x80, 0xea, 0x70, 0xce, 0x27, 0x1e, 0x7b, - 0x09, 0x21, 0xe1, 0xe6, 0xf1, 0x67, 0x51, 0x64, 0x35, 0xdc, 0x48, 0x57, 0xe3, 0xac, 0x3e, 0xe8, - 0x99, 0xd0, 0x31, 0xa9, 0x47, 0x0b, 0x9e, 0x5b, 0x6b, 0xcc, 0x9e, 0x63, 0xe3, 0x3b, 0xa7, 0xf8, - 0x1b, 0xc9, 0x2a, 0x9c, 0x6c, 0x4b, 0x79, 0x0b, 0x59, 0xb4, 0xd8, 0xf5, 0xfc, 0x60, 0xf6, 0x3c, - 0xeb, 0xcc, 0x78, 0x0b, 0xac, 0x56, 0xe0, 0x78, 0x3b, 0x74, 0x1d, 0xa6, 0x7d, 0xd2, 0x6c, 0xba, - 0xed, 0x8e, 0x10, 0xaf, 0x66, 0x2f, 0xb0, 0xd1, 0xf3, 0x2f, 0x18, 0xab, 0xc1, 0x89, 0x96, 0xa8, - 0x07, 0xe7, 0xc2, 0xc0, 0x34, 0xcb, 0x6e, 0x6b, 0xc5, 0xd8, 0x65, 0xac, 0xfa, 0xc5, 0x83, 0x4f, - 0xe0, 0xbc, 0x7c, 0xda, 0x9e, 0x7f, 0xae, 0x6b, 0x38, 0x81, 0x15, 0xf4, 0xf8, 0x72, 0x55, 0xd3, - 0xe0, 0x70, 0x16, 0x0e, 0xb4, 0x0c, 0xe7, 0x13, 0xc5, 0x37, 0x2c, 0x9b, 0xf8, 0xb3, 0x0f, 0xb0, - 0x69, 0x33, 0x1d, 0x49, 0x35, 0xa3, 0x1e, 0x67, 0xf6, 0x42, 0x77, 0xe0, 0x42, 0xc7, 0x73, 0x03, - 0xd2, 0x0c, 0x6e, 0x53, 0xf6, 0xc4, 0x16, 0x13, 0xf4, 0x67, 0x67, 0xd9, 0x5a, 0xb0, 0x57, 0xa0, - 0xb5, 0xac, 0x06, 0x38, 0xbb, 0x1f, 0xfa, 0xa2, 0x06, 0x57, 0xfc, 0xc0, 0x23, 0x46, 0xdb, 0x72, - 0x5a, 0x55, 0xd7, 0x71, 0x08, 0x23, 0x93, 0x75, 0x33, 0x32, 0xba, 0xbf, 0x54, 0x88, 0x4e, 0xe9, - 0xfb, 0x7b, 0x95, 0x2b, 0x8d, 0xbe, 0x90, 0xf1, 0x01, 0x98, 0xd1, 0xeb, 0x00, 0x6d, 0xd2, 0x76, - 0xbd, 0x1e, 0xa5, 
0x48, 0xb3, 0x73, 0xc5, 0x8d, 0x98, 0x56, 0x42, 0x28, 0xfc, 0xf8, 0xc7, 0xde, - 0xaf, 0xa2, 0x4a, 0xac, 0xa0, 0xd3, 0xf7, 0x4a, 0x70, 0x21, 0xf3, 0xe2, 0xa1, 0x27, 0x80, 0xb7, - 0x5b, 0x90, 0x41, 0x6a, 0xc5, 0x93, 0x0f, 0x3b, 0x01, 0x2b, 0xf1, 0x2a, 0x9c, 0x6c, 0x4b, 0xd9, - 0x42, 0x76, 0x52, 0x6f, 0x34, 0xa2, 0xfe, 0xa5, 0x88, 0x2d, 0xac, 0x27, 0xea, 0x70, 0xaa, 0x35, - 0xaa, 0xc2, 0x59, 0x51, 0x56, 0xa7, 0x92, 0x95, 0x7f, 0xc3, 0x23, 0x92, 0xe1, 0xa6, 0x32, 0xca, - 0xd9, 0x7a, 0xb2, 0x12, 0xa7, 0xdb, 0xd3, 0x59, 0xd0, 0x1f, 0xea, 0x28, 0x86, 0xa3, 0x59, 0xac, - 0xc6, 0xab, 0x70, 0xb2, 0xad, 0x14, 0x7d, 0x63, 0x43, 0x18, 0x89, 0x66, 0xb1, 0x9a, 0xa8, 0xc3, - 0xa9, 0xd6, 0xfa, 0x7f, 0x1c, 0x86, 0x87, 0x0f, 0xc1, 0xac, 0xa1, 0x76, 0xf6, 0x72, 0x1f, 0xfd, - 0xe0, 0x1e, 0xee, 0xf3, 0x74, 0x72, 0x3e, 0xcf, 0xd1, 0xf1, 0x1d, 0xf6, 0x73, 0xfa, 0x79, 0x9f, - 0xf3, 0xe8, 0x28, 0x0f, 0xff, 0xf9, 0xdb, 0xd9, 0x9f, 0xbf, 0xe0, 0xaa, 0x1e, 0xb8, 0x5d, 0x3a, - 0x39, 0xdb, 0xa5, 0xe0, 0xaa, 0x1e, 0x62, 0x7b, 0xfd, 0xe9, 0x30, 0x3c, 0x72, 0x18, 0xc6, 0xb1, - 0xe0, 0xfe, 0xca, 0x20, 0x79, 0x27, 0xba, 0xbf, 0xf2, 0xfc, 0x9a, 0x4e, 0x70, 0x7f, 0x65, 0xa0, - 0x3c, 0xe9, 0xfd, 0x95, 0xb7, 0xaa, 0x27, 0xb5, 0xbf, 0xf2, 0x56, 0xf5, 0x10, 0xfb, 0xeb, 0x2f, - 0x93, 0xf7, 0x43, 0xc8, 0x2f, 0xd6, 0x61, 0xa8, 0xd9, 0xe9, 0x16, 0x24, 0x52, 0xcc, 0x40, 0xa8, - 0xba, 0x76, 0x17, 0x53, 0x18, 0x08, 0xc3, 0x28, 0xdf, 0x3f, 0x05, 0x49, 0x10, 0xf3, 0x90, 0xe1, - 0x5b, 0x12, 0x0b, 0x48, 0x74, 0xa9, 0x48, 0x67, 0x8b, 0xb4, 0x89, 0x67, 0xd8, 0x8d, 0xc0, 0xf5, - 0x8c, 0x56, 0x51, 0x6a, 0xc3, 0x96, 0x6a, 0x29, 0x01, 0x0b, 0xa7, 0xa0, 0xd3, 0x05, 0xe9, 0x58, - 0x66, 0x41, 0xfa, 0xc2, 0x16, 0x64, 0xad, 0x5e, 0xc3, 0x14, 0x86, 0xfe, 0x8f, 0xc6, 0x41, 0x09, - 0xfc, 0x86, 0x3e, 0x0c, 0x97, 0x0c, 0xdb, 0x76, 0xef, 0xad, 0x79, 0xd6, 0x8e, 0x65, 0x93, 0x16, - 0x31, 0x43, 0x66, 0xca, 0x17, 0x66, 0x64, 0x4c, 0x60, 0x5a, 0xc8, 0x6b, 0x84, 0xf3, 0xfb, 0xa3, - 0x37, 0x34, 0x38, 0xdb, 0x4c, 0x06, 0xdb, 0x1a, 0xc4, 0xd0, 0x24, 0x15, 0xb9, 0x8b, 0x9f, 0xa7, - 0x54, 0x31, 0x4e, 0xa3, 0x45, 0x3f, 0xa5, 0x71, 0xa5, 0x5c, 0xf8, 0x4c, 0x22, 0xbe, 0xd9, 0xcd, - 0x63, 0x7a, 0x50, 0x8c, 0xb4, 0x7b, 0xd1, 0xdb, 0x55, 0x1c, 0x21, 0xfa, 0xb2, 0x06, 0x17, 0xb6, - 0xb3, 0xde, 0x12, 0xc4, 0x97, 0xbd, 0x53, 0x74, 0x28, 0x39, 0x8f, 0x13, 0x9c, 0x9d, 0xcd, 0x6c, - 0x80, 0xb3, 0x07, 0x12, 0xae, 0x52, 0xa8, 0x5e, 0x15, 0x44, 0xa0, 0xf0, 0x2a, 0x25, 0xf4, 0xb4, - 0xd1, 0x2a, 0x85, 0x15, 0x38, 0x8e, 0x10, 0x75, 0x60, 0x7c, 0x5b, 0xea, 0xb4, 0x85, 0x1e, 0xab, - 0x5a, 0x14, 0xbb, 0xa2, 0x18, 0xe7, 0x86, 0x34, 0x61, 0x21, 0x8e, 0x90, 0xa0, 0x2d, 0x18, 0xdb, - 0xe6, 0x84, 0x48, 0xe8, 0x9f, 0x16, 0x06, 0x96, 0x8f, 0xb9, 0x1a, 0x44, 0x14, 0x61, 0x09, 0x5e, - 0xb5, 0xa2, 0x2d, 0x1f, 0xe0, 0xdc, 0xf1, 0x45, 0x0d, 0x2e, 0xec, 0x10, 0x2f, 0xb0, 0x9a, 0xc9, - 0x97, 0x9c, 0xf1, 0xe2, 0x32, 0xfc, 0xf3, 0x59, 0x00, 0xf9, 0x36, 0xc9, 0xac, 0xc2, 0xd9, 0x43, - 0xa0, 0x12, 0x3d, 0x57, 0xc8, 0x37, 0x02, 0x23, 0xb0, 0x9a, 0xeb, 0xee, 0x36, 0x71, 0xa2, 0xfc, - 0x24, 0x4c, 0x13, 0x54, 0xe6, 0x12, 0xfd, 0x52, 0x7e, 0x33, 0xdc, 0x0f, 0x86, 0xfe, 0x7d, 0x0d, - 0x52, 0x6a, 0x65, 0xf4, 0x0b, 0x1a, 0x4c, 0x6e, 0x12, 0x23, 0xe8, 0x7a, 0xe4, 0xa6, 0x11, 0x84, - 0x1e, 0xe7, 0xcf, 0x1f, 0x87, 0x36, 0x7b, 0xfe, 0x86, 0x02, 0x98, 0x1b, 0x04, 0x84, 0x41, 0x23, - 0xd5, 0x2a, 0x1c, 0x1b, 0xc1, 0xdc, 0xb3, 0x70, 0x36, 0xd5, 0xf1, 0x48, 0x2f, 0x8c, 0xff, 0x4a, - 0x83, 0xac, 0x94, 0x3a, 0xe8, 0x65, 0x18, 0x31, 0x4c, 0x33, 0x8c, 0x91, 0xff, 0x74, 0x31, 0xdb, - 0x14, 0x53, 0x75, 0xec, 0x67, 0x3f, 0x31, 
0x07, 0x8b, 0x6e, 0x00, 0x32, 0x62, 0x2f, 0xdc, 0x2b, - 0x91, 0xbb, 0x2a, 0x7b, 0x09, 0x5b, 0x48, 0xd5, 0xe2, 0x8c, 0x1e, 0xfa, 0xcf, 0x6a, 0x80, 0xd2, - 0x61, 0x46, 0x91, 0x07, 0x65, 0xb1, 0x95, 0xe5, 0x57, 0xaa, 0x15, 0x74, 0x29, 0x89, 0xf9, 0x47, - 0x45, 0x86, 0x4e, 0xa2, 0xc0, 0xc7, 0x21, 0x1e, 0xfd, 0xaf, 0x34, 0x88, 0xe2, 0x68, 0xa3, 0x77, - 0xc1, 0x84, 0x49, 0xfc, 0xa6, 0x67, 0x75, 0x82, 0xc8, 0x9b, 0x2a, 0xf4, 0xca, 0xa8, 0x45, 0x55, - 0x58, 0x6d, 0x87, 0x74, 0x18, 0x0d, 0x0c, 0x7f, 0xbb, 0x5e, 0x13, 0x42, 0x25, 0x63, 0x01, 0xd6, - 0x59, 0x09, 0x16, 0x35, 0x51, 0xc8, 0xb0, 0xa1, 0x43, 0x84, 0x0c, 0x43, 0x9b, 0xc7, 0x10, 0x1f, - 0x0d, 0x1d, 0x1c, 0x1b, 0x4d, 0xff, 0xb5, 0x12, 0x9c, 0xa1, 0x4d, 0x56, 0x0c, 0xcb, 0x09, 0x88, - 0xc3, 0x7c, 0x07, 0x0a, 0x2e, 0x42, 0x0b, 0xa6, 0x82, 0x98, 0x6f, 0xdc, 0xd1, 0x3d, 0xcb, 0x42, - 0x6b, 0x9a, 0xb8, 0x47, 0x5c, 0x1c, 0x2e, 0x7a, 0x5a, 0x3a, 0x6f, 0x70, 0xf1, 0xfb, 0x61, 0xb9, - 0x55, 0x99, 0x47, 0xc6, 0x7d, 0xe1, 0x68, 0x18, 0x06, 0x5f, 0x8f, 0xf9, 0x69, 0x3c, 0x05, 0x53, - 0xc2, 0x88, 0x9a, 0xc7, 0x7e, 0x13, 0xe2, 0x37, 0xbb, 0x61, 0x6e, 0xa8, 0x15, 0x38, 0xde, 0x4e, - 0xff, 0x66, 0x09, 0xe2, 0x21, 0xde, 0x8b, 0xae, 0x52, 0x3a, 0xf0, 0x5d, 0xe9, 0xc4, 0x02, 0xdf, - 0xbd, 0x83, 0xe5, 0x47, 0xe1, 0x89, 0xb4, 0xf8, 0x13, 0xb9, 0x9a, 0xd5, 0x84, 0xa7, 0xc1, 0x0a, - 0x5b, 0x44, 0xcb, 0x3a, 0x7c, 0xe4, 0x65, 0x7d, 0x97, 0xb0, 0xae, 0x1c, 0x89, 0x85, 0x1f, 0x94, - 0xd6, 0x95, 0x67, 0x63, 0x1d, 0x15, 0x57, 0x93, 0xaf, 0x69, 0x30, 0x26, 0x62, 0xeb, 0x1e, 0xc2, - 0x95, 0x69, 0x13, 0x46, 0x98, 0xc8, 0x33, 0x08, 0x37, 0xd8, 0xd8, 0x72, 0xdd, 0x20, 0x16, 0x61, - 0x98, 0xf9, 0x0e, 0xb0, 0x7f, 0x31, 0x07, 0xcf, 0x0c, 0xec, 0xbc, 0xe6, 0x96, 0x15, 0x90, 0x66, - 0x20, 0xe3, 0x96, 0x4a, 0x03, 0x3b, 0xa5, 0x1c, 0xc7, 0x5a, 0xe9, 0x5f, 0x1a, 0x86, 0xab, 0x02, - 0x70, 0x8a, 0x45, 0x0a, 0x09, 0x5c, 0x0f, 0xce, 0x89, 0x6f, 0x5b, 0xf3, 0x0c, 0x2b, 0x34, 0x3d, - 0x28, 0x26, 0xfa, 0x8a, 0x64, 0x71, 0x29, 0x70, 0x38, 0x0b, 0x07, 0x8f, 0xc0, 0xc9, 0x8a, 0x6f, - 0x11, 0xc3, 0x0e, 0xb6, 0x24, 0xee, 0xd2, 0x20, 0x11, 0x38, 0xd3, 0xf0, 0x70, 0x26, 0x16, 0x66, - 0xfa, 0x20, 0x2a, 0xaa, 0x1e, 0x31, 0x54, 0xbb, 0x8b, 0x01, 0xcc, 0xff, 0x57, 0x32, 0x21, 0xe2, - 0x1c, 0x4c, 0x4c, 0x87, 0x68, 0xec, 0x32, 0x95, 0x04, 0x26, 0x81, 0x67, 0xb1, 0x48, 0xd1, 0xa1, - 0x16, 0x7d, 0x25, 0x5e, 0x85, 0x93, 0x6d, 0xd1, 0x75, 0x98, 0x66, 0xa6, 0x24, 0x51, 0xa8, 0xab, - 0x91, 0x28, 0x9a, 0xc2, 0x6a, 0xac, 0x06, 0x27, 0x5a, 0xea, 0xbf, 0xae, 0xc1, 0xa4, 0xba, 0xed, - 0x0e, 0xe1, 0xd7, 0xd4, 0x55, 0x2e, 0xc3, 0x01, 0x7c, 0x6e, 0x54, 0xac, 0x87, 0xb9, 0x0f, 0x3f, - 0x3b, 0x04, 0xe7, 0x32, 0xfa, 0x30, 0xc3, 0x00, 0x92, 0xb8, 0x58, 0x07, 0x31, 0x0c, 0x48, 0x5d, - 0xd2, 0xa1, 0x61, 0x40, 0xb2, 0x06, 0xa7, 0xf0, 0xa2, 0xe7, 0x61, 0xa8, 0xe9, 0x59, 0x62, 0x59, - 0x9e, 0x2a, 0x24, 0x16, 0xe2, 0xfa, 0xe2, 0x84, 0xc0, 0x38, 0x54, 0xc5, 0x75, 0x4c, 0x01, 0xd2, - 0xeb, 0x41, 0x3d, 0xd4, 0xf2, 0xae, 0x66, 0xd7, 0x83, 0x7a, 0xf6, 0x7d, 0x1c, 0x6f, 0x87, 0x5e, - 0x84, 0x59, 0xc1, 0xaf, 0x4b, 0x4f, 0x66, 0xd7, 0xf1, 0x03, 0x7a, 0xfe, 0x02, 0x41, 0x4e, 0x1f, - 0xdc, 0xdf, 0xab, 0xcc, 0xde, 0xce, 0x69, 0x83, 0x73, 0x7b, 0xeb, 0x7f, 0x31, 0x04, 0x13, 0x4a, - 0xfc, 0x71, 0xb4, 0x32, 0x88, 0xa2, 0x23, 0x9a, 0xb1, 0x54, 0x76, 0xac, 0xc0, 0x50, 0xab, 0xd3, - 0x2d, 0xa8, 0xe9, 0x08, 0xc1, 0xdd, 0xa4, 0xe0, 0x5a, 0x9d, 0x2e, 0x7a, 0x3e, 0xd4, 0x9d, 0x14, - 0xd3, 0x6e, 0x84, 0x7e, 0x27, 0x09, 0xfd, 0x89, 0x3c, 0x2e, 0xc3, 0xb9, 0xc7, 0xa5, 0x0d, 0x63, - 0xbe, 0x50, 0xac, 0x8c, 0x14, 0x8f, 0x1d, 0xa3, 0xac, 0xb4, 0x50, 
0xa4, 0x70, 0xa9, 0x4c, 0xea, - 0x59, 0x24, 0x0e, 0xca, 0xf1, 0x75, 0x99, 0x37, 0x2b, 0x13, 0x37, 0xcb, 0x9c, 0xe3, 0xbb, 0xcb, - 0x4a, 0xb0, 0xa8, 0x49, 0x5d, 0x24, 0x63, 0x87, 0xba, 0x48, 0xfe, 0x6e, 0x09, 0x50, 0x7a, 0x18, - 0xe8, 0x61, 0x18, 0x61, 0xde, 0xf0, 0x82, 0x62, 0x84, 0xfc, 0x39, 0xf3, 0x87, 0xc6, 0xbc, 0x0e, - 0x35, 0x44, 0x24, 0x8c, 0x62, 0x9f, 0x93, 0x59, 0xd6, 0x08, 0x7c, 0x4a, 0xd8, 0x8c, 0xab, 0x31, - 0xd7, 0x89, 0xac, 0x9b, 0xf9, 0x2e, 0x8c, 0xb5, 0x2d, 0x87, 0x3d, 0xef, 0x15, 0xd3, 0x37, 0x71, - 0x03, 0x00, 0x0e, 0x02, 0x4b, 0x58, 0xfa, 0x9f, 0x96, 0xe8, 0xd6, 0x8f, 0xf8, 0xd2, 0x1e, 0x80, - 0xd1, 0x0d, 0x5c, 0xce, 0xe0, 0x88, 0x13, 0x50, 0x2f, 0xf6, 0x95, 0x43, 0xa0, 0x0b, 0x21, 0x40, - 0xfe, 0x30, 0x15, 0xfd, 0xc6, 0x0a, 0x32, 0x8a, 0x3a, 0xb0, 0xda, 0xe4, 0x05, 0xcb, 0x31, 0xdd, - 0x7b, 0x62, 0x79, 0x07, 0x45, 0xbd, 0x1e, 0x02, 0xe4, 0xa8, 0xa3, 0xdf, 0x58, 0x41, 0x46, 0x49, - 0x0b, 0x13, 0x6f, 0x1d, 0x96, 0x10, 0x42, 0x8c, 0xcd, 0xb5, 0x6d, 0x79, 0x77, 0x96, 0x39, 0x69, - 0xa9, 0xe6, 0xb4, 0xc1, 0xb9, 0xbd, 0xf5, 0xdf, 0xd0, 0xe0, 0x42, 0xe6, 0x52, 0xa0, 0x9b, 0x70, - 0x36, 0x32, 0xc6, 0x52, 0x89, 0x7d, 0x39, 0x4a, 0x44, 0x72, 0x3b, 0xd9, 0x00, 0xa7, 0xfb, 0xf0, - 0x6c, 0xb7, 0xa9, 0xcb, 0x44, 0x58, 0x72, 0xa9, 0x0c, 0x8c, 0x5a, 0x8d, 0xb3, 0xfa, 0xe8, 0x1f, - 0x8e, 0x0d, 0x36, 0x5a, 0x2c, 0x7a, 0x32, 0x36, 0x48, 0x2b, 0x74, 0x5d, 0x0b, 0x4f, 0xc6, 0x22, - 0x2d, 0xc4, 0xbc, 0x0e, 0x3d, 0xa4, 0x3a, 0x84, 0x86, 0x74, 0x4b, 0x3a, 0x85, 0xea, 0x3f, 0x09, - 0x0f, 0xe4, 0xbc, 0x57, 0xa2, 0x1a, 0x4c, 0xfa, 0xf7, 0x8c, 0xce, 0x22, 0xd9, 0x32, 0x76, 0x2c, - 0x11, 0x60, 0x80, 0x1b, 0xd9, 0x4d, 0x36, 0x94, 0xf2, 0xfb, 0x89, 0xdf, 0x38, 0xd6, 0x4b, 0x0f, - 0x00, 0x84, 0x31, 0xa6, 0xe5, 0xb4, 0xd0, 0x26, 0x94, 0x0d, 0x91, 0x6c, 0x55, 0xec, 0xe3, 0xf7, - 0x15, 0x12, 0xd5, 0x05, 0x0c, 0x6e, 0x25, 0x2e, 0x7f, 0xe1, 0x10, 0xb6, 0xfe, 0x4f, 0x34, 0xb8, - 0x98, 0xed, 0x52, 0x7e, 0x08, 0x06, 0xa4, 0x0d, 0x13, 0x5e, 0xd4, 0x4d, 0x6c, 0xfa, 0x77, 0xab, - 0x31, 0x45, 0x95, 0x20, 0x5a, 0x94, 0x39, 0xab, 0x7a, 0xae, 0x2f, 0xbf, 0x7c, 0x32, 0xcc, 0x68, - 0x28, 0x18, 0x29, 0x23, 0xc1, 0x2a, 0x7c, 0xfd, 0xf7, 0x4a, 0x00, 0xab, 0x24, 0xb8, 0xe7, 0x7a, - 0xdb, 0x74, 0x89, 0x1e, 0x8c, 0xc9, 0x03, 0xe5, 0x1f, 0x5c, 0x58, 0x83, 0x07, 0x61, 0xb8, 0xe3, - 0x9a, 0xbe, 0x20, 0x7f, 0x6c, 0x20, 0xcc, 0x4e, 0x89, 0x95, 0xa2, 0x0a, 0x8c, 0xb0, 0xe7, 0x09, - 0x71, 0x33, 0x31, 0x69, 0x82, 0xf2, 0x82, 0x3e, 0xe6, 0xe5, 0x3c, 0x85, 0x16, 0xf3, 0xbc, 0xf0, - 0x85, 0x78, 0x24, 0x52, 0x68, 0xf1, 0x32, 0x1c, 0xd6, 0xa2, 0xeb, 0x00, 0x56, 0xe7, 0x86, 0xd1, - 0xb6, 0x6c, 0xca, 0x99, 0x8e, 0x86, 0x19, 0x5b, 0xa1, 0xbe, 0x26, 0x4b, 0xef, 0xef, 0x55, 0xca, - 0xe2, 0x57, 0x0f, 0x2b, 0xad, 0xf5, 0xbf, 0x1e, 0x82, 0x58, 0x76, 0xe3, 0x48, 0x13, 0xa4, 0x9d, - 0x8c, 0x26, 0xe8, 0x45, 0x98, 0xb5, 0x5d, 0xc3, 0x5c, 0x34, 0x6c, 0x7a, 0x1a, 0xbd, 0x06, 0xff, - 0x8c, 0x86, 0xd3, 0x0a, 0x53, 0xd8, 0x32, 0xaa, 0xb4, 0x9c, 0xd3, 0x06, 0xe7, 0xf6, 0x46, 0x41, - 0x98, 0x53, 0x79, 0xa8, 0xb8, 0x93, 0xa2, 0xba, 0x16, 0xf3, 0xaa, 0xbf, 0x4e, 0xc8, 0x60, 0x24, - 0xd2, 0x2e, 0x7f, 0x52, 0x83, 0x0b, 0x64, 0x97, 0xfb, 0xab, 0xad, 0x7b, 0xc6, 0xe6, 0xa6, 0xd5, - 0x14, 0xd6, 0xa3, 0xfc, 0xc3, 0x2e, 0xef, 0xef, 0x55, 0x2e, 0x2c, 0x65, 0x35, 0xb8, 0xbf, 0x57, - 0xb9, 0x96, 0xe9, 0x3e, 0xc8, 0x3e, 0x6b, 0x66, 0x17, 0x9c, 0x8d, 0x6a, 0xee, 0x69, 0x98, 0x38, - 0x82, 0xcf, 0x41, 0xcc, 0x49, 0xf0, 0xf7, 0x4b, 0x30, 0x49, 0xf7, 0xdd, 0xb2, 0xdb, 0x34, 0xec, - 0xda, 0x6a, 0xe3, 0x08, 0x39, 0xc1, 0xd1, 0x32, 0x9c, 0xdf, 0x74, 0xbd, 0x26, 0x59, 0xaf, 
0xae, - 0xad, 0xbb, 0xe2, 0x61, 0xa4, 0xb6, 0xda, 0x10, 0x54, 0x9a, 0x89, 0x7a, 0x37, 0x32, 0xea, 0x71, - 0x66, 0x2f, 0x74, 0x07, 0x2e, 0x44, 0xe5, 0x77, 0x3b, 0xdc, 0xdc, 0x84, 0x82, 0x1b, 0x8a, 0xcc, - 0x65, 0x6e, 0x64, 0x35, 0xc0, 0xd9, 0xfd, 0x90, 0x01, 0x97, 0x45, 0xe4, 0x90, 0x1b, 0xae, 0x77, - 0xcf, 0xf0, 0xcc, 0x38, 0xd8, 0xe1, 0x48, 0x71, 0x5c, 0xcb, 0x6f, 0x86, 0xfb, 0xc1, 0xd0, 0x7f, - 0x69, 0x14, 0x14, 0xa7, 0xb2, 0x23, 0x24, 0x5d, 0xfa, 0x55, 0x0d, 0xce, 0x37, 0x6d, 0x8b, 0x38, - 0x41, 0xc2, 0x83, 0x88, 0x93, 0xa3, 0xbb, 0x85, 0xbc, 0xdd, 0x3a, 0xc4, 0xa9, 0xd7, 0x84, 0x75, - 0x4e, 0x35, 0x03, 0xb8, 0xb0, 0x60, 0xca, 0xa8, 0xc1, 0x99, 0x83, 0x61, 0xf3, 0x61, 0xe5, 0xf5, - 0x9a, 0x1a, 0xf2, 0xa0, 0x2a, 0xca, 0x70, 0x58, 0x8b, 0x9e, 0x80, 0x89, 0x96, 0xe7, 0x76, 0x3b, - 0x7e, 0x95, 0x99, 0x04, 0xf3, 0xbd, 0xcf, 0xf8, 0xc2, 0x9b, 0x51, 0x31, 0x56, 0xdb, 0x50, 0x2e, - 0x97, 0xff, 0x5c, 0xf3, 0xc8, 0xa6, 0xb5, 0x2b, 0x88, 0x1c, 0xe3, 0x72, 0x6f, 0x2a, 0xe5, 0x38, - 0xd6, 0x8a, 0x79, 0x2d, 0xfb, 0x7e, 0x97, 0x78, 0x77, 0xf1, 0xb2, 0xc8, 0xb6, 0xc0, 0xbd, 0x96, - 0x65, 0x21, 0x8e, 0xea, 0xd1, 0xe7, 0x34, 0x98, 0xf6, 0xc8, 0xab, 0x5d, 0xcb, 0x23, 0x26, 0x43, - 0xea, 0x0b, 0xcf, 0x3e, 0x3c, 0x98, 0x37, 0xe1, 0x3c, 0x8e, 0x01, 0xe5, 0x14, 0x22, 0x54, 0xae, - 0xc5, 0x2b, 0x71, 0x62, 0x04, 0x74, 0xa9, 0x7c, 0xab, 0xe5, 0x58, 0x4e, 0x6b, 0xc1, 0x6e, 0xf9, - 0xb3, 0x65, 0x46, 0xf4, 0x38, 0x0b, 0x1d, 0x15, 0x63, 0xb5, 0x0d, 0x15, 0x2f, 0xbb, 0x3e, 0x3d, - 0xf7, 0x6d, 0xc2, 0xd7, 0x77, 0x3c, 0xd2, 0x3e, 0xde, 0x55, 0x2b, 0x70, 0xbc, 0x1d, 0xba, 0x0e, - 0xd3, 0xb2, 0x40, 0xac, 0x32, 0xf0, 0x58, 0x77, 0x74, 0x9c, 0x77, 0x63, 0x35, 0x38, 0xd1, 0x72, - 0x6e, 0x01, 0xce, 0x65, 0x4c, 0xf3, 0x48, 0xc4, 0xe5, 0xff, 0x6a, 0x70, 0x81, 0x67, 0x8c, 0x94, - 0x79, 0x1a, 0x64, 0x50, 0xbb, 0xec, 0xf8, 0x70, 0xda, 0x89, 0xc6, 0x87, 0xfb, 0x01, 0xc4, 0xc1, - 0xd3, 0x7f, 0xbd, 0x04, 0x6f, 0x3d, 0xf0, 0x5c, 0xa2, 0x7f, 0xa8, 0xc1, 0x04, 0xd9, 0x0d, 0x3c, - 0x23, 0xf4, 0x9b, 0xa0, 0x9b, 0x74, 0xf3, 0x44, 0x88, 0xc0, 0xfc, 0x52, 0x84, 0x88, 0x6f, 0xdc, - 0x90, 0xc5, 0x52, 0x6a, 0xb0, 0x3a, 0x1e, 0x2a, 0xb4, 0xf2, 0x58, 0x90, 0xea, 0x33, 0x85, 0x48, - 0xe4, 0x2b, 0x6a, 0xe6, 0xde, 0x0f, 0x33, 0x49, 0xc8, 0x47, 0xda, 0x2b, 0xbf, 0x5b, 0x82, 0xb1, - 0x35, 0xcf, 0xa5, 0xdc, 0xdf, 0x29, 0x04, 0x3f, 0x30, 0x62, 0xf1, 0xd1, 0x0b, 0xf9, 0x33, 0x8b, - 0xc1, 0xe6, 0xe6, 0x66, 0xb0, 0x12, 0xb9, 0x19, 0x16, 0x06, 0x41, 0xd2, 0x3f, 0x19, 0xc3, 0xd7, - 0x35, 0x98, 0x10, 0x2d, 0x4f, 0xc1, 0xc5, 0xff, 0x23, 0x71, 0x17, 0xff, 0xf7, 0x0e, 0x30, 0xaf, - 0x1c, 0xdf, 0xfe, 0x2f, 0x6a, 0x30, 0x25, 0x5a, 0xac, 0x90, 0xf6, 0x06, 0xf1, 0xd0, 0x0d, 0x18, - 0xf3, 0xbb, 0xec, 0x43, 0x8a, 0x09, 0x5d, 0x56, 0xe5, 0x09, 0x6f, 0xc3, 0x68, 0xb2, 0x6c, 0xd4, - 0xbc, 0x89, 0x92, 0xf1, 0x80, 0x17, 0x60, 0xd9, 0x99, 0x4a, 0x2f, 0x9e, 0x6b, 0xa7, 0x82, 0x3e, - 0x61, 0xd7, 0x26, 0x98, 0xd5, 0x50, 0xc6, 0x9c, 0xfe, 0x95, 0x2a, 0x3c, 0xc6, 0x98, 0xd3, 0x6a, - 0x1f, 0xf3, 0x72, 0xfd, 0x53, 0xc3, 0xe1, 0x62, 0xb3, 0xa8, 0xe4, 0xb7, 0x60, 0xbc, 0xe9, 0x11, - 0x23, 0x20, 0xe6, 0x62, 0xef, 0x30, 0x83, 0x63, 0xd7, 0x55, 0x55, 0xf6, 0xc0, 0x51, 0x67, 0x7a, - 0x33, 0xa8, 0x2f, 0x43, 0xa5, 0xe8, 0x12, 0xcd, 0x7d, 0x15, 0x7a, 0x1f, 0x8c, 0xb8, 0xf7, 0x9c, - 0xd0, 0xc0, 0xa4, 0x2f, 0x62, 0x36, 0x95, 0x3b, 0xb4, 0x35, 0xe6, 0x9d, 0xd4, 0xa0, 0x67, 0xc3, - 0x7d, 0x82, 0x9e, 0xd9, 0x30, 0xd6, 0x66, 0x9f, 0x61, 0xa0, 0x00, 0xf8, 0xb1, 0x0f, 0xaa, 0xa6, - 0x48, 0x62, 0x90, 0xb1, 0x44, 0x41, 0x6f, 0x78, 0x7a, 0x0b, 0xf9, 0x1d, 0xa3, 0x49, 0xd4, 0x1b, - 0x7e, 0x55, 0x16, 
0xe2, 0xa8, 0x1e, 0xf5, 0xe2, 0xd1, 0xf4, 0xc6, 0x8a, 0x6b, 0xf0, 0xc4, 0xf0, - 0x94, 0x00, 0x7a, 0x7c, 0xe9, 0x73, 0x23, 0xea, 0xfd, 0xdc, 0x70, 0xb8, 0x49, 0x45, 0x3e, 0x8b, - 0xec, 0x0c, 0xca, 0x5a, 0xa1, 0x0c, 0xca, 0x3f, 0x2e, 0xa3, 0xbe, 0x96, 0x62, 0xe9, 0xbc, 0xc2, - 0xa8, 0xaf, 0x93, 0x02, 0x75, 0x2c, 0xd2, 0x6b, 0x17, 0xce, 0xf9, 0x81, 0x61, 0x93, 0x86, 0x25, - 0x34, 0x1d, 0x7e, 0x60, 0xb4, 0x3b, 0x05, 0xc2, 0xae, 0x72, 0x2f, 0x83, 0x34, 0x28, 0x9c, 0x05, - 0x1f, 0xfd, 0x8c, 0x06, 0xb3, 0xac, 0x7c, 0xa1, 0x1b, 0xb8, 0x3c, 0x3e, 0x78, 0x84, 0xfc, 0xe8, - 0xcf, 0xcf, 0x4c, 0x00, 0x6c, 0xe4, 0xc0, 0xc3, 0xb9, 0x98, 0xd0, 0xeb, 0x70, 0x81, 0xde, 0xc0, - 0x0b, 0xcd, 0xc0, 0xda, 0xb1, 0x82, 0x5e, 0x34, 0x84, 0xa3, 0xc7, 0x5a, 0x65, 0xc2, 0xc6, 0x72, - 0x16, 0x30, 0x9c, 0x8d, 0x43, 0xff, 0x4b, 0x0d, 0x50, 0x7a, 0x0b, 0x21, 0x1b, 0xca, 0xa6, 0x34, - 0xfb, 0xd7, 0x8e, 0x25, 0xd4, 0x63, 0x48, 0x99, 0x43, 0x6f, 0x81, 0x10, 0x03, 0x72, 0x61, 0xfc, - 0xde, 0x96, 0x15, 0x10, 0xdb, 0xf2, 0x83, 0x63, 0x8a, 0x2c, 0x19, 0x86, 0x59, 0x7b, 0x41, 0x02, - 0xc6, 0x11, 0x0e, 0xfd, 0xe7, 0x87, 0xa1, 0x1c, 0x06, 0xba, 0x3e, 0xf8, 0x25, 0xb6, 0x0b, 0xa8, - 0xa9, 0x24, 0x0b, 0x1b, 0x44, 0x03, 0xc3, 0x98, 0xb0, 0x6a, 0x0a, 0x18, 0xce, 0x40, 0x80, 0x5e, - 0x87, 0xf3, 0x96, 0xb3, 0xe9, 0x19, 0x7e, 0xe0, 0x75, 0x99, 0xae, 0x7c, 0x90, 0x9c, 0x5b, 0x4c, - 0x86, 0xaa, 0x67, 0x80, 0xc3, 0x99, 0x48, 0x10, 0x81, 0x31, 0x1e, 0xcf, 0x5f, 0x06, 0xfd, 0x2b, - 0x94, 0x7d, 0x95, 0xe7, 0x09, 0x88, 0xa8, 0x26, 0xff, 0xed, 0x63, 0x09, 0x9b, 0x07, 0xe4, 0xe0, - 0xff, 0xcb, 0x57, 0x63, 0xb1, 0xef, 0xab, 0xc5, 0xf1, 0x45, 0x89, 0x7c, 0x79, 0x40, 0x8e, 0x78, - 0x21, 0x4e, 0x22, 0xd4, 0xff, 0x48, 0x83, 0x11, 0xee, 0x4e, 0x7b, 0xf2, 0x1c, 0xdc, 0x4f, 0xc6, - 0x38, 0xb8, 0x42, 0x69, 0x83, 0xd8, 0x50, 0x73, 0x13, 0xda, 0x7c, 0x4d, 0x83, 0x71, 0xd6, 0xe2, - 0x14, 0x58, 0xaa, 0x97, 0xe3, 0x2c, 0xd5, 0xd3, 0x85, 0x67, 0x93, 0xc3, 0x50, 0xfd, 0xd1, 0x90, - 0x98, 0x0b, 0xe3, 0x58, 0xea, 0x70, 0x4e, 0xd8, 0xac, 0x2e, 0x5b, 0x9b, 0x84, 0x6e, 0xf1, 0x9a, - 0xd1, 0xe3, 0x0f, 0x44, 0x23, 0xc2, 0x63, 0x2a, 0x5d, 0x8d, 0xb3, 0xfa, 0xa0, 0xdf, 0xd7, 0x28, - 0x6f, 0x10, 0x78, 0x56, 0x73, 0xa0, 0x2c, 0x31, 0xe1, 0xd8, 0xe6, 0x57, 0x38, 0x30, 0x2e, 0x99, - 0xdc, 0x8d, 0x98, 0x04, 0x56, 0x7a, 0x7f, 0xaf, 0x52, 0xc9, 0x50, 0x99, 0x45, 0x19, 0x23, 0xfc, - 0xe0, 0x93, 0x7f, 0xd6, 0xb7, 0x09, 0x53, 0x53, 0xcb, 0x11, 0xa3, 0x5b, 0x30, 0xe2, 0x37, 0xdd, - 0x0e, 0x39, 0x4a, 0xde, 0xab, 0x70, 0x81, 0x1b, 0xb4, 0x27, 0xe6, 0x00, 0xe6, 0x5e, 0x81, 0x49, - 0x75, 0xe4, 0x19, 0x92, 0x4f, 0x4d, 0x95, 0x7c, 0x8e, 0xfc, 0xd2, 0xa5, 0x4a, 0x4a, 0x7f, 0x50, - 0x82, 0x51, 0x9e, 0x7d, 0xf9, 0x10, 0xca, 0x78, 0x4b, 0x86, 0xe6, 0x1f, 0x20, 0xa9, 0xbc, 0x1a, - 0xc7, 0xf2, 0x25, 0xd7, 0x51, 0xd6, 0x40, 0x8d, 0xce, 0x8f, 0x9c, 0x30, 0xba, 0xe9, 0x50, 0xf1, - 0xdc, 0x3c, 0x7c, 0x62, 0x27, 0x1d, 0xcf, 0xf4, 0x4f, 0x34, 0x98, 0x8c, 0x85, 0x8b, 0x6d, 0xc3, - 0x90, 0x17, 0x66, 0x6d, 0x2b, 0xfa, 0x56, 0x21, 0x2d, 0x9f, 0x2e, 0xf7, 0x69, 0x84, 0x29, 0x9e, - 0x30, 0xb2, 0x6c, 0xe9, 0x98, 0x22, 0xcb, 0xea, 0x9f, 0xd7, 0xe0, 0xa2, 0x9c, 0x50, 0x3c, 0x6e, - 0x12, 0x7a, 0x14, 0xca, 0x46, 0xc7, 0x62, 0x2a, 0x35, 0x55, 0x29, 0xb9, 0xb0, 0x56, 0x67, 0x65, - 0x38, 0xac, 0x45, 0xef, 0x80, 0xb2, 0xdc, 0x78, 0x82, 0xed, 0x0c, 0x69, 0x56, 0xf8, 0xfa, 0x12, - 0xb6, 0x40, 0x6f, 0x53, 0xb2, 0x27, 0x8c, 0x44, 0x7c, 0x42, 0x88, 0x98, 0xbf, 0x02, 0xeb, 0xef, - 0x86, 0xf1, 0x46, 0xe3, 0xd6, 0x42, 0xb3, 0x49, 0x7c, 0xff, 0x08, 0xca, 0x65, 0xfd, 0x33, 0x43, - 0x30, 0x25, 0x02, 0xc0, 0x59, 0x8e, 0x69, 
0x39, 0xad, 0x53, 0xb8, 0x53, 0xd6, 0x61, 0x9c, 0x6b, - 0x33, 0x0e, 0xc8, 0xb0, 0xd7, 0x90, 0x8d, 0x92, 0x61, 0x96, 0xc3, 0x0a, 0x1c, 0x01, 0x42, 0xb7, - 0x61, 0xf4, 0x55, 0x4a, 0xdf, 0xe4, 0xb9, 0x38, 0x14, 0x99, 0x09, 0x37, 0x3d, 0x23, 0x8d, 0x3e, - 0x16, 0x20, 0x90, 0xcf, 0x4c, 0xf3, 0x18, 0xc3, 0x35, 0x48, 0x84, 0x89, 0xd8, 0xca, 0x86, 0xb9, - 0x53, 0x26, 0x85, 0x85, 0x1f, 0xfb, 0x85, 0x43, 0x44, 0x2c, 0x46, 0x7c, 0xac, 0xc7, 0x9b, 0x24, - 0x46, 0x7c, 0x6c, 0xcc, 0x39, 0x57, 0xe3, 0xd3, 0x70, 0x21, 0x73, 0x31, 0x0e, 0x66, 0x67, 0xf5, - 0xdf, 0x2a, 0xc1, 0x70, 0x83, 0x10, 0xf3, 0x14, 0x76, 0xe6, 0xcb, 0x31, 0x6e, 0xe7, 0x7d, 0x85, - 0xa3, 0xd4, 0xe7, 0x29, 0xab, 0x36, 0x13, 0xca, 0xaa, 0xf7, 0x17, 0xc6, 0xd0, 0x5f, 0x53, 0xf5, - 0xcb, 0x25, 0x00, 0xda, 0x6c, 0xd1, 0x68, 0x6e, 0x73, 0x8a, 0x13, 0xee, 0x66, 0x2d, 0x4e, 0x71, - 0xd2, 0xdb, 0xf0, 0x34, 0x1f, 0x6f, 0x75, 0x18, 0xf5, 0xd8, 0x4d, 0x24, 0xde, 0x3d, 0x80, 0xa7, - 0x7d, 0xa6, 0x25, 0x58, 0xd4, 0xc4, 0xa9, 0xc5, 0xf0, 0x31, 0x51, 0x0b, 0x7d, 0x17, 0x58, 0x9e, - 0xce, 0xda, 0x6a, 0x03, 0xb5, 0x95, 0xd5, 0x29, 0x15, 0xe7, 0xe5, 0x05, 0xb8, 0x03, 0x4f, 0xf9, - 0x67, 0x34, 0x38, 0x93, 0x68, 0x7b, 0x08, 0x99, 0xee, 0x44, 0x68, 0xa6, 0xfe, 0x87, 0x1a, 0x94, - 0xe9, 0x58, 0x4e, 0x81, 0xd0, 0xfc, 0xff, 0x71, 0x42, 0xf3, 0x9e, 0xa2, 0x4b, 0x9c, 0x43, 0x5f, - 0xfe, 0xbc, 0x04, 0x2c, 0x1d, 0x84, 0x30, 0x51, 0x50, 0x5e, 0xfe, 0xb5, 0x9c, 0x97, 0xff, 0xab, - 0xc2, 0x70, 0x20, 0xa1, 0xa3, 0x54, 0x8c, 0x07, 0xde, 0xa1, 0xd8, 0x06, 0x0c, 0xc5, 0x8f, 0x4d, - 0x86, 0x7d, 0xc0, 0x6b, 0x30, 0xe5, 0x6f, 0xb9, 0x6e, 0x10, 0xc6, 0x1f, 0x18, 0x2e, 0xae, 0x8f, - 0x66, 0x76, 0xd0, 0x72, 0x2a, 0xfc, 0x01, 0xaa, 0xa1, 0xc2, 0xc6, 0x71, 0x54, 0x68, 0x1e, 0x60, - 0xc3, 0x76, 0x9b, 0xdb, 0xd5, 0x7a, 0x0d, 0x4b, 0xbb, 0x57, 0x66, 0xb4, 0xb4, 0x18, 0x96, 0x62, - 0xa5, 0xc5, 0x40, 0xb6, 0x0c, 0xdf, 0xd3, 0xf8, 0x4a, 0x1f, 0x61, 0xf3, 0x9e, 0x22, 0x45, 0x79, - 0x7b, 0x82, 0xa2, 0x28, 0xc9, 0xe4, 0x63, 0x54, 0xa5, 0x22, 0x19, 0xf6, 0xe1, 0x48, 0xff, 0x1c, - 0x4b, 0x82, 0xf5, 0xbb, 0x62, 0x9a, 0x61, 0x46, 0x91, 0x0e, 0x4c, 0xd9, 0x6a, 0x62, 0x53, 0x71, - 0x46, 0x0a, 0xe5, 0x44, 0x0d, 0x1d, 0x29, 0x62, 0xc5, 0x38, 0x8e, 0x00, 0x3d, 0x05, 0x53, 0x72, - 0x76, 0x74, 0x31, 0xa5, 0xe5, 0x06, 0xdb, 0x0e, 0x6b, 0x6a, 0x05, 0x8e, 0xb7, 0xd3, 0xff, 0xc1, - 0x30, 0x3c, 0xc4, 0xc7, 0xce, 0x34, 0x06, 0x35, 0xd2, 0x21, 0x8e, 0x49, 0x9c, 0x66, 0x8f, 0xf1, - 0xac, 0xa6, 0xdb, 0x42, 0x3f, 0xad, 0x41, 0x99, 0x38, 0x66, 0xc7, 0xb5, 0x1c, 0xa9, 0xea, 0xff, - 0x50, 0xf1, 0x9c, 0x2c, 0x39, 0x58, 0x96, 0x04, 0x02, 0x11, 0x25, 0x53, 0xfc, 0xc2, 0x21, 0x62, - 0x74, 0x0f, 0x46, 0x3a, 0x9e, 0xbb, 0x21, 0x85, 0xb5, 0xe7, 0x8f, 0x7d, 0x04, 0x6b, 0x14, 0x3a, - 0xff, 0xb8, 0xec, 0x5f, 0xcc, 0xf1, 0xa1, 0xd7, 0x61, 0xf4, 0x1e, 0x21, 0x66, 0xa8, 0xd0, 0x7f, - 0xe1, 0xd8, 0x31, 0xbf, 0xc0, 0xc0, 0xf3, 0x0b, 0x8d, 0xff, 0x8f, 0x05, 0x4a, 0x8a, 0x9c, 0x8d, - 0x42, 0x72, 0x96, 0x2f, 0x9c, 0xcc, 0xb4, 0x05, 0x72, 0xfe, 0x3f, 0x16, 0x28, 0x75, 0x0c, 0x6f, - 0x3b, 0xd4, 0x37, 0x3b, 0x8a, 0x0c, 0x71, 0x07, 0xf4, 0x83, 0x87, 0x73, 0x14, 0x80, 0x6b, 0xf0, - 0xf0, 0x21, 0xe6, 0x77, 0x9c, 0x10, 0xf9, 0x27, 0x3a, 0x0a, 0xc4, 0xef, 0x68, 0xf0, 0x88, 0x02, - 0x72, 0x69, 0x97, 0x4a, 0x5e, 0x55, 0xa3, 0x63, 0x34, 0xad, 0xa0, 0xc7, 0xfd, 0xde, 0x8f, 0x94, - 0xc4, 0xe3, 0x33, 0x1a, 0x8c, 0x71, 0x63, 0x27, 0x79, 0x45, 0xbe, 0x3c, 0xe0, 0xbe, 0xc8, 0x1d, - 0x92, 0x8c, 0x0e, 0x2d, 0xe7, 0xc6, 0x7f, 0xfb, 0x58, 0xe2, 0xd7, 0xff, 0xcd, 0x08, 0xfc, 0xc8, - 0xe1, 0x01, 0xa1, 0xef, 0x69, 0xe9, 0x8c, 0xc1, 0xed, 0x93, 0x1d, 
0x7c, 0xa8, 0x69, 0x12, 0xca, - 0x8b, 0x17, 0x52, 0x19, 0x78, 0x8e, 0x49, 0x89, 0xa5, 0xa4, 0x27, 0xfe, 0xa7, 0x1a, 0x4c, 0x52, - 0xd6, 0x21, 0xbc, 0x00, 0xf8, 0x67, 0xea, 0x9c, 0xf0, 0x4c, 0x57, 0x15, 0x94, 0x09, 0x1f, 0x56, - 0xb5, 0x0a, 0xc7, 0xc6, 0x86, 0xee, 0xc6, 0x5f, 0xec, 0xb8, 0x48, 0x7c, 0x25, 0x8b, 0x63, 0x3c, - 0x4a, 0x7e, 0xab, 0x39, 0x1b, 0xa6, 0xe3, 0x2b, 0x7f, 0x92, 0x2a, 0xb8, 0xb9, 0x67, 0xe1, 0x6c, - 0x6a, 0xf6, 0x47, 0x52, 0x40, 0xfd, 0xf4, 0x30, 0x54, 0x94, 0xa5, 0x8e, 0x99, 0x3b, 0x4a, 0xbe, - 0xed, 0x4b, 0x1a, 0x4c, 0x18, 0x8e, 0x23, 0x4c, 0x66, 0xe4, 0xfe, 0x35, 0x07, 0xfc, 0xaa, 0x59, - 0xa8, 0xe6, 0x17, 0x22, 0x34, 0x09, 0x9b, 0x10, 0xa5, 0x06, 0xab, 0xa3, 0xe9, 0x63, 0xf8, 0x58, - 0x3a, 0x35, 0xc3, 0x47, 0xf4, 0x31, 0xc9, 0x2c, 0xf1, 0x6d, 0xf4, 0xe2, 0x09, 0xac, 0x0d, 0xe3, - 0xbd, 0xb2, 0x35, 0x9e, 0x73, 0xef, 0x87, 0x99, 0xe4, 0xca, 0x1d, 0x69, 0x17, 0xfc, 0xd6, 0x50, - 0x8c, 0x54, 0xe7, 0xa2, 0x3f, 0x84, 0x9e, 0xf7, 0xcb, 0x89, 0xcd, 0xc2, 0x49, 0x80, 0x75, 0x52, - 0x0b, 0x72, 0xbc, 0x3b, 0x66, 0xe8, 0xf4, 0x4c, 0x65, 0x07, 0xfd, 0x64, 0x8b, 0x70, 0x41, 0x59, - 0x1f, 0x25, 0x9f, 0xe0, 0x63, 0x30, 0xb6, 0x63, 0xf9, 0x96, 0x8c, 0x48, 0xa4, 0xdc, 0xd0, 0xcf, - 0xf3, 0x62, 0x2c, 0xeb, 0xf5, 0xe5, 0xd8, 0xd9, 0x5f, 0x77, 0x3b, 0xae, 0xed, 0xb6, 0x7a, 0x0b, - 0xf7, 0x0c, 0x8f, 0x60, 0xb7, 0x1b, 0x08, 0x68, 0x87, 0xbd, 0xef, 0x57, 0xe0, 0xaa, 0x02, 0x2d, - 0x33, 0xb4, 0xc2, 0x51, 0xc0, 0x7d, 0x7d, 0x4c, 0x8a, 0x17, 0xc2, 0xf7, 0xf4, 0x77, 0x34, 0xb8, - 0x44, 0xf2, 0xae, 0x02, 0xc1, 0xa2, 0xbf, 0x78, 0x52, 0x57, 0x8d, 0x88, 0x58, 0x9b, 0x57, 0x8d, - 0xf3, 0x47, 0x86, 0x7a, 0xb1, 0xac, 0x9a, 0xa5, 0x41, 0x74, 0xa5, 0x19, 0xdf, 0xbb, 0x5f, 0x4e, - 0x4d, 0xf4, 0x2b, 0x1a, 0x9c, 0xb7, 0x33, 0x8e, 0x8e, 0xe0, 0xab, 0x1b, 0x27, 0x70, 0x2a, 0xf9, - 0xbb, 0x74, 0x56, 0x0d, 0xce, 0x1c, 0x0a, 0xfa, 0xb5, 0xdc, 0x98, 0x1f, 0xfc, 0xd9, 0x78, 0x7d, - 0xc0, 0x41, 0x1e, 0x57, 0xf8, 0x8f, 0x2f, 0x68, 0x80, 0xcc, 0x14, 0x5b, 0x2c, 0x2c, 0x7d, 0x9e, - 0x3b, 0x76, 0x09, 0x85, 0x1b, 0x16, 0xa4, 0xcb, 0x71, 0xc6, 0x20, 0xd8, 0x77, 0x0e, 0x32, 0x8e, - 0xaf, 0x08, 0xe6, 0x3b, 0xe8, 0x77, 0xce, 0xa2, 0x0c, 0xfc, 0x3b, 0x67, 0xd5, 0xe0, 0xcc, 0xa1, - 0xe8, 0x9f, 0x1f, 0xe3, 0x9a, 0x34, 0xf6, 0xf2, 0xbb, 0x01, 0xa3, 0x1b, 0x4c, 0xf3, 0x2a, 0xce, - 0x6d, 0x61, 0x35, 0x2f, 0xd7, 0xdf, 0x72, 0x41, 0x8e, 0xff, 0x8f, 0x05, 0x64, 0xf4, 0x12, 0x0c, - 0x99, 0x8e, 0x2f, 0x0e, 0xdc, 0x7b, 0x07, 0x50, 0x58, 0x46, 0xee, 0x56, 0xb5, 0xd5, 0x06, 0xa6, - 0x40, 0x91, 0x03, 0x65, 0x47, 0x28, 0x9f, 0x84, 0x80, 0x5c, 0x38, 0x61, 0x6b, 0xa8, 0xc4, 0x0a, - 0x55, 0x67, 0xb2, 0x04, 0x87, 0x38, 0x28, 0xbe, 0xc4, 0x6b, 0x4b, 0x61, 0x7c, 0xa1, 0xfa, 0xb5, - 0x9f, 0x86, 0x7b, 0x4d, 0x55, 0xa6, 0x8e, 0x1c, 0x5e, 0x99, 0x3a, 0x95, 0xfb, 0xf8, 0x44, 0x60, - 0x34, 0x30, 0x2c, 0x27, 0xe0, 0xca, 0xb4, 0x82, 0x86, 0x12, 0x74, 0xfc, 0xeb, 0x14, 0x4a, 0xa4, - 0xb5, 0x62, 0x3f, 0x7d, 0x2c, 0x80, 0xd3, 0x8d, 0xb5, 0xc3, 0xd2, 0xa6, 0x8b, 0x83, 0x59, 0x78, - 0x63, 0xf1, 0xe4, 0xeb, 0x7c, 0x63, 0xf1, 0xff, 0xb1, 0x80, 0x8c, 0x5e, 0x81, 0xb2, 0x2f, 0x4d, - 0x5b, 0xca, 0x83, 0x66, 0xeb, 0x15, 0x76, 0x2d, 0xc2, 0xa7, 0x4a, 0x18, 0xb4, 0x84, 0xf0, 0xd1, - 0x06, 0x8c, 0x59, 0xdc, 0x0b, 0x48, 0x84, 0x40, 0x7a, 0xef, 0x00, 0xc9, 0xea, 0xb8, 0x60, 0x2d, - 0x7e, 0x60, 0x09, 0x58, 0xff, 0x3a, 0xf0, 0xb7, 0x10, 0x61, 0x3d, 0xb8, 0x09, 0x65, 0x09, 0x6e, - 0x10, 0xdf, 0x3e, 0x99, 0x1e, 0x94, 0x4f, 0x2d, 0x4c, 0x16, 0x1a, 0xc2, 0x46, 0xd5, 0x2c, 0x1f, - 0xcd, 0x28, 0x69, 0xc2, 0xe1, 0xfc, 0x33, 0x5f, 0x65, 0xf9, 0xfc, 0x64, 0x3c, 0x83, 0xa1, 
0xe2, - 0x5b, 0x2b, 0x8c, 0x75, 0x10, 0xcb, 0xe3, 0x27, 0xc3, 0x21, 0x28, 0x48, 0x72, 0xac, 0x2b, 0x87, - 0x0b, 0x59, 0x57, 0x3e, 0x03, 0x67, 0x84, 0x35, 0x4b, 0x9d, 0xa5, 0xce, 0x0f, 0x7a, 0xc2, 0xfd, - 0x84, 0xd9, 0x39, 0x55, 0xe3, 0x55, 0x38, 0xd9, 0x16, 0xfd, 0x81, 0x06, 0xe5, 0xa6, 0x60, 0x39, - 0xc4, 0xb9, 0x5a, 0x1e, 0xec, 0xc1, 0x6c, 0x5e, 0x72, 0x30, 0x9c, 0x99, 0x7e, 0x5e, 0xd2, 0x08, - 0x59, 0x7c, 0x4c, 0x4a, 0x83, 0x70, 0xd4, 0xe8, 0x8f, 0xa9, 0xbc, 0x60, 0xb3, 0x94, 0xa5, 0xcc, - 0x1b, 0x9d, 0xfb, 0xc5, 0xdc, 0x19, 0x70, 0x16, 0x0b, 0x11, 0x44, 0x3e, 0x91, 0x0f, 0x85, 0x52, - 0x41, 0x54, 0x73, 0x4c, 0x73, 0x51, 0x87, 0x8f, 0xfe, 0xb1, 0x06, 0x8f, 0x70, 0x67, 0xa4, 0x2a, - 0xe5, 0x22, 0x58, 0xe6, 0x77, 0x12, 0xa5, 0x9a, 0x8f, 0x6c, 0x41, 0xcb, 0x47, 0xb6, 0x05, 0x7d, - 0x74, 0x7f, 0xaf, 0xf2, 0x48, 0xf5, 0x10, 0xb0, 0xf1, 0xa1, 0x46, 0x80, 0x5e, 0x83, 0x29, 0x5b, - 0x8d, 0x6b, 0x23, 0x08, 0x4c, 0xa1, 0xe7, 0x98, 0x58, 0x80, 0x1c, 0xae, 0x7f, 0x8f, 0x15, 0xe1, - 0x38, 0xaa, 0xb9, 0x6d, 0x98, 0x8a, 0x6d, 0xb4, 0x13, 0x55, 0x92, 0x38, 0x30, 0x93, 0xdc, 0x0f, - 0x27, 0x6a, 0x17, 0x75, 0x1b, 0xc6, 0xc3, 0x8b, 0x0a, 0x3d, 0xa4, 0x20, 0x8a, 0x18, 0x89, 0xdb, - 0xa4, 0xc7, 0xb1, 0x56, 0x62, 0x02, 0x1e, 0x57, 0xc4, 0x3f, 0x4f, 0x0b, 0x04, 0x40, 0xfd, 0x1b, - 0xe2, 0x95, 0x65, 0x9d, 0xb4, 0x3b, 0xb6, 0x11, 0x90, 0x37, 0xff, 0x1b, 0xbf, 0xfe, 0x5f, 0x35, - 0x7e, 0xdf, 0xf0, 0x6b, 0x15, 0x19, 0x30, 0xd1, 0xe6, 0xc1, 0x9b, 0x59, 0x00, 0x06, 0xad, 0x78, - 0xe8, 0x87, 0x95, 0x08, 0x0c, 0x56, 0x61, 0xa2, 0x7b, 0x30, 0x2e, 0x59, 0x1b, 0xa9, 0x91, 0xb8, - 0x31, 0x18, 0x63, 0x10, 0x72, 0x51, 0xe1, 0xf3, 0xb1, 0x2c, 0xf1, 0x71, 0x84, 0x4b, 0x37, 0x00, - 0xa5, 0xfb, 0x50, 0x29, 0x58, 0xba, 0x3b, 0x68, 0xf1, 0x88, 0x88, 0x29, 0x97, 0x87, 0x03, 0x93, - 0x94, 0xeb, 0x5f, 0x29, 0x41, 0x66, 0xc2, 0x3c, 0xa4, 0xc3, 0x28, 0xf7, 0x40, 0x94, 0xf9, 0xcf, - 0x29, 0x2b, 0xc3, 0xdd, 0x13, 0xb1, 0xa8, 0x41, 0x77, 0xb8, 0x26, 0xc4, 0x31, 0x59, 0x24, 0xc2, - 0x88, 0x4a, 0xa8, 0xbe, 0xae, 0x4b, 0x59, 0x0d, 0x70, 0x76, 0x3f, 0xb4, 0x03, 0xa8, 0x6d, 0xec, - 0x26, 0xa1, 0x0d, 0x90, 0x9a, 0x6a, 0x25, 0x05, 0x0d, 0x67, 0x60, 0xa0, 0x17, 0xa9, 0xd1, 0x6c, - 0x92, 0x4e, 0x40, 0x4c, 0x3e, 0x45, 0xf9, 0xc8, 0xcb, 0x2e, 0xd2, 0x85, 0x78, 0x15, 0x4e, 0xb6, - 0xd5, 0xbf, 0x3b, 0x0c, 0x97, 0xe2, 0x8b, 0x48, 0x4f, 0xa8, 0x74, 0x12, 0x7c, 0x56, 0xfa, 0x40, - 0xf0, 0x85, 0x7c, 0x2c, 0xe9, 0x03, 0x31, 0x5b, 0xf5, 0x08, 0xbb, 0x92, 0x0d, 0xdb, 0x97, 0x9d, - 0x62, 0xfe, 0x10, 0x3f, 0x00, 0x8f, 0xbf, 0x1c, 0xcf, 0xc6, 0xa1, 0x13, 0xf5, 0x6c, 0x7c, 0x43, - 0x83, 0xb9, 0x78, 0xf1, 0x0d, 0xcb, 0xb1, 0xfc, 0x2d, 0x11, 0x4f, 0xef, 0xe8, 0x2e, 0x18, 0x2c, - 0x7d, 0xc5, 0x72, 0x2e, 0x44, 0xdc, 0x07, 0x1b, 0xfa, 0xac, 0x06, 0x97, 0x13, 0xeb, 0x12, 0x8b, - 0xee, 0x77, 0x74, 0x6f, 0x0c, 0xe6, 0xa3, 0xbd, 0x9c, 0x0f, 0x12, 0xf7, 0xc3, 0xa7, 0xff, 0x8b, - 0x12, 0x8c, 0x30, 0x1b, 0x85, 0x37, 0x87, 0x51, 0x3a, 0x1b, 0x6a, 0xae, 0x9d, 0x56, 0x2b, 0x61, - 0xa7, 0xf5, 0x6c, 0x71, 0x14, 0xfd, 0x0d, 0xb5, 0x3e, 0x04, 0x17, 0x59, 0xb3, 0x05, 0x93, 0xa9, - 0x65, 0x7c, 0x62, 0x2e, 0x98, 0x26, 0x8b, 0x10, 0x71, 0xb0, 0x2e, 0xfa, 0x21, 0x18, 0xea, 0x7a, - 0x76, 0x32, 0x66, 0xca, 0x5d, 0xbc, 0x8c, 0x69, 0xb9, 0xfe, 0x86, 0x06, 0x33, 0x0c, 0xb6, 0x72, - 0x7c, 0xd1, 0x0e, 0x94, 0x3d, 0x71, 0x84, 0xc5, 0xb7, 0x59, 0x2e, 0x3c, 0xb5, 0x0c, 0xb2, 0x20, - 0x52, 0x7a, 0x8a, 0x5f, 0x38, 0xc4, 0xa5, 0x7f, 0x7b, 0x14, 0x66, 0xf3, 0x3a, 0xa1, 0xcf, 0x69, - 0x70, 0xb1, 0x19, 0x71, 0x73, 0x0b, 0xdd, 0x60, 0xcb, 0xf5, 0xac, 0xc0, 0x12, 0xc6, 0x3b, 0x05, - 0xc5, 0xdc, 0xea, 
0x42, 0x38, 0x2a, 0x16, 0x8d, 0xae, 0x9a, 0x89, 0x01, 0xe7, 0x60, 0x46, 0xaf, - 0x03, 0x6c, 0x47, 0xe1, 0x6f, 0x4b, 0xc5, 0x13, 0x6d, 0xb0, 0x69, 0x2b, 0x21, 0x72, 0xe5, 0xa0, - 0x98, 0x66, 0x53, 0x29, 0x57, 0xd0, 0x51, 0xe4, 0xbe, 0xbf, 0x75, 0x9b, 0xf4, 0x3a, 0x86, 0x25, - 0x6d, 0x14, 0x8a, 0x23, 0x6f, 0x34, 0x6e, 0x09, 0x50, 0x71, 0xe4, 0x4a, 0xb9, 0x82, 0x0e, 0x7d, - 0x52, 0x83, 0x29, 0x57, 0x75, 0x27, 0x1f, 0xc4, 0x02, 0x36, 0xd3, 0x2f, 0x9d, 0xb3, 0xd0, 0xf1, - 0xaa, 0x38, 0x4a, 0xba, 0x27, 0xce, 0xfa, 0xc9, 0x2b, 0x4b, 0x10, 0xb5, 0x95, 0xc1, 0xf3, 0xf1, - 0x2a, 0xf7, 0x1f, 0x17, 0xc7, 0xd3, 0xd5, 0x69, 0xf4, 0x6c, 0x50, 0x24, 0x68, 0x9a, 0x4b, 0x4e, - 0xd3, 0xeb, 0x31, 0xcf, 0x50, 0x3a, 0xa8, 0xd1, 0xe2, 0x83, 0x5a, 0x5a, 0xaf, 0xd6, 0x62, 0xc0, - 0xe2, 0x83, 0x4a, 0x57, 0xa7, 0xd1, 0xeb, 0x9f, 0x28, 0xc1, 0x03, 0x39, 0x7b, 0xec, 0x6f, 0x8c, - 0xff, 0xff, 0xd7, 0x34, 0x18, 0x67, 0x6b, 0xf0, 0x26, 0x71, 0x22, 0x62, 0x63, 0xcd, 0xb1, 0x64, - 0xfc, 0x43, 0x0d, 0xce, 0xa6, 0xe2, 0xa0, 0x1e, 0xca, 0x05, 0xe5, 0xd4, 0x8c, 0xec, 0xde, 0x16, - 0xc5, 0x3c, 0x1f, 0x8a, 0x1c, 0x9a, 0x93, 0xf1, 0xce, 0xf5, 0x17, 0x60, 0x2a, 0x66, 0xc8, 0x18, - 0xc6, 0x6a, 0xd2, 0x32, 0x63, 0x35, 0xa9, 0xa1, 0x98, 0x4a, 0xfd, 0x42, 0x31, 0x45, 0x5b, 0x3e, - 0x4d, 0xd9, 0xfe, 0xc6, 0x6c, 0xf9, 0xef, 0x9c, 0x11, 0x5b, 0x9e, 0xbd, 0x38, 0xbc, 0x0c, 0xa3, - 0x2c, 0xf0, 0x93, 0xbc, 0x31, 0xaf, 0x17, 0x0e, 0x28, 0xe5, 0x73, 0x49, 0x8a, 0xff, 0x8f, 0x05, - 0x54, 0x54, 0x83, 0x99, 0xa6, 0xed, 0x76, 0x4d, 0x91, 0xa2, 0x74, 0x35, 0x12, 0xda, 0xc2, 0xb8, - 0xa0, 0xd5, 0x44, 0x3d, 0x4e, 0xf5, 0x40, 0x98, 0xbf, 0x59, 0xf0, 0xfb, 0xac, 0x50, 0x5c, 0xd0, - 0xda, 0x6a, 0x83, 0x67, 0xbf, 0x08, 0xdf, 0x2a, 0x5e, 0x05, 0x20, 0x72, 0xf3, 0x4a, 0xdf, 0xcf, - 0x67, 0x8a, 0x45, 0x3c, 0x0d, 0x8f, 0x80, 0x64, 0x3e, 0xc3, 0x22, 0x1f, 0x2b, 0x48, 0x90, 0x07, - 0x13, 0x5b, 0xd6, 0x06, 0xf1, 0x1c, 0xce, 0x47, 0x8d, 0x14, 0x67, 0x11, 0x6f, 0x45, 0x60, 0xb8, - 0x8c, 0xaf, 0x14, 0x60, 0x15, 0x09, 0xf2, 0x38, 0x3b, 0xc2, 0xd5, 0xc3, 0x83, 0x24, 0xeb, 0x8f, - 0xf4, 0xce, 0xd1, 0x3c, 0xa3, 0x32, 0xac, 0x60, 0x41, 0x0e, 0x80, 0x13, 0x46, 0x7c, 0x1b, 0xe4, - 0xc5, 0x21, 0x8a, 0x1b, 0xc7, 0x19, 0x8f, 0xe8, 0x37, 0x56, 0x30, 0xd0, 0x75, 0x6d, 0x47, 0x21, - 0x04, 0x85, 0x0e, 0xf1, 0xd9, 0x01, 0xc3, 0x38, 0x0a, 0xdd, 0x49, 0x54, 0x80, 0x55, 0x24, 0x74, - 0x8e, 0xed, 0x30, 0xf0, 0x9f, 0xd0, 0x11, 0x16, 0x9a, 0x63, 0x14, 0x3e, 0x50, 0xa4, 0x50, 0x0b, - 0x7f, 0x63, 0x05, 0x03, 0x7a, 0x45, 0x79, 0xea, 0x82, 0xe2, 0x1a, 0xa8, 0x43, 0x3d, 0x73, 0xbd, - 0x2b, 0x52, 0xc4, 0x4c, 0xb0, 0xb3, 0x7a, 0x59, 0x51, 0xc2, 0xb0, 0x80, 0x88, 0x94, 0x7e, 0xa4, - 0x94, 0x32, 0x91, 0x09, 0xf5, 0x64, 0x5f, 0x13, 0xea, 0x2a, 0xe5, 0xd0, 0x14, 0x97, 0x1e, 0x46, - 0x14, 0xa6, 0xa2, 0x17, 0x8e, 0x46, 0xb2, 0x12, 0xa7, 0xdb, 0x73, 0xa2, 0x4f, 0x4c, 0xd6, 0x77, - 0x5a, 0x25, 0xfa, 0xbc, 0x0c, 0x87, 0xb5, 0x68, 0x07, 0x26, 0x7d, 0xc5, 0x1e, 0x5b, 0xe4, 0xbd, - 0x1c, 0xe0, 0x6d, 0x4a, 0xd8, 0x62, 0xb3, 0x50, 0x58, 0x6a, 0x09, 0x8e, 0xe1, 0x41, 0xaf, 0xab, - 0xc6, 0x8d, 0x33, 0xc5, 0x9d, 0x6f, 0xb3, 0x03, 0x3d, 0x46, 0x1a, 0xb6, 0xd0, 0xae, 0x4e, 0xb5, - 0x39, 0xec, 0xc6, 0xcd, 0xf8, 0xce, 0x1e, 0x4b, 0xb0, 0x81, 0x03, 0xcd, 0xfc, 0xe8, 0xa7, 0x25, - 0xbb, 0x1d, 0xd7, 0xef, 0x7a, 0x84, 0x05, 0xb0, 0x65, 0x9f, 0x07, 0x45, 0x9f, 0x76, 0x29, 0x59, - 0x89, 0xd3, 0xed, 0xd1, 0xa7, 0x35, 0x98, 0xe1, 0x69, 0x43, 0xe9, 0xd5, 0xe5, 0x3a, 0xc4, 0x09, - 0x7c, 0x96, 0x17, 0xb3, 0xa0, 0x7f, 0x6c, 0x23, 0x01, 0x8b, 0xe7, 0x5a, 0x4a, 0x96, 0xe2, 0x14, - 0x4e, 0xba, 0x73, 0xd4, 0x70, 0x05, 0x2c, 
0xbd, 0x66, 0xc1, 0x9d, 0xa3, 0x86, 0x42, 0xe0, 0x3b, - 0x47, 0x2d, 0xc1, 0x31, 0x3c, 0xe8, 0x29, 0x98, 0xf2, 0x65, 0x0e, 0x1c, 0xb6, 0x82, 0x17, 0xa2, - 0x78, 0x62, 0x0d, 0xb5, 0x02, 0xc7, 0xdb, 0xe9, 0xff, 0x56, 0x03, 0x08, 0xb5, 0x07, 0xa7, 0xa1, - 0x13, 0x37, 0x63, 0x0a, 0x95, 0xc5, 0x81, 0xb4, 0x1d, 0x24, 0x57, 0x33, 0xfe, 0x2d, 0x0d, 0xa6, - 0xa3, 0x66, 0xa7, 0xc0, 0xaa, 0x37, 0xe3, 0xac, 0xfa, 0xfb, 0x07, 0x9b, 0x57, 0x0e, 0xbf, 0xfe, - 0x7f, 0x4a, 0xea, 0xac, 0x18, 0x37, 0xb6, 0x13, 0x7b, 0x63, 0xa6, 0xa8, 0x6f, 0x0d, 0xf2, 0xc6, - 0xac, 0xba, 0x50, 0x47, 0xf3, 0xcd, 0x78, 0x73, 0xfe, 0x3b, 0x31, 0x5e, 0x68, 0x80, 0x40, 0x01, - 0x21, 0xe3, 0x23, 0x51, 0xf3, 0x05, 0x38, 0x88, 0x31, 0x7a, 0x55, 0x25, 0x95, 0xfc, 0xb5, 0xfa, - 0x03, 0xc5, 0xbc, 0xd3, 0x95, 0x09, 0xf7, 0x25, 0x90, 0xfa, 0xd7, 0xa6, 0x60, 0x42, 0x51, 0xb4, - 0x25, 0x5e, 0xcc, 0xb5, 0xd3, 0x78, 0x31, 0x0f, 0x60, 0xa2, 0x19, 0x06, 0x84, 0x97, 0xcb, 0x3e, - 0x20, 0xce, 0x90, 0x44, 0x47, 0xa1, 0xe6, 0x7d, 0xac, 0xa2, 0xa1, 0x8c, 0x44, 0xb8, 0xc7, 0x86, - 0x8e, 0xc1, 0x8e, 0xa1, 0xdf, 0xbe, 0x7a, 0x27, 0x80, 0xe4, 0x45, 0x89, 0x29, 0x22, 0x7a, 0x86, - 0x46, 0xe8, 0x75, 0xff, 0x56, 0x58, 0x87, 0x95, 0x76, 0xe9, 0x17, 0xd8, 0x91, 0x53, 0x7b, 0x81, - 0xa5, 0xdb, 0xc0, 0x96, 0x59, 0x83, 0x06, 0xb2, 0xc9, 0x09, 0x73, 0x0f, 0x45, 0xdb, 0x20, 0x2c, - 0xf2, 0xb1, 0x82, 0x24, 0xc7, 0x70, 0x62, 0xac, 0x90, 0xe1, 0x44, 0x17, 0xce, 0x79, 0x24, 0xf0, - 0x7a, 0xd5, 0x5e, 0x93, 0x25, 0xd3, 0xf2, 0x02, 0x26, 0x51, 0x96, 0x8b, 0x45, 0x98, 0xc2, 0x69, - 0x50, 0x38, 0x0b, 0x7e, 0x8c, 0x19, 0x1b, 0xef, 0xcb, 0x8c, 0xbd, 0x0b, 0x26, 0x02, 0xd2, 0xdc, - 0x72, 0xac, 0xa6, 0x61, 0xd7, 0x6b, 0x22, 0xdc, 0x65, 0xc4, 0x57, 0x44, 0x55, 0x58, 0x6d, 0x87, - 0x16, 0x61, 0xa8, 0x6b, 0x99, 0x82, 0x1b, 0xfd, 0xb1, 0x50, 0x65, 0x5d, 0xaf, 0xdd, 0xdf, 0xab, - 0xbc, 0x35, 0xb2, 0x44, 0x08, 0x67, 0x75, 0xad, 0xb3, 0xdd, 0xba, 0x16, 0xf4, 0x3a, 0xc4, 0x9f, - 0xbf, 0x5b, 0xaf, 0x61, 0xda, 0x39, 0xcb, 0xa8, 0x64, 0xf2, 0x08, 0x46, 0x25, 0x5f, 0xd0, 0xe0, - 0x9c, 0x91, 0xd4, 0xb6, 0x13, 0x7f, 0x76, 0xaa, 0x38, 0xb5, 0xcc, 0xd6, 0xe0, 0x2f, 0x5e, 0x16, - 0xf3, 0x3b, 0xb7, 0x90, 0x46, 0x87, 0xb3, 0xc6, 0x80, 0x3c, 0x40, 0x6d, 0xab, 0x15, 0x26, 0xf0, - 0x11, 0x5f, 0x7d, 0xba, 0x98, 0x1e, 0x61, 0x25, 0x05, 0x09, 0x67, 0x40, 0x47, 0xf7, 0x60, 0xa2, - 0x19, 0xe9, 0xe4, 0x05, 0x57, 0x5d, 0x3b, 0x8e, 0x47, 0x01, 0x2e, 0x79, 0xa9, 0x0a, 0x7f, 0x15, - 0x53, 0xf8, 0x9a, 0xa6, 0x88, 0xbc, 0xe2, 0x45, 0x89, 0xcd, 0x7a, 0xa6, 0xf8, 0x6b, 0x5a, 0x36, - 0x44, 0xdc, 0x07, 0x1b, 0x8b, 0xeb, 0x64, 0xc7, 0xf3, 0x6c, 0xb1, 0x14, 0xf3, 0x05, 0x7d, 0xc1, - 0x13, 0x29, 0xbb, 0xf8, 0xd6, 0x4c, 0x14, 0xe2, 0x24, 0x42, 0xfd, 0x9b, 0x9a, 0x50, 0x98, 0x9d, - 0xa2, 0x35, 0xc4, 0x49, 0x3f, 0xa5, 0xe9, 0x7f, 0xa1, 0x41, 0x8a, 0x47, 0x47, 0x1b, 0x30, 0x46, - 0x41, 0xd4, 0x56, 0x1b, 0x62, 0x5a, 0xef, 0x2d, 0x76, 0x5d, 0x32, 0x10, 0x5c, 0xfb, 0x28, 0x7e, - 0x60, 0x09, 0x98, 0x72, 0xfd, 0x8e, 0x12, 0x71, 0x5b, 0xcc, 0xb0, 0x10, 0x3f, 0xa2, 0x46, 0xee, - 0xe6, 0x5c, 0xbf, 0x5a, 0x82, 0x63, 0x78, 0xf4, 0x65, 0x80, 0x48, 0xae, 0x1a, 0xd8, 0x40, 0xe6, - 0xfb, 0x23, 0x70, 0x61, 0x50, 0x67, 0x03, 0x96, 0xde, 0x89, 0xec, 0x58, 0xcd, 0x60, 0x61, 0x33, - 0x20, 0xde, 0x9d, 0x3b, 0x2b, 0xeb, 0x5b, 0x1e, 0xf1, 0xb7, 0x5c, 0xdb, 0x2c, 0x98, 0x5f, 0x8a, - 0x3d, 0xa8, 0x2d, 0x65, 0x42, 0xc4, 0x39, 0x98, 0x98, 0x4c, 0x29, 0xd2, 0x4d, 0x63, 0xca, 0x4c, - 0x76, 0x3d, 0x3f, 0x10, 0x51, 0x6d, 0xb8, 0x4c, 0x99, 0xac, 0xc4, 0xe9, 0xf6, 0x49, 0x20, 0xcb, - 0x56, 0xdb, 0xe2, 0x19, 0x7c, 0xb4, 0x34, 0x10, 0x56, 0x89, 0xd3, 
0xed, 0x55, 0x20, 0xfc, 0x4b, - 0xd1, 0xd3, 0x3e, 0x92, 0x06, 0x12, 0x56, 0xe2, 0x74, 0x7b, 0x64, 0xc2, 0x83, 0x1e, 0x69, 0xba, - 0xed, 0x36, 0x71, 0x4c, 0x9e, 0x39, 0xd1, 0xf0, 0x5a, 0x96, 0x73, 0xc3, 0x33, 0x58, 0x43, 0xa6, - 0xa2, 0xd3, 0x58, 0x1e, 0x8a, 0x07, 0x71, 0x9f, 0x76, 0xb8, 0x2f, 0x14, 0xd4, 0x86, 0x33, 0x5d, - 0x96, 0xf3, 0xc3, 0xab, 0x3b, 0x01, 0xf1, 0x76, 0x0c, 0x5b, 0xe8, 0xe1, 0x0a, 0xa5, 0x8c, 0xbe, - 0x1b, 0x07, 0x85, 0x93, 0xb0, 0x51, 0x8f, 0xf2, 0x1d, 0x62, 0x38, 0x0a, 0xca, 0x72, 0xf1, 0x04, - 0x68, 0x38, 0x0d, 0x0e, 0x67, 0xe1, 0xd0, 0xbf, 0xa0, 0x81, 0xb0, 0x44, 0x46, 0x0f, 0xc6, 0xde, - 0x3a, 0xca, 0x89, 0x77, 0x0e, 0x99, 0x79, 0xa2, 0x94, 0x99, 0x79, 0xe2, 0xed, 0x4a, 0xb8, 0xa4, - 0xf1, 0x88, 0xf6, 0x71, 0xc8, 0x4a, 0xd6, 0x9c, 0xc7, 0x61, 0x9c, 0xf0, 0x67, 0xb4, 0x90, 0xa3, - 0x65, 0xd6, 0xdd, 0x4b, 0xb2, 0x10, 0x47, 0xf5, 0xfa, 0x9f, 0x68, 0x20, 0x20, 0xb0, 0x1c, 0x4f, - 0x87, 0xca, 0xf5, 0x73, 0xa0, 0x69, 0x93, 0x92, 0xa3, 0x68, 0x28, 0x37, 0x47, 0xd1, 0x09, 0xa5, - 0xee, 0xf9, 0x1d, 0x0d, 0xce, 0xc4, 0xe3, 0x57, 0xf9, 0xe8, 0x6d, 0x30, 0x26, 0x22, 0x5c, 0x8a, - 0x10, 0x75, 0xac, 0xab, 0x08, 0x31, 0x81, 0x65, 0x5d, 0x5c, 0x1d, 0x36, 0x80, 0x88, 0x99, 0x1d, - 0x46, 0xeb, 0x00, 0x69, 0xef, 0x53, 0x33, 0x30, 0xca, 0xc3, 0x23, 0x52, 0x9a, 0x96, 0xe1, 0xb6, - 0x79, 0xbb, 0x78, 0x14, 0xc6, 0x22, 0xbe, 0x76, 0x6a, 0x26, 0x82, 0x52, 0xdf, 0x4c, 0x04, 0x98, - 0xa7, 0x44, 0x1b, 0xe0, 0xe9, 0xa3, 0x8a, 0xeb, 0x22, 0x13, 0xba, 0x4c, 0x87, 0x16, 0xc4, 0xde, - 0x04, 0x86, 0x8b, 0x73, 0x6e, 0x7c, 0x01, 0x94, 0x97, 0x81, 0xe9, 0xbe, 0xaf, 0x02, 0x32, 0xfe, - 0xdc, 0x48, 0x71, 0x53, 0x43, 0xb1, 0xe4, 0x87, 0x88, 0x3f, 0x17, 0x1e, 0xa4, 0xd1, 0xdc, 0x83, - 0xb4, 0x09, 0x63, 0xe2, 0x28, 0x08, 0xe2, 0xf8, 0xde, 0x01, 0x72, 0x8b, 0x29, 0x21, 0x93, 0x79, - 0x01, 0x96, 0xc0, 0xe9, 0x8d, 0xdb, 0x36, 0x76, 0xad, 0x76, 0xb7, 0xcd, 0x28, 0xe2, 0x88, 0xda, - 0x94, 0x15, 0x63, 0x59, 0xcf, 0x9a, 0x72, 0x0b, 0x4d, 0x26, 0x48, 0xa9, 0x4d, 0x79, 0x31, 0x96, - 0xf5, 0xe8, 0x25, 0x28, 0xb7, 0x8d, 0xdd, 0x46, 0xd7, 0x6b, 0x11, 0xf1, 0x22, 0x90, 0xcf, 0xe3, - 0x75, 0x03, 0xcb, 0x9e, 0xa7, 0xe2, 0x7f, 0xe0, 0xcd, 0xd7, 0x9d, 0xe0, 0x8e, 0xd7, 0x08, 0xbc, - 0x30, 0xc1, 0xd0, 0x8a, 0x80, 0x82, 0x43, 0x78, 0xc8, 0x86, 0xe9, 0xb6, 0xb1, 0x7b, 0xd7, 0x31, - 0x78, 0x68, 0x41, 0x9b, 0x3f, 0x04, 0x14, 0xc1, 0xc0, 0x9e, 0x85, 0x57, 0x62, 0xb0, 0x70, 0x02, - 0x76, 0xc6, 0x0b, 0xf4, 0xe4, 0x49, 0xbd, 0x40, 0x2f, 0x84, 0xfe, 0x36, 0x5c, 0x6e, 0xbb, 0x94, - 0xe9, 0xd9, 0xde, 0xd7, 0x97, 0xe6, 0xe5, 0xd0, 0x97, 0x66, 0xba, 0xf8, 0x93, 0x69, 0x1f, 0x3f, - 0x9a, 0x2e, 0x4c, 0x50, 0x0e, 0x9b, 0x97, 0x52, 0xc1, 0xaa, 0xb0, 0x0a, 0xb2, 0x16, 0x82, 0x51, - 0x12, 0xd8, 0x46, 0xa0, 0xb1, 0x8a, 0x07, 0xdd, 0xe1, 0x09, 0xe9, 0x6d, 0x12, 0x44, 0x4d, 0x98, - 0x40, 0x3f, 0xc3, 0xce, 0x4f, 0x98, 0x3f, 0x3e, 0xd5, 0x00, 0x67, 0xf7, 0x8b, 0x22, 0xe5, 0x9c, - 0xcd, 0x8e, 0x94, 0x83, 0x7e, 0x3e, 0x4b, 0xcf, 0x8f, 0xd8, 0x9a, 0x7e, 0xb0, 0x38, 0x6d, 0x28, - 0xac, 0xed, 0xff, 0x97, 0x1a, 0xcc, 0xb6, 0x73, 0x32, 0xbd, 0x8a, 0xe7, 0x87, 0xf5, 0x01, 0xe8, - 0x43, 0x6e, 0xf6, 0xd8, 0xc5, 0x47, 0xf6, 0xf7, 0x2a, 0x07, 0xe6, 0x98, 0xc5, 0xb9, 0x63, 0x43, - 0x1e, 0x8c, 0xf9, 0x3d, 0xbf, 0x19, 0xd8, 0xfe, 0xec, 0xf9, 0xe2, 0x09, 0x45, 0x05, 0x65, 0x6d, - 0x70, 0x48, 0x9c, 0xb4, 0x46, 0x81, 0xfa, 0x79, 0x29, 0x96, 0x88, 0x06, 0xf5, 0xd3, 0x1e, 0x20, - 0x38, 0xe8, 0xdc, 0x75, 0x98, 0x54, 0x07, 0x79, 0x24, 0xf7, 0xf0, 0x5f, 0xd5, 0x60, 0x26, 0x79, - 0x69, 0xa9, 0x39, 0xff, 0xb5, 0x93, 0xcd, 0xf9, 0xaf, 0xd8, 0xbf, 0x94, 0xfa, 0xd8, 0xbf, 
0x3c, - 0x03, 0x17, 0xb3, 0xf7, 0x32, 0xe5, 0x20, 0x0d, 0xdb, 0x76, 0xef, 0x09, 0xc9, 0x2d, 0xca, 0xe1, - 0x45, 0x0b, 0x31, 0xaf, 0xd3, 0x3f, 0x06, 0xc9, 0x50, 0xd0, 0xe8, 0x15, 0x18, 0xf7, 0xfd, 0x2d, - 0x1e, 0xe5, 0x53, 0x4c, 0xb2, 0x98, 0xc8, 0x2e, 0x43, 0x85, 0x0a, 0x97, 0x46, 0xf9, 0x13, 0x47, - 0xe0, 0x17, 0x5f, 0xfc, 0xea, 0x77, 0xaf, 0xbc, 0xe5, 0x1b, 0xdf, 0xbd, 0xf2, 0x96, 0x6f, 0x7f, - 0xf7, 0xca, 0x5b, 0x7e, 0x6a, 0xff, 0x8a, 0xf6, 0xd5, 0xfd, 0x2b, 0xda, 0x37, 0xf6, 0xaf, 0x68, - 0xdf, 0xde, 0xbf, 0xa2, 0xfd, 0xa7, 0xfd, 0x2b, 0xda, 0x2f, 0xfc, 0xe7, 0x2b, 0x6f, 0x79, 0xe9, - 0xc9, 0x08, 0xfb, 0x35, 0x89, 0x34, 0xfa, 0xa7, 0xb3, 0xdd, 0xba, 0x46, 0xb1, 0x4b, 0xd7, 0x22, - 0x86, 0xfd, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xf5, 0xa2, 0xb7, 0xf3, 0x7f, 0xea, 0x00, 0x00, + // 11965 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x7d, 0x6c, 0x25, 0x59, + 0x56, 0x18, 0xbe, 0xf5, 0xfc, 0x7d, 0xfc, 0xd1, 0xee, 0xdb, 0x1f, 0xe3, 0x76, 0xcf, 0xf4, 0xeb, + 0xad, 0x99, 0xdd, 0xdf, 0x0c, 0xbb, 0xb8, 0x99, 0x61, 0x97, 0xdd, 0xe9, 0x65, 0x76, 0xd6, 0x7e, + 0xcf, 0xdd, 0xfd, 0x68, 0xdb, 0xed, 0xb9, 0xcf, 0x9e, 0x19, 0x06, 0x7e, 0x03, 0xe5, 0xaa, 0xeb, + 0xe7, 0x1a, 0xd7, 0xab, 0x7a, 0x53, 0x55, 0xcf, 0xed, 0x37, 0x03, 0x81, 0xdd, 0xb0, 0x84, 0x1d, + 0xd8, 0x08, 0x21, 0x91, 0xd5, 0x2e, 0x24, 0x2c, 0x42, 0xe4, 0x8b, 0x88, 0x20, 0x22, 0x22, 0x01, + 0x8a, 0x84, 0x90, 0x12, 0x76, 0x11, 0xa0, 0x15, 0x24, 0xca, 0xa2, 0x04, 0x93, 0x75, 0x08, 0x44, + 0x4a, 0x84, 0x22, 0xa1, 0x28, 0x4a, 0x07, 0x91, 0xe8, 0x7e, 0x55, 0xdd, 0xfa, 0x7a, 0xb6, 0xeb, + 0xd9, 0xde, 0x1d, 0xc1, 0x5f, 0xf6, 0xbb, 0xe7, 0xde, 0x73, 0xee, 0xbd, 0x75, 0xef, 0xb9, 0xe7, + 0x9e, 0x7b, 0x3e, 0x60, 0xa9, 0x65, 0x87, 0x3b, 0xdd, 0xad, 0x05, 0xd3, 0x6b, 0xdf, 0x6a, 0x19, + 0xbe, 0x45, 0x5c, 0xe2, 0xc7, 0xff, 0x74, 0x76, 0x5b, 0xb7, 0x8c, 0x8e, 0x1d, 0xdc, 0x32, 0x3d, + 0x9f, 0xdc, 0xda, 0x7b, 0x76, 0x8b, 0x84, 0xc6, 0xb3, 0xb7, 0x5a, 0x14, 0x66, 0x84, 0xc4, 0x5a, + 0xe8, 0xf8, 0x5e, 0xe8, 0xa1, 0xe7, 0x62, 0x1c, 0x0b, 0xb2, 0x69, 0xfc, 0x4f, 0x67, 0xb7, 0xb5, + 0x40, 0x71, 0x2c, 0x50, 0x1c, 0x0b, 0x02, 0xc7, 0xfc, 0x37, 0xab, 0x74, 0xbd, 0x96, 0x77, 0x8b, + 0xa1, 0xda, 0xea, 0x6e, 0xb3, 0x5f, 0xec, 0x07, 0xfb, 0x8f, 0x93, 0x98, 0x7f, 0x66, 0xf7, 0xa3, + 0xc1, 0x82, 0xed, 0xd1, 0xce, 0xdc, 0x32, 0xba, 0xa1, 0x17, 0x98, 0x86, 0x63, 0xbb, 0xad, 0x5b, + 0x7b, 0x99, 0xde, 0xcc, 0xeb, 0x4a, 0x55, 0xd1, 0xed, 0xbe, 0x75, 0xfc, 0x2d, 0xc3, 0xcc, 0xab, + 0xf3, 0xa1, 0xb8, 0x4e, 0xdb, 0x30, 0x77, 0x6c, 0x97, 0xf8, 0x3d, 0x39, 0x21, 0xb7, 0x7c, 0x12, + 0x78, 0x5d, 0xdf, 0x24, 0x27, 0x6a, 0x15, 0xdc, 0x6a, 0x93, 0xd0, 0xc8, 0xa3, 0x75, 0xab, 0xa8, + 0x95, 0xdf, 0x75, 0x43, 0xbb, 0x9d, 0x25, 0xf3, 0x6d, 0x47, 0x35, 0x08, 0xcc, 0x1d, 0xd2, 0x36, + 0x32, 0xed, 0xbe, 0xb5, 0xa8, 0x5d, 0x37, 0xb4, 0x9d, 0x5b, 0xb6, 0x1b, 0x06, 0xa1, 0x9f, 0x6e, + 0xa4, 0xbf, 0xa3, 0xc1, 0xec, 0xe2, 0x7a, 0xa3, 0x49, 0xfc, 0x3d, 0xe2, 0xaf, 0x78, 0xad, 0x96, + 0xed, 0xb6, 0xd0, 0x07, 0x60, 0x62, 0x8f, 0xf8, 0x5b, 0x5e, 0x60, 0x87, 0xbd, 0x39, 0xed, 0xa6, + 0xf6, 0xf4, 0xc8, 0xd2, 0xf4, 0xe1, 0x41, 0x75, 0xe2, 0x65, 0x59, 0x88, 0x63, 0x38, 0x6a, 0xc0, + 0xa5, 0x9d, 0x30, 0xec, 0x2c, 0x9a, 0x26, 0x09, 0x82, 0xa8, 0xc6, 0x5c, 0x85, 0x35, 0x7b, 0xec, + 0xf0, 0xa0, 0x7a, 0xe9, 0xde, 0xc6, 0xc6, 0x7a, 0x0a, 0x8c, 0xf3, 0xda, 0xe8, 0xbf, 0xac, 0xc1, + 0xc5, 0xa8, 0x33, 0x98, 0xbc, 0xd9, 0x25, 0x41, 0x18, 0x20, 0x0c, 0x57, 0xdb, 0xc6, 0xfe, 0x9a, + 0xe7, 0xae, 0x76, 0x43, 0x23, 0xb4, 0xdd, 0x56, 0xc3, 0xdd, 0x76, 
0xec, 0xd6, 0x4e, 0x28, 0xba, + 0x36, 0x7f, 0x78, 0x50, 0xbd, 0xba, 0x9a, 0x5b, 0x03, 0x17, 0xb4, 0xa4, 0x9d, 0x6e, 0x1b, 0xfb, + 0x19, 0x84, 0x4a, 0xa7, 0x57, 0xb3, 0x60, 0x9c, 0xd7, 0x46, 0x7f, 0x0e, 0x46, 0x16, 0x2d, 0xcb, + 0x73, 0xd1, 0x33, 0x30, 0x46, 0x5c, 0x63, 0xcb, 0x21, 0x16, 0xeb, 0xd8, 0xf8, 0xd2, 0x85, 0x2f, + 0x1d, 0x54, 0xdf, 0x73, 0x78, 0x50, 0x1d, 0x5b, 0xe6, 0xc5, 0x58, 0xc2, 0xf5, 0x9f, 0xac, 0xc0, + 0x28, 0x6b, 0x14, 0xa0, 0x9f, 0xd0, 0xe0, 0xd2, 0x6e, 0x77, 0x8b, 0xf8, 0x2e, 0x09, 0x49, 0x50, + 0x37, 0x82, 0x9d, 0x2d, 0xcf, 0xf0, 0x39, 0x8a, 0xc9, 0xe7, 0xee, 0x2e, 0x9c, 0x7c, 0xff, 0x2d, + 0xdc, 0xcf, 0xa2, 0xe3, 0x63, 0xca, 0x01, 0xe0, 0x3c, 0xe2, 0x68, 0x0f, 0xa6, 0xdc, 0x96, 0xed, + 0xee, 0x37, 0xdc, 0x96, 0x4f, 0x82, 0x80, 0xcd, 0xcb, 0xe4, 0x73, 0x9f, 0x28, 0xd3, 0x99, 0x35, + 0x05, 0xcf, 0xd2, 0xec, 0xe1, 0x41, 0x75, 0x4a, 0x2d, 0xc1, 0x09, 0x3a, 0xfa, 0x5f, 0x69, 0x70, + 0x61, 0xd1, 0x6a, 0xdb, 0x41, 0x60, 0x7b, 0xee, 0xba, 0xd3, 0x6d, 0xd9, 0x2e, 0xba, 0x09, 0xc3, + 0xae, 0xd1, 0x26, 0x6c, 0x42, 0x26, 0x96, 0xa6, 0xc4, 0x9c, 0x0e, 0xaf, 0x19, 0x6d, 0x82, 0x19, + 0x04, 0xbd, 0x04, 0xa3, 0xa6, 0xe7, 0x6e, 0xdb, 0x2d, 0xd1, 0xcf, 0x6f, 0x5e, 0xe0, 0x3b, 0x61, + 0x41, 0xdd, 0x09, 0xac, 0x7b, 0x62, 0x07, 0x2d, 0x60, 0xe3, 0xe1, 0xf2, 0x7e, 0x48, 0x5c, 0x4a, + 0x66, 0x09, 0x0e, 0x0f, 0xaa, 0xa3, 0x35, 0x86, 0x00, 0x0b, 0x44, 0xe8, 0x69, 0x18, 0xb7, 0xec, + 0x80, 0x7f, 0xcc, 0x21, 0xf6, 0x31, 0xa7, 0x0e, 0x0f, 0xaa, 0xe3, 0x75, 0x51, 0x86, 0x23, 0x28, + 0x5a, 0x81, 0xcb, 0x74, 0x06, 0x79, 0xbb, 0x26, 0x31, 0x7d, 0x12, 0xd2, 0xae, 0xcd, 0x0d, 0xb3, + 0xee, 0xce, 0x1d, 0x1e, 0x54, 0x2f, 0xdf, 0xcf, 0x81, 0xe3, 0xdc, 0x56, 0xfa, 0x1d, 0x18, 0x5f, + 0x74, 0x88, 0x4f, 0x17, 0x18, 0xba, 0x0d, 0x33, 0xa4, 0x6d, 0xd8, 0x0e, 0x26, 0x26, 0xb1, 0xf7, + 0x88, 0x1f, 0xcc, 0x69, 0x37, 0x87, 0x9e, 0x9e, 0x58, 0x42, 0x87, 0x07, 0xd5, 0x99, 0xe5, 0x04, + 0x04, 0xa7, 0x6a, 0xea, 0x9f, 0xd4, 0x60, 0x72, 0xb1, 0x6b, 0xd9, 0x21, 0x1f, 0x17, 0xf2, 0x61, + 0xd2, 0xa0, 0x3f, 0xd7, 0x3d, 0xc7, 0x36, 0x7b, 0x62, 0x71, 0xbd, 0x58, 0xe6, 0x7b, 0x2e, 0xc6, + 0x68, 0x96, 0x2e, 0x1c, 0x1e, 0x54, 0x27, 0x95, 0x02, 0xac, 0x12, 0xd1, 0x77, 0x40, 0x85, 0xa1, + 0xef, 0x84, 0x29, 0x3e, 0xdc, 0x55, 0xa3, 0x83, 0xc9, 0xb6, 0xe8, 0xc3, 0x93, 0xca, 0xb7, 0x92, + 0x84, 0x16, 0x1e, 0x6c, 0xbd, 0x41, 0xcc, 0x10, 0x93, 0x6d, 0xe2, 0x13, 0xd7, 0x24, 0x7c, 0xd9, + 0xd4, 0x94, 0xc6, 0x38, 0x81, 0x4a, 0xff, 0x63, 0xca, 0xc4, 0xf6, 0x0c, 0xdb, 0x31, 0xb6, 0x6c, + 0xc7, 0x0e, 0x7b, 0xaf, 0x79, 0x2e, 0x39, 0xc6, 0xba, 0xd9, 0x84, 0xc7, 0xba, 0xae, 0xc1, 0xdb, + 0x39, 0x64, 0x95, 0xaf, 0x94, 0x8d, 0x5e, 0x87, 0xd0, 0x05, 0x4f, 0x67, 0xfa, 0xfa, 0xe1, 0x41, + 0xf5, 0xb1, 0xcd, 0xfc, 0x2a, 0xb8, 0xa8, 0x2d, 0xe5, 0x57, 0x0a, 0xe8, 0x65, 0xcf, 0xe9, 0xb6, + 0x05, 0xd6, 0x21, 0x86, 0x95, 0xf1, 0xab, 0xcd, 0xdc, 0x1a, 0xb8, 0xa0, 0xa5, 0xfe, 0xa5, 0x0a, + 0x4c, 0x2d, 0x19, 0xe6, 0x6e, 0xb7, 0xb3, 0xd4, 0x35, 0x77, 0x49, 0x88, 0xbe, 0x17, 0xc6, 0xe9, + 0x81, 0x63, 0x19, 0xa1, 0x21, 0x66, 0xf2, 0x5b, 0x0a, 0x57, 0x3d, 0xfb, 0x88, 0xb4, 0x76, 0x3c, + 0xb7, 0xab, 0x24, 0x34, 0x96, 0x90, 0x98, 0x13, 0x88, 0xcb, 0x70, 0x84, 0x15, 0x6d, 0xc3, 0x70, + 0xd0, 0x21, 0xa6, 0xd8, 0x53, 0xf5, 0x32, 0x6b, 0x45, 0xed, 0x71, 0xb3, 0x43, 0xcc, 0xf8, 0x2b, + 0xd0, 0x5f, 0x98, 0xe1, 0x47, 0x2e, 0x8c, 0x06, 0xa1, 0x11, 0x76, 0x03, 0xb6, 0xd1, 0x26, 0x9f, + 0xbb, 0x33, 0x30, 0x25, 0x86, 0x6d, 0x69, 0x46, 0xd0, 0x1a, 0xe5, 0xbf, 0xb1, 0xa0, 0xa2, 0xff, + 0x7b, 0x0d, 0x66, 0xd5, 0xea, 0x2b, 0x76, 0x10, 0xa2, 0xef, 0xce, 0x4c, 0xe7, 0xc2, 0xf1, 
0xa6, + 0x93, 0xb6, 0x66, 0x93, 0x39, 0x2b, 0xc8, 0x8d, 0xcb, 0x12, 0x65, 0x2a, 0x09, 0x8c, 0xd8, 0x21, + 0x69, 0xf3, 0x65, 0x55, 0x92, 0x8f, 0xaa, 0x5d, 0x5e, 0x9a, 0x16, 0xc4, 0x46, 0x1a, 0x14, 0x2d, + 0xe6, 0xd8, 0xf5, 0xef, 0x85, 0xcb, 0x6a, 0xad, 0x75, 0xdf, 0xdb, 0xb3, 0x2d, 0xe2, 0xd3, 0x9d, + 0x10, 0xf6, 0x3a, 0x99, 0x9d, 0x40, 0x57, 0x16, 0x66, 0x10, 0xf4, 0x7e, 0x18, 0xf5, 0x49, 0xcb, + 0xf6, 0x5c, 0xf6, 0xb5, 0x27, 0xe2, 0xb9, 0xc3, 0xac, 0x14, 0x0b, 0xa8, 0xfe, 0x3f, 0x2b, 0xc9, + 0xb9, 0xa3, 0x9f, 0x11, 0xed, 0xc1, 0x78, 0x47, 0x90, 0x12, 0x73, 0x77, 0x6f, 0xd0, 0x01, 0xca, + 0xae, 0xc7, 0xb3, 0x2a, 0x4b, 0x70, 0x44, 0x0b, 0xd9, 0x30, 0x23, 0xff, 0xaf, 0x0d, 0xc0, 0xfe, + 0x19, 0x3b, 0x5d, 0x4f, 0x20, 0xc2, 0x29, 0xc4, 0x68, 0x03, 0x26, 0x02, 0xc6, 0xa4, 0x29, 0xe3, + 0x1a, 0x2a, 0x66, 0x5c, 0x4d, 0x59, 0x49, 0x30, 0xae, 0x8b, 0xa2, 0xfb, 0x13, 0x11, 0x00, 0xc7, + 0x88, 0xe8, 0x21, 0x13, 0x10, 0x62, 0x29, 0xc7, 0x05, 0x3b, 0x64, 0x9a, 0xa2, 0x0c, 0x47, 0x50, + 0xfd, 0x8b, 0xc3, 0x80, 0xb2, 0x4b, 0x5c, 0x9d, 0x01, 0x5e, 0x22, 0xe6, 0x7f, 0x90, 0x19, 0x10, + 0xbb, 0x25, 0x85, 0x18, 0xbd, 0x05, 0xd3, 0x8e, 0x11, 0x84, 0x0f, 0x3a, 0x54, 0x7a, 0x94, 0x0b, + 0x65, 0xf2, 0xb9, 0xc5, 0x32, 0x5f, 0x7a, 0x45, 0x45, 0xb4, 0x74, 0xf1, 0xf0, 0xa0, 0x3a, 0x9d, + 0x28, 0xc2, 0x49, 0x52, 0xe8, 0x0d, 0x98, 0xa0, 0x05, 0xcb, 0xbe, 0xef, 0xf9, 0x62, 0xf6, 0x5f, + 0x28, 0x4b, 0x97, 0x21, 0xe1, 0xd2, 0x6c, 0xf4, 0x13, 0xc7, 0xe8, 0xd1, 0x77, 0x00, 0xf2, 0xb6, + 0x02, 0x2a, 0x80, 0x5a, 0x77, 0xb9, 0xa8, 0x4c, 0x07, 0x4b, 0xbf, 0xce, 0xd0, 0xd2, 0xbc, 0xf8, + 0x9a, 0xe8, 0x41, 0xa6, 0x06, 0xce, 0x69, 0x85, 0x76, 0x01, 0x45, 0xe2, 0x76, 0xb4, 0x00, 0xe6, + 0x46, 0x8e, 0xbf, 0x7c, 0xae, 0x52, 0x62, 0x77, 0x33, 0x28, 0x70, 0x0e, 0x5a, 0xfd, 0x5f, 0x57, + 0x60, 0x92, 0x2f, 0x91, 0x65, 0x37, 0xf4, 0x7b, 0xe7, 0x70, 0x40, 0x90, 0xc4, 0x01, 0x51, 0x2b, + 0xbf, 0xe7, 0x59, 0x87, 0x0b, 0xcf, 0x87, 0x76, 0xea, 0x7c, 0x58, 0x1e, 0x94, 0x50, 0xff, 0xe3, + 0xe1, 0xdf, 0x69, 0x70, 0x41, 0xa9, 0x7d, 0x0e, 0xa7, 0x83, 0x95, 0x3c, 0x1d, 0x5e, 0x1c, 0x70, + 0x7c, 0x05, 0x87, 0x83, 0x97, 0x18, 0x16, 0x63, 0xdc, 0xcf, 0x01, 0x6c, 0x31, 0x76, 0xb2, 0x16, + 0xcb, 0x49, 0xd1, 0x27, 0x5f, 0x8a, 0x20, 0x58, 0xa9, 0x95, 0xe0, 0x59, 0x95, 0xbe, 0x3c, 0xeb, + 0xbf, 0x0c, 0xc1, 0xc5, 0xcc, 0xb4, 0x67, 0xf9, 0x88, 0xf6, 0x75, 0xe2, 0x23, 0x95, 0xaf, 0x07, + 0x1f, 0x19, 0x2a, 0xc5, 0x47, 0x8e, 0x7d, 0x4e, 0x20, 0x1f, 0x50, 0xdb, 0x6e, 0xf1, 0x66, 0xcd, + 0xd0, 0xf0, 0xc3, 0x0d, 0xbb, 0x4d, 0x04, 0xc7, 0xf9, 0xa6, 0xe3, 0x2d, 0x59, 0xda, 0x82, 0x33, + 0x9e, 0xd5, 0x0c, 0x26, 0x9c, 0x83, 0x5d, 0xff, 0xfd, 0x61, 0x80, 0xda, 0x22, 0xf6, 0x42, 0xde, + 0xd9, 0x17, 0x61, 0xa4, 0xb3, 0x63, 0x04, 0x72, 0x3d, 0x3d, 0x23, 0x17, 0xe3, 0x3a, 0x2d, 0x7c, + 0x74, 0x50, 0x9d, 0xab, 0xf9, 0xc4, 0x22, 0x6e, 0x68, 0x1b, 0x4e, 0x20, 0x1b, 0x31, 0x18, 0xe6, + 0xed, 0xe8, 0x18, 0xe8, 0x34, 0xd6, 0xbc, 0x76, 0xc7, 0x21, 0x14, 0xca, 0xc6, 0x50, 0x29, 0x37, + 0x86, 0x95, 0x0c, 0x26, 0x9c, 0x83, 0x5d, 0xd2, 0x6c, 0xb8, 0x76, 0x68, 0x1b, 0x11, 0xcd, 0xa1, + 0xf2, 0x34, 0x93, 0x98, 0x70, 0x0e, 0x76, 0xf4, 0x8e, 0x06, 0xf3, 0xc9, 0xe2, 0x3b, 0xb6, 0x6b, + 0x07, 0x3b, 0xc4, 0x62, 0xc4, 0x87, 0x4f, 0x4c, 0xfc, 0xc6, 0xe1, 0x41, 0x75, 0x7e, 0xa5, 0x10, + 0x23, 0xee, 0x43, 0x0d, 0x7d, 0x56, 0x83, 0xeb, 0xa9, 0x79, 0xf1, 0xed, 0x56, 0x8b, 0xf8, 0xa2, + 0x37, 0x27, 0x5f, 0x42, 0xd5, 0xc3, 0x83, 0xea, 0xf5, 0x95, 0x62, 0x94, 0xb8, 0x1f, 0x3d, 0xfd, + 0x37, 0x35, 0x18, 0xaa, 0xe1, 0x06, 0xfa, 0x40, 0xe2, 0x12, 0xf7, 0x98, 0x7a, 0x89, 0x7b, 0x74, + 0x50, 0x1d, 0xab, 
0xe1, 0x86, 0x72, 0x9f, 0xfb, 0xac, 0x06, 0x17, 0x4d, 0xcf, 0x0d, 0x0d, 0xda, + 0x2f, 0xcc, 0x25, 0x1d, 0xc9, 0x55, 0x4b, 0xdd, 0x5f, 0x6a, 0x29, 0x64, 0x4b, 0xd7, 0x44, 0x07, + 0x2e, 0xa6, 0x21, 0x01, 0xce, 0x52, 0xd6, 0xbf, 0xaa, 0xc1, 0x54, 0xcd, 0xf1, 0xba, 0xd6, 0xba, + 0xef, 0x6d, 0xdb, 0x0e, 0x79, 0x77, 0x5c, 0xda, 0xd4, 0x1e, 0x17, 0x1d, 0xca, 0xec, 0x12, 0xa5, + 0x56, 0x7c, 0x97, 0x5c, 0xa2, 0xd4, 0x2e, 0x17, 0x9c, 0x93, 0x3f, 0x39, 0x96, 0x1c, 0x19, 0x3b, + 0x29, 0x9f, 0x86, 0x71, 0xd3, 0x58, 0xea, 0xba, 0x96, 0x13, 0xdd, 0xa2, 0x68, 0x2f, 0x6b, 0x8b, + 0xbc, 0x0c, 0x47, 0x50, 0xf4, 0x16, 0x40, 0xac, 0x50, 0x13, 0x9f, 0xe1, 0xce, 0x60, 0x4a, 0xbc, + 0x26, 0x09, 0x43, 0xdb, 0x6d, 0x05, 0xf1, 0xa7, 0x8f, 0x61, 0x58, 0xa1, 0x86, 0xbe, 0x1f, 0xa6, + 0xc5, 0x24, 0x37, 0xda, 0x46, 0x4b, 0xe8, 0x1b, 0x4a, 0xce, 0xd4, 0xaa, 0x82, 0x68, 0xe9, 0x8a, + 0x20, 0x3c, 0xad, 0x96, 0x06, 0x38, 0x49, 0x0d, 0xf5, 0x60, 0xaa, 0xad, 0xea, 0x50, 0x86, 0xcb, + 0x8b, 0x33, 0x8a, 0x3e, 0x65, 0xe9, 0xb2, 0x20, 0x3e, 0x95, 0xd0, 0xbe, 0x24, 0x48, 0xe5, 0x5c, + 0x05, 0x47, 0xce, 0xea, 0x2a, 0x48, 0x60, 0x8c, 0x5f, 0x86, 0x83, 0xb9, 0x51, 0x36, 0xc0, 0xdb, + 0x65, 0x06, 0xc8, 0xef, 0xd5, 0xb1, 0x86, 0x98, 0xff, 0x0e, 0xb0, 0xc4, 0x8d, 0xf6, 0x60, 0x8a, + 0x9e, 0xea, 0x4d, 0xe2, 0x10, 0x33, 0xf4, 0xfc, 0xb9, 0xb1, 0xf2, 0x1a, 0xd8, 0xa6, 0x82, 0x87, + 0xab, 0xd2, 0xd4, 0x12, 0x9c, 0xa0, 0x13, 0xe9, 0x0a, 0xc6, 0x0b, 0x75, 0x05, 0x5d, 0x98, 0xdc, + 0x53, 0x74, 0x5a, 0x13, 0x6c, 0x12, 0x3e, 0x5e, 0xa6, 0x63, 0xb1, 0x82, 0x6b, 0xe9, 0x92, 0x20, + 0x34, 0xa9, 0x2a, 0xc3, 0x54, 0x3a, 0xfa, 0x3f, 0x00, 0xb8, 0x58, 0x73, 0xba, 0x41, 0x48, 0xfc, + 0x45, 0xf1, 0x48, 0x44, 0x7c, 0xf4, 0x29, 0x0d, 0xae, 0xb2, 0x7f, 0xeb, 0xde, 0x43, 0xb7, 0x4e, + 0x1c, 0xa3, 0xb7, 0xb8, 0x4d, 0x6b, 0x58, 0xd6, 0xc9, 0x38, 0x50, 0xbd, 0x2b, 0xa4, 0x48, 0xa6, + 0x9c, 0x6b, 0xe6, 0x62, 0xc4, 0x05, 0x94, 0xd0, 0x8f, 0x6a, 0x70, 0x2d, 0x07, 0x54, 0x27, 0x0e, + 0x09, 0xa5, 0xe4, 0x72, 0xd2, 0x7e, 0x3c, 0x71, 0x78, 0x50, 0xbd, 0xd6, 0x2c, 0x42, 0x8a, 0x8b, + 0xe9, 0xa1, 0xbf, 0xab, 0xc1, 0x7c, 0x0e, 0xf4, 0x8e, 0x61, 0x3b, 0x5d, 0x5f, 0x0a, 0x35, 0x27, + 0xed, 0x0e, 0x93, 0x2d, 0x9a, 0x85, 0x58, 0x71, 0x1f, 0x8a, 0xe8, 0x07, 0xe0, 0x4a, 0x04, 0xdd, + 0x74, 0x5d, 0x42, 0xac, 0x84, 0x88, 0x73, 0xd2, 0xae, 0x5c, 0x3b, 0x3c, 0xa8, 0x5e, 0x69, 0xe6, + 0x21, 0xc4, 0xf9, 0x74, 0x50, 0x0b, 0x9e, 0x88, 0x01, 0xa1, 0xed, 0xd8, 0x6f, 0x71, 0x29, 0x6c, + 0xc7, 0x27, 0xc1, 0x8e, 0xe7, 0x58, 0x8c, 0x59, 0x68, 0x4b, 0xef, 0x3d, 0x3c, 0xa8, 0x3e, 0xd1, + 0xec, 0x57, 0x11, 0xf7, 0xc7, 0x83, 0x2c, 0x98, 0x0a, 0x4c, 0xc3, 0x6d, 0xb8, 0x21, 0xf1, 0xf7, + 0x0c, 0x67, 0x6e, 0xb4, 0xd4, 0x00, 0xf9, 0x16, 0x55, 0xf0, 0xe0, 0x04, 0x56, 0xf4, 0x51, 0x18, + 0x27, 0xfb, 0x1d, 0xc3, 0xb5, 0x08, 0x67, 0x0b, 0x13, 0x4b, 0x8f, 0xd3, 0xc3, 0x68, 0x59, 0x94, + 0x3d, 0x3a, 0xa8, 0x4e, 0xc9, 0xff, 0x57, 0x3d, 0x8b, 0xe0, 0xa8, 0x36, 0xfa, 0x3e, 0xb8, 0xcc, + 0xde, 0xc3, 0x2c, 0xc2, 0x98, 0x5c, 0x20, 0x05, 0xdd, 0xf1, 0x52, 0xfd, 0x64, 0x6f, 0x1b, 0xab, + 0x39, 0xf8, 0x70, 0x2e, 0x15, 0xfa, 0x19, 0xda, 0xc6, 0xfe, 0x5d, 0xdf, 0x30, 0xc9, 0x76, 0xd7, + 0xd9, 0x20, 0x7e, 0xdb, 0x76, 0xf9, 0x5d, 0x82, 0x98, 0x9e, 0x6b, 0x51, 0x56, 0xa2, 0x3d, 0x3d, + 0xc2, 0x3f, 0xc3, 0x6a, 0xbf, 0x8a, 0xb8, 0x3f, 0x1e, 0xf4, 0x21, 0x98, 0xb2, 0x5b, 0xae, 0xe7, + 0x93, 0x0d, 0xc3, 0x76, 0xc3, 0x60, 0x0e, 0x98, 0xda, 0x9d, 0x4d, 0x6b, 0x43, 0x29, 0xc7, 0x89, + 0x5a, 0x68, 0x0f, 0x90, 0x4b, 0x1e, 0xae, 0x7b, 0x16, 0x5b, 0x02, 0x9b, 0x1d, 0xb6, 0x90, 0xe7, + 0x26, 0x4b, 0x4d, 0x0d, 0xbb, 0x07, 0xac, 
0x65, 0xb0, 0xe1, 0x1c, 0x0a, 0xe8, 0x0e, 0xa0, 0xb6, + 0xb1, 0xbf, 0xdc, 0xee, 0x84, 0xbd, 0xa5, 0xae, 0xb3, 0x2b, 0xb8, 0xc6, 0x14, 0x9b, 0x0b, 0x7e, + 0x0f, 0xcb, 0x40, 0x71, 0x4e, 0x0b, 0xfd, 0x60, 0x08, 0x26, 0x6a, 0x9e, 0x6b, 0xd9, 0xec, 0x1a, + 0xf6, 0x6c, 0x42, 0xe7, 0xfb, 0x84, 0xca, 0xc7, 0x1f, 0x1d, 0x54, 0xa7, 0xa3, 0x8a, 0x0a, 0x63, + 0x7f, 0x3e, 0x52, 0xb4, 0xf0, 0x8b, 0xfd, 0x7b, 0x93, 0x1a, 0x92, 0x47, 0x07, 0xd5, 0x0b, 0x51, + 0xb3, 0xa4, 0xd2, 0x84, 0xce, 0x1d, 0x95, 0xe6, 0x37, 0x7c, 0xc3, 0x0d, 0xec, 0x01, 0xee, 0x4f, + 0xd1, 0xcd, 0x78, 0x25, 0x83, 0x0d, 0xe7, 0x50, 0x40, 0x6f, 0xc0, 0x0c, 0x2d, 0xdd, 0xec, 0x58, + 0x46, 0x48, 0x4a, 0x5e, 0x9b, 0xae, 0x0a, 0x9a, 0x33, 0x2b, 0x09, 0x4c, 0x38, 0x85, 0x99, 0xeb, + 0xc8, 0x8d, 0xc0, 0x73, 0x19, 0xbb, 0x48, 0xe8, 0xc8, 0x69, 0x29, 0x16, 0x50, 0xf4, 0x0c, 0x8c, + 0xb5, 0x49, 0x10, 0x18, 0x2d, 0xc2, 0xf6, 0xff, 0x44, 0x7c, 0xc8, 0xaf, 0xf2, 0x62, 0x2c, 0xe1, + 0xe8, 0x83, 0x30, 0x62, 0x7a, 0x16, 0x09, 0xe6, 0xc6, 0xd8, 0x0a, 0xa5, 0x5f, 0x7b, 0xa4, 0x46, + 0x0b, 0x1e, 0x1d, 0x54, 0x27, 0x98, 0x1e, 0x81, 0xfe, 0xc2, 0xbc, 0x92, 0xfe, 0x33, 0x54, 0xe6, + 0x4e, 0x5d, 0x32, 0x8e, 0xa1, 0xdb, 0x3f, 0x3f, 0x35, 0xb9, 0xfe, 0x39, 0x7a, 0xe1, 0xf1, 0xdc, + 0xd0, 0xf7, 0x9c, 0x75, 0xc7, 0x70, 0x09, 0xfa, 0x61, 0x0d, 0x66, 0x77, 0xec, 0xd6, 0x8e, 0xfa, + 0x38, 0x27, 0x0e, 0xe6, 0x52, 0x77, 0x93, 0x7b, 0x29, 0x5c, 0x4b, 0x97, 0x0f, 0x0f, 0xaa, 0xb3, + 0xe9, 0x52, 0x9c, 0xa1, 0xa9, 0x7f, 0xa6, 0x02, 0x97, 0x45, 0xcf, 0x1c, 0x7a, 0x52, 0x76, 0x1c, + 0xaf, 0xd7, 0x26, 0xee, 0x79, 0xbc, 0xa3, 0xc9, 0x2f, 0x54, 0x29, 0xfc, 0x42, 0xed, 0xcc, 0x17, + 0x1a, 0x2a, 0xf3, 0x85, 0xa2, 0x85, 0x7c, 0xc4, 0x57, 0xfa, 0x33, 0x0d, 0xe6, 0xf2, 0xe6, 0xe2, + 0x1c, 0xee, 0x70, 0xed, 0xe4, 0x1d, 0xee, 0x5e, 0xd9, 0x4b, 0x79, 0xba, 0xeb, 0x05, 0x77, 0xb9, + 0x3f, 0xad, 0xc0, 0xd5, 0xb8, 0x7a, 0xc3, 0x0d, 0x42, 0xc3, 0x71, 0xb8, 0x9a, 0xea, 0xec, 0xbf, + 0x7b, 0x27, 0x71, 0x15, 0x5f, 0x1b, 0x6c, 0xa8, 0x6a, 0xdf, 0x0b, 0x35, 0xe5, 0xfb, 0x29, 0x4d, + 0xf9, 0xfa, 0x29, 0xd2, 0xec, 0xaf, 0x34, 0xff, 0x6f, 0x1a, 0xcc, 0xe7, 0x37, 0x3c, 0x87, 0x45, + 0xe5, 0x25, 0x17, 0xd5, 0x77, 0x9c, 0xde, 0xa8, 0x0b, 0x96, 0xd5, 0x2f, 0x57, 0x8a, 0x46, 0xcb, + 0x94, 0x05, 0xdb, 0x70, 0x81, 0xde, 0xe2, 0x82, 0x50, 0xa8, 0x74, 0x4f, 0x66, 0xeb, 0x20, 0x75, + 0x5c, 0x17, 0x70, 0x12, 0x07, 0x4e, 0x23, 0x45, 0x6b, 0x30, 0x46, 0xaf, 0x6e, 0x14, 0x7f, 0xe5, + 0xf8, 0xf8, 0xa3, 0xd3, 0xa8, 0xc9, 0xdb, 0x62, 0x89, 0x04, 0x7d, 0x37, 0x4c, 0x5b, 0xd1, 0x8e, + 0x3a, 0xe2, 0xa1, 0x33, 0x8d, 0x95, 0x29, 0xdf, 0xeb, 0x6a, 0x6b, 0x9c, 0x44, 0xa6, 0xff, 0xa5, + 0x06, 0x8f, 0xf7, 0x5b, 0x5b, 0xe8, 0x4d, 0x00, 0x53, 0x8a, 0x17, 0xdc, 0xd4, 0xa5, 0xa4, 0x7a, + 0x3e, 0x12, 0x52, 0xe2, 0x0d, 0x1a, 0x15, 0x05, 0x58, 0x21, 0x92, 0xf3, 0x7e, 0x5a, 0x39, 0xa3, + 0xf7, 0x53, 0xfd, 0xbf, 0x6b, 0x2a, 0x2b, 0x52, 0xbf, 0xed, 0xbb, 0x8d, 0x15, 0xa9, 0x7d, 0x2f, + 0xd4, 0x0f, 0xfe, 0x41, 0x05, 0x6e, 0xe6, 0x37, 0x51, 0xce, 0xde, 0x4f, 0xc0, 0x68, 0x87, 0xdb, + 0x23, 0x0d, 0xb1, 0xb3, 0xf1, 0x69, 0xca, 0x59, 0xb8, 0xb5, 0xd0, 0xa3, 0x83, 0xea, 0x7c, 0x1e, + 0xa3, 0x17, 0x76, 0x46, 0xa2, 0x1d, 0xb2, 0x53, 0x5a, 0x12, 0x2e, 0xfd, 0x7d, 0xeb, 0x31, 0x99, + 0x8b, 0xb1, 0x45, 0x9c, 0x63, 0x2b, 0x46, 0x3e, 0xa9, 0xc1, 0x4c, 0x62, 0x45, 0x07, 0x73, 0x23, + 0x6c, 0x8d, 0x96, 0x7a, 0xba, 0x4a, 0x6c, 0x95, 0xf8, 0xe4, 0x4e, 0x14, 0x07, 0x38, 0x45, 0x30, + 0xc5, 0x66, 0xd5, 0x59, 0x7d, 0xd7, 0xb1, 0x59, 0xb5, 0xf3, 0x05, 0x6c, 0xf6, 0xa7, 0x2b, 0x45, + 0xa3, 0x65, 0x6c, 0xf6, 0x21, 0x4c, 0x48, 0x4b, 0x5d, 0xc9, 0x2e, 
0xee, 0x0c, 0xda, 0x27, 0x8e, + 0x2e, 0x36, 0xdb, 0x90, 0x25, 0x01, 0x8e, 0x69, 0xa1, 0x1f, 0xd2, 0x00, 0xe2, 0x0f, 0x23, 0x36, + 0xd5, 0xc6, 0xe9, 0x4d, 0x87, 0x22, 0xd6, 0xcc, 0xd0, 0x2d, 0xad, 0x2c, 0x0a, 0x85, 0xae, 0xfe, + 0xbf, 0x87, 0x00, 0x65, 0xfb, 0x4e, 0xc5, 0xcd, 0x5d, 0xdb, 0xb5, 0xd2, 0x17, 0x82, 0xfb, 0xb6, + 0x6b, 0x61, 0x06, 0x39, 0x86, 0x40, 0xfa, 0x02, 0x5c, 0x68, 0x39, 0xde, 0x96, 0xe1, 0x38, 0x3d, + 0x61, 0xba, 0x2a, 0x8c, 0x20, 0x2f, 0xd1, 0x83, 0xe9, 0x6e, 0x12, 0x84, 0xd3, 0x75, 0x51, 0x07, + 0x66, 0x7d, 0x7a, 0x15, 0x37, 0x6d, 0x87, 0x5d, 0x9d, 0xbc, 0x6e, 0x58, 0x52, 0xd7, 0xc3, 0xc4, + 0x7b, 0x9c, 0xc2, 0x85, 0x33, 0xd8, 0xd1, 0xfb, 0x60, 0xac, 0xe3, 0xdb, 0x6d, 0xc3, 0xef, 0xb1, + 0xcb, 0xd9, 0xf8, 0xd2, 0x24, 0x3d, 0xe1, 0xd6, 0x79, 0x11, 0x96, 0x30, 0xf4, 0x7d, 0x30, 0xe1, + 0xd8, 0xdb, 0xc4, 0xec, 0x99, 0x0e, 0x11, 0xca, 0x99, 0x07, 0xa7, 0xb3, 0x64, 0x56, 0x24, 0x5a, + 0xf1, 0x24, 0x2c, 0x7f, 0xe2, 0x98, 0x20, 0x6a, 0xc0, 0xa5, 0x87, 0x9e, 0xbf, 0x4b, 0x7c, 0x87, + 0x04, 0x41, 0xb3, 0xdb, 0xe9, 0x78, 0x7e, 0x48, 0x2c, 0xa6, 0xc2, 0x19, 0xe7, 0xf6, 0xb9, 0xaf, + 0x64, 0xc1, 0x38, 0xaf, 0x8d, 0xfe, 0x4e, 0x05, 0xae, 0xf7, 0xe9, 0x04, 0xc2, 0x74, 0x6f, 0x88, + 0x39, 0x12, 0x2b, 0xe1, 0x43, 0x7c, 0x3d, 0x8b, 0xc2, 0x47, 0x07, 0xd5, 0x27, 0xfb, 0x20, 0x68, + 0xd2, 0xa5, 0x48, 0x5a, 0x3d, 0x1c, 0xa3, 0x41, 0x0d, 0x18, 0xb5, 0x62, 0x8d, 0xe6, 0xc4, 0xd2, + 0xb3, 0x94, 0x5b, 0x73, 0xdd, 0xc3, 0x71, 0xb1, 0x09, 0x04, 0x68, 0x05, 0xc6, 0xf8, 0x43, 0x32, + 0x11, 0x9c, 0xff, 0x39, 0x76, 0x3d, 0xe6, 0x45, 0xc7, 0x45, 0x26, 0x51, 0xe8, 0xff, 0x4b, 0x83, + 0xb1, 0x9a, 0xe7, 0x93, 0xfa, 0x5a, 0x13, 0xf5, 0x60, 0x52, 0x71, 0x21, 0x10, 0x5c, 0xb0, 0x24, + 0x5b, 0x60, 0x18, 0x17, 0x63, 0x6c, 0xd2, 0xdc, 0x35, 0x2a, 0xc0, 0x2a, 0x2d, 0xf4, 0x26, 0x9d, + 0xf3, 0x87, 0xbe, 0x1d, 0x52, 0xc2, 0x83, 0xbc, 0xbf, 0x71, 0xc2, 0x58, 0xe2, 0xe2, 0x2b, 0x2a, + 0xfa, 0x89, 0x63, 0x2a, 0xfa, 0x3a, 0xe5, 0x00, 0xe9, 0x6e, 0xa2, 0xdb, 0x30, 0xdc, 0xf6, 0x2c, + 0xf9, 0xdd, 0xdf, 0x2f, 0xf7, 0xf7, 0xaa, 0x67, 0xd1, 0xb9, 0xbd, 0x9a, 0x6d, 0xc1, 0xb4, 0x84, + 0xac, 0x8d, 0xbe, 0x06, 0xb3, 0x69, 0xfa, 0xe8, 0x36, 0xcc, 0x98, 0x5e, 0xbb, 0xed, 0xb9, 0xcd, + 0xee, 0xf6, 0xb6, 0xbd, 0x4f, 0x12, 0x76, 0xc8, 0xb5, 0x04, 0x04, 0xa7, 0x6a, 0xea, 0x3f, 0xa5, + 0xc1, 0x10, 0xfd, 0x2e, 0x3a, 0x8c, 0x5a, 0x5e, 0xdb, 0xb0, 0x5d, 0xd1, 0x2b, 0x66, 0x73, 0x5d, + 0x67, 0x25, 0x58, 0x40, 0x50, 0x07, 0x26, 0xa4, 0xd0, 0x34, 0x90, 0x2d, 0x4c, 0x7d, 0xad, 0x19, + 0xd9, 0x0f, 0x46, 0x9c, 0x5c, 0x96, 0x04, 0x38, 0x26, 0xa2, 0x1b, 0x70, 0xb1, 0xbe, 0xd6, 0x6c, + 0xb8, 0xa6, 0xd3, 0xb5, 0xc8, 0xf2, 0x3e, 0xfb, 0x43, 0x79, 0x89, 0xcd, 0x4b, 0xc4, 0x38, 0x19, + 0x2f, 0x11, 0x95, 0xb0, 0x84, 0xd1, 0x6a, 0x84, 0xb7, 0x10, 0xc6, 0xc2, 0xac, 0x9a, 0x40, 0x82, + 0x25, 0x4c, 0xff, 0x6a, 0x05, 0x26, 0x95, 0x0e, 0x21, 0x07, 0xc6, 0xf8, 0x70, 0xa5, 0xad, 0xde, + 0x72, 0xc9, 0x21, 0x26, 0x7b, 0xcd, 0xa9, 0xf3, 0x09, 0x0d, 0xb0, 0x24, 0xa1, 0xf2, 0xc5, 0x4a, + 0x1f, 0xbe, 0xb8, 0x00, 0x10, 0xc4, 0x96, 0xeb, 0x7c, 0x4b, 0xb2, 0xa3, 0x47, 0xb1, 0x57, 0x57, + 0x6a, 0xa0, 0xc7, 0xc5, 0x09, 0xc2, 0x8d, 0x51, 0xc6, 0x53, 0xa7, 0xc7, 0x36, 0x8c, 0xbc, 0xe5, + 0xb9, 0x24, 0x10, 0x6f, 0x70, 0xa7, 0x34, 0xc0, 0x09, 0x2a, 0x1f, 0xbc, 0x46, 0xf1, 0x62, 0x8e, + 0x5e, 0xff, 0x59, 0x0d, 0xa0, 0x6e, 0x84, 0x06, 0x7f, 0x32, 0x3a, 0x86, 0xbd, 0xf7, 0xe3, 0x89, + 0x83, 0x6f, 0x3c, 0x63, 0x03, 0x3b, 0x1c, 0xd8, 0x6f, 0xc9, 0xe1, 0x47, 0x02, 0x35, 0xc7, 0xde, + 0xb4, 0xdf, 0x22, 0x98, 0xc1, 0xd1, 0x07, 0x60, 0x82, 0xb8, 0xa6, 0xdf, 0xeb, 0x50, 0xe6, 
0x3d, + 0xcc, 0x66, 0x95, 0xed, 0xd0, 0x65, 0x59, 0x88, 0x63, 0xb8, 0xfe, 0x2c, 0x24, 0x6f, 0x45, 0x47, + 0xf7, 0x52, 0xff, 0xda, 0x30, 0x5c, 0x5b, 0xde, 0xa8, 0xd5, 0x05, 0x3e, 0xdb, 0x73, 0xef, 0x93, + 0xde, 0xdf, 0x98, 0xd7, 0xfc, 0x8d, 0x79, 0xcd, 0x29, 0x9a, 0xd7, 0x3c, 0xd2, 0x60, 0x76, 0x79, + 0xbf, 0x63, 0xfb, 0xcc, 0xcf, 0x80, 0xf8, 0xf4, 0x1a, 0x8b, 0x9e, 0x81, 0xb1, 0x3d, 0xfe, 0xaf, + 0x58, 0x5c, 0x91, 0xaa, 0x40, 0xd4, 0xc0, 0x12, 0x8e, 0xb6, 0x61, 0x86, 0xb0, 0xe6, 0x4c, 0x5e, + 0x35, 0xc2, 0x32, 0x0b, 0x88, 0xbb, 0xb1, 0x24, 0xb0, 0xe0, 0x14, 0x56, 0xd4, 0x84, 0x19, 0xd3, + 0x31, 0x82, 0xc0, 0xde, 0xb6, 0xcd, 0xd8, 0x82, 0x6e, 0x62, 0xe9, 0x03, 0xec, 0xe8, 0x49, 0x40, + 0x1e, 0x1d, 0x54, 0xaf, 0x88, 0x7e, 0x26, 0x01, 0x38, 0x85, 0x42, 0xff, 0x7c, 0x05, 0xa6, 0x97, + 0xf7, 0x3b, 0x5e, 0xd0, 0xf5, 0x09, 0xab, 0x7a, 0x0e, 0x37, 0xf0, 0x67, 0x60, 0x6c, 0xc7, 0x70, + 0x2d, 0x87, 0xf8, 0x82, 0xfb, 0x44, 0x73, 0x7b, 0x8f, 0x17, 0x63, 0x09, 0x47, 0x6f, 0x03, 0x04, + 0xe6, 0x0e, 0xb1, 0xba, 0x4c, 0x82, 0xe1, 0x9b, 0xe4, 0x7e, 0x19, 0x1e, 0x9a, 0x18, 0x63, 0x33, + 0x42, 0x29, 0x38, 0x7b, 0xf4, 0x1b, 0x2b, 0xe4, 0xf4, 0x3f, 0xd4, 0xe0, 0x62, 0xa2, 0xdd, 0x39, + 0x5c, 0x2c, 0xb7, 0x93, 0x17, 0xcb, 0xc5, 0x81, 0xc7, 0x5a, 0x70, 0x9f, 0xfc, 0x91, 0x0a, 0x3c, + 0x56, 0x30, 0x27, 0x19, 0x73, 0x0b, 0xed, 0x9c, 0xcc, 0x2d, 0xba, 0x30, 0x19, 0x7a, 0x8e, 0x30, + 0xf4, 0x94, 0x33, 0x50, 0xca, 0x98, 0x62, 0x23, 0x42, 0x13, 0x1b, 0x53, 0xc4, 0x65, 0x01, 0x56, + 0xe9, 0xe8, 0xbf, 0xa9, 0xc1, 0x44, 0xa4, 0xbf, 0xfa, 0x86, 0x7a, 0x43, 0x3a, 0xbe, 0xe7, 0x9d, + 0xfe, 0x3b, 0x15, 0xb8, 0x1a, 0xe1, 0x96, 0xf7, 0x84, 0x66, 0x48, 0xf9, 0xc6, 0xd1, 0x97, 0xe0, + 0xc7, 0xc5, 0x39, 0xac, 0xc8, 0x02, 0x8a, 0xa4, 0x40, 0xe5, 0xa6, 0xae, 0xdf, 0xf1, 0x02, 0x29, + 0x0e, 0x70, 0xb9, 0x89, 0x17, 0x61, 0x09, 0x43, 0x6b, 0x30, 0x12, 0x50, 0x7a, 0xe2, 0x34, 0x39, + 0xe1, 0x6c, 0x30, 0x89, 0x86, 0xf5, 0x17, 0x73, 0x34, 0xe8, 0x6d, 0x55, 0xa5, 0x31, 0x52, 0x5e, + 0xcd, 0x42, 0x47, 0x62, 0xc9, 0x19, 0xc9, 0xf1, 0x46, 0xc9, 0x53, 0x6b, 0xe8, 0x2b, 0x30, 0x2b, + 0x2c, 0x36, 0xf8, 0xb2, 0x71, 0x4d, 0x82, 0x3e, 0x9a, 0x58, 0x19, 0x4f, 0xa5, 0x5e, 0x91, 0x2f, + 0xa7, 0xeb, 0xc7, 0x2b, 0x46, 0x0f, 0x60, 0xfc, 0xae, 0xe8, 0x24, 0x9a, 0x87, 0x8a, 0x2d, 0xbf, + 0x05, 0x08, 0x1c, 0x95, 0x46, 0x1d, 0x57, 0x6c, 0x2b, 0x92, 0x87, 0x2a, 0x85, 0x52, 0x9b, 0x72, + 0x2c, 0x0d, 0xf5, 0x3f, 0x96, 0xf4, 0x3f, 0xa9, 0xc0, 0x65, 0x49, 0x55, 0x8e, 0xb1, 0x2e, 0xde, + 0xe0, 0x8e, 0x90, 0x0d, 0x8f, 0x56, 0x8a, 0x3c, 0x80, 0x61, 0xc6, 0x00, 0x4b, 0xbd, 0xcd, 0x45, + 0x08, 0x69, 0x77, 0x30, 0x43, 0x84, 0xbe, 0x0f, 0x46, 0x1d, 0x63, 0x8b, 0x38, 0xd2, 0x52, 0xae, + 0x94, 0x0a, 0x29, 0x6f, 0xb8, 0x5c, 0xb3, 0x19, 0x70, 0x6f, 0x80, 0xe8, 0xc9, 0x86, 0x17, 0x62, + 0x41, 0x73, 0xfe, 0x79, 0x98, 0x54, 0xaa, 0xa1, 0x59, 0x18, 0xda, 0x25, 0xfc, 0x6d, 0x76, 0x02, + 0xd3, 0x7f, 0xd1, 0x65, 0x18, 0xd9, 0x33, 0x9c, 0xae, 0x98, 0x12, 0xcc, 0x7f, 0xdc, 0xae, 0x7c, + 0x54, 0xd3, 0x7f, 0x51, 0x83, 0xc9, 0x7b, 0xf6, 0x16, 0xf1, 0xb9, 0xd9, 0x05, 0xbb, 0x0a, 0x25, + 0x1c, 0x9f, 0x27, 0xf3, 0x9c, 0x9e, 0xd1, 0x3e, 0x4c, 0x88, 0x93, 0x26, 0xb2, 0xca, 0xbd, 0x5b, + 0xee, 0x11, 0x38, 0x22, 0x2d, 0x38, 0xb8, 0xea, 0x68, 0x25, 0x29, 0xe0, 0x98, 0x98, 0xfe, 0x36, + 0x5c, 0xca, 0x69, 0x84, 0xaa, 0x6c, 0xfb, 0xfa, 0xa1, 0x58, 0x16, 0x72, 0x3f, 0xfa, 0x21, 0xe6, + 0xe5, 0xe8, 0x1a, 0x0c, 0x11, 0xd7, 0x12, 0x6b, 0x62, 0xec, 0xf0, 0xa0, 0x3a, 0xb4, 0xec, 0x5a, + 0x98, 0x96, 0x51, 0x36, 0xe5, 0x78, 0x09, 0x99, 0x84, 0xb1, 0xa9, 0x15, 0x51, 0x86, 0x23, 0x28, + 0x7b, 0xb6, 0x4f, 
0xbf, 0x50, 0x53, 0xe9, 0x74, 0x76, 0x3b, 0xb5, 0x7b, 0x06, 0x79, 0x18, 0x4f, + 0xef, 0xc4, 0xa5, 0x39, 0x31, 0x21, 0x99, 0x3d, 0x8d, 0x33, 0x74, 0xf5, 0x5f, 0x1b, 0x86, 0x27, + 0xee, 0x79, 0xbe, 0xfd, 0x96, 0xe7, 0x86, 0x86, 0xb3, 0xee, 0x59, 0xb1, 0x81, 0x9d, 0x60, 0xca, + 0x9f, 0xd6, 0xe0, 0x31, 0xb3, 0xd3, 0xe5, 0xd2, 0xad, 0xb4, 0x7b, 0x5a, 0x27, 0xbe, 0xed, 0x95, + 0xb5, 0xb3, 0x63, 0xae, 0xb5, 0xb5, 0xf5, 0xcd, 0x3c, 0x94, 0xb8, 0x88, 0x16, 0x33, 0xf7, 0xb3, + 0xbc, 0x87, 0x2e, 0xeb, 0x5c, 0x33, 0x64, 0xb3, 0xf9, 0x56, 0xfc, 0x11, 0x4a, 0x9a, 0xfb, 0xd5, + 0x73, 0x31, 0xe2, 0x02, 0x4a, 0xe8, 0x07, 0xe0, 0x8a, 0xcd, 0x3b, 0x87, 0x89, 0x61, 0xd9, 0x2e, + 0x09, 0x02, 0x6e, 0x2b, 0x34, 0x80, 0x3d, 0x5b, 0x23, 0x0f, 0x21, 0xce, 0xa7, 0x83, 0x5e, 0x07, + 0x08, 0x7a, 0xae, 0x29, 0xe6, 0x7f, 0xa4, 0x14, 0x55, 0x2e, 0x04, 0x46, 0x58, 0xb0, 0x82, 0x91, + 0xde, 0x70, 0xc3, 0x68, 0x51, 0x8e, 0x32, 0xdb, 0x38, 0x76, 0xc3, 0x8d, 0xd7, 0x50, 0x0c, 0xd7, + 0xff, 0x99, 0x06, 0x63, 0xc2, 0x7d, 0x1f, 0xbd, 0x3f, 0xa5, 0xe5, 0x89, 0x78, 0x4f, 0x4a, 0xd3, + 0xd3, 0x63, 0x4f, 0x7d, 0x42, 0xc3, 0x27, 0x44, 0x89, 0x52, 0x6a, 0x02, 0x41, 0x38, 0x56, 0x17, + 0x26, 0x9e, 0xfc, 0xa4, 0x0a, 0x51, 0x21, 0xa6, 0x7f, 0x51, 0x83, 0x8b, 0x99, 0x56, 0xc7, 0x90, + 0x17, 0xce, 0xd1, 0x8a, 0xe6, 0x0f, 0x86, 0x61, 0x86, 0x19, 0xfb, 0xb9, 0x86, 0xc3, 0x15, 0x30, + 0xe7, 0x70, 0x41, 0xf9, 0x00, 0x4c, 0xd8, 0xed, 0x76, 0x37, 0xa4, 0xac, 0x5a, 0xe8, 0xd0, 0xd9, + 0x37, 0x6f, 0xc8, 0x42, 0x1c, 0xc3, 0x91, 0x2b, 0x8e, 0x42, 0xce, 0xc4, 0x57, 0xca, 0x7d, 0x39, + 0x75, 0x80, 0x0b, 0xf4, 0xd8, 0xe2, 0xe7, 0x55, 0xde, 0x49, 0xf9, 0xc3, 0x1a, 0x40, 0x10, 0xfa, + 0xb6, 0xdb, 0xa2, 0x85, 0xe2, 0xb8, 0xc4, 0xa7, 0x40, 0xb6, 0x19, 0x21, 0xe5, 0xc4, 0xa3, 0x39, + 0x8a, 0x01, 0x58, 0xa1, 0x8c, 0x16, 0x85, 0x94, 0xc0, 0x39, 0xfe, 0x37, 0xa7, 0xe4, 0xa1, 0x27, + 0xb2, 0xd1, 0x69, 0x84, 0x4b, 0x67, 0x2c, 0x46, 0xcc, 0x7f, 0x04, 0x26, 0x22, 0x7a, 0x47, 0x9d, + 0xba, 0x53, 0xca, 0xa9, 0x3b, 0xff, 0x02, 0x5c, 0x48, 0x75, 0xf7, 0x44, 0x87, 0xf6, 0x7f, 0xd0, + 0x00, 0x25, 0x47, 0x7f, 0x0e, 0x57, 0xbb, 0x56, 0xf2, 0x6a, 0xb7, 0x34, 0xf8, 0x27, 0x2b, 0xb8, + 0xdb, 0x7d, 0x65, 0x1a, 0x58, 0x74, 0x93, 0x28, 0x7a, 0x8c, 0x38, 0xb8, 0xe8, 0x39, 0x1b, 0x7b, + 0x48, 0x88, 0x9d, 0x3b, 0xc0, 0x39, 0x7b, 0x3f, 0x85, 0x2b, 0x3e, 0x67, 0xd3, 0x10, 0x9c, 0xa1, + 0x8b, 0x3e, 0xa3, 0xc1, 0xac, 0x91, 0x8c, 0x6e, 0x22, 0x67, 0xa6, 0x94, 0xf7, 0x6c, 0x2a, 0x52, + 0x4a, 0xdc, 0x97, 0x14, 0x20, 0xc0, 0x19, 0xb2, 0xe8, 0x43, 0x30, 0x65, 0x74, 0xec, 0xc5, 0xae, + 0x65, 0xd3, 0xab, 0x81, 0x0c, 0x4d, 0xc1, 0xae, 0xab, 0x8b, 0xeb, 0x8d, 0xa8, 0x1c, 0x27, 0x6a, + 0x45, 0x61, 0x44, 0xc4, 0x44, 0x0e, 0x0f, 0x18, 0x46, 0x44, 0xcc, 0x61, 0x1c, 0x46, 0x44, 0x4c, + 0x9d, 0x4a, 0x04, 0xb9, 0x00, 0x9e, 0x6d, 0x99, 0x82, 0x24, 0x7f, 0xb5, 0x2b, 0x75, 0x43, 0x7e, + 0xd0, 0xa8, 0xd7, 0x04, 0x45, 0x76, 0xfa, 0xc5, 0xbf, 0xb1, 0x42, 0x01, 0x7d, 0x4e, 0x83, 0x69, + 0xc1, 0xbb, 0x05, 0xcd, 0x31, 0xf6, 0x89, 0x5e, 0x2b, 0xbb, 0x5e, 0x52, 0x6b, 0x72, 0x01, 0xab, + 0xc8, 0x39, 0xdf, 0x89, 0x1c, 0x6c, 0x12, 0x30, 0x9c, 0xec, 0x07, 0xfa, 0x7b, 0x1a, 0x5c, 0x0e, + 0x88, 0xbf, 0x67, 0x9b, 0x64, 0xd1, 0x34, 0xbd, 0xae, 0x2b, 0xbf, 0xc3, 0x78, 0xf9, 0xa8, 0x0b, + 0xcd, 0x1c, 0x7c, 0xdc, 0xb2, 0x3b, 0x0f, 0x82, 0x73, 0xe9, 0x53, 0xb1, 0xec, 0xc2, 0x43, 0x23, + 0x34, 0x77, 0x6a, 0x86, 0xb9, 0xc3, 0x74, 0xe5, 0xdc, 0x98, 0xbb, 0xe4, 0xba, 0x7e, 0x25, 0x89, + 0x8a, 0xbf, 0x3a, 0xa7, 0x0a, 0x71, 0x9a, 0x20, 0xf2, 0x60, 0xdc, 0x17, 0x21, 0xa3, 0xe6, 0xa0, + 0xbc, 0x48, 0x91, 0x89, 0x3f, 0xc5, 0x05, 
0x7b, 0xf9, 0x0b, 0x47, 0x44, 0x50, 0x0b, 0x9e, 0xe0, + 0x57, 0x9b, 0x45, 0xd7, 0x73, 0x7b, 0x6d, 0xaf, 0x1b, 0x2c, 0x76, 0xc3, 0x1d, 0xe2, 0x86, 0x52, + 0x57, 0x39, 0xc9, 0x8e, 0x51, 0x66, 0xcf, 0xbe, 0xdc, 0xaf, 0x22, 0xee, 0x8f, 0x07, 0xbd, 0x0a, + 0xe3, 0x64, 0x8f, 0xb8, 0xe1, 0xc6, 0xc6, 0x0a, 0xb3, 0x0b, 0x3f, 0xb9, 0xb4, 0xc7, 0x86, 0xb0, + 0x2c, 0x70, 0xe0, 0x08, 0x1b, 0xda, 0x85, 0x31, 0x87, 0xc7, 0xfc, 0x9a, 0x9b, 0x2e, 0xcf, 0x14, + 0xd3, 0xf1, 0xc3, 0xf8, 0xfd, 0x4f, 0xfc, 0xc0, 0x92, 0x02, 0xea, 0xc0, 0x4d, 0x8b, 0x6c, 0x1b, + 0x5d, 0x27, 0x5c, 0xf3, 0x42, 0x2a, 0xd2, 0xf6, 0x62, 0xfd, 0x94, 0x74, 0x01, 0x98, 0x61, 0x0e, + 0xd2, 0x4f, 0x1d, 0x1e, 0x54, 0x6f, 0xd6, 0x8f, 0xa8, 0x8b, 0x8f, 0xc4, 0x86, 0x7a, 0xf0, 0xa4, + 0xa8, 0xb3, 0xe9, 0xfa, 0xc4, 0x30, 0x77, 0xe8, 0x2c, 0x67, 0x89, 0x5e, 0x60, 0x44, 0xff, 0xbf, + 0xc3, 0x83, 0xea, 0x93, 0xf5, 0xa3, 0xab, 0xe3, 0xe3, 0xe0, 0x9c, 0xff, 0x04, 0xa0, 0xec, 0x3e, + 0x3f, 0xea, 0xc0, 0x1e, 0x57, 0x0f, 0xec, 0x2f, 0x8c, 0xc0, 0x75, 0xca, 0x3e, 0x62, 0x31, 0x75, + 0xd5, 0x70, 0x8d, 0xd6, 0x37, 0xe6, 0xd1, 0xf6, 0x8b, 0x1a, 0x3c, 0xb6, 0x93, 0x7f, 0x85, 0x14, + 0x82, 0xf2, 0x4b, 0xa5, 0xae, 0xfa, 0xfd, 0x6e, 0xa5, 0x7c, 0x67, 0xf5, 0xad, 0x82, 0x8b, 0x3a, + 0x85, 0x3e, 0x01, 0xb3, 0xae, 0x67, 0x91, 0x5a, 0xa3, 0x8e, 0x57, 0x8d, 0x60, 0xb7, 0x29, 0x5f, + 0xfe, 0x46, 0xb8, 0xcd, 0xc9, 0x5a, 0x0a, 0x86, 0x33, 0xb5, 0xd1, 0x1e, 0xa0, 0x8e, 0x67, 0x2d, + 0xef, 0xd9, 0xa6, 0x7c, 0x73, 0x2a, 0x6f, 0xe7, 0xc2, 0x1e, 0xb6, 0xd6, 0x33, 0xd8, 0x70, 0x0e, + 0x05, 0x76, 0x07, 0xa6, 0x9d, 0x59, 0xf5, 0x5c, 0x3b, 0xf4, 0x7c, 0xe6, 0x07, 0x33, 0xd0, 0x55, + 0x90, 0xdd, 0x81, 0xd7, 0x72, 0x31, 0xe2, 0x02, 0x4a, 0xfa, 0xff, 0xd0, 0xe0, 0x02, 0x5d, 0x16, + 0xeb, 0xbe, 0xb7, 0xdf, 0xfb, 0x46, 0x5c, 0x90, 0xcf, 0x08, 0x23, 0x08, 0xae, 0xbb, 0xb9, 0xa2, + 0x18, 0x40, 0x4c, 0xb0, 0x3e, 0xc7, 0x36, 0x0f, 0xaa, 0xfa, 0x6a, 0xa8, 0x58, 0x7d, 0xa5, 0x7f, + 0xae, 0xc2, 0x45, 0x4c, 0xa9, 0x3e, 0xfa, 0x86, 0xdc, 0x87, 0x1f, 0x81, 0x69, 0x5a, 0xb6, 0x6a, + 0xec, 0xaf, 0xd7, 0x5f, 0xf6, 0x1c, 0xe9, 0xca, 0xc3, 0xcc, 0x73, 0xef, 0xab, 0x00, 0x9c, 0xac, + 0x87, 0x6e, 0xc3, 0x58, 0x87, 0x3b, 0x3c, 0x8b, 0xcb, 0xcd, 0x4d, 0x6e, 0x29, 0xc0, 0x8a, 0x1e, + 0x1d, 0x54, 0x2f, 0xc6, 0x8f, 0x25, 0xa2, 0x10, 0xcb, 0x06, 0xfa, 0x67, 0xaf, 0x00, 0x43, 0xee, + 0x90, 0xf0, 0x1b, 0x71, 0x4e, 0x9e, 0x85, 0x49, 0xb3, 0xd3, 0xad, 0xdd, 0x69, 0xbe, 0xd4, 0xf5, + 0xd8, 0xa5, 0x95, 0xc5, 0x66, 0xa4, 0x32, 0x67, 0x6d, 0x7d, 0x53, 0x16, 0x63, 0xb5, 0x0e, 0xe5, + 0x0e, 0x66, 0xa7, 0x2b, 0xf8, 0xed, 0xba, 0x6a, 0xa3, 0xca, 0xb8, 0x43, 0x6d, 0x7d, 0x33, 0x01, + 0xc3, 0x99, 0xda, 0xe8, 0x07, 0x60, 0x8a, 0x88, 0x8d, 0x7b, 0xcf, 0xf0, 0x2d, 0xc1, 0x17, 0x1a, + 0x65, 0x07, 0x1f, 0x4d, 0xad, 0xe4, 0x06, 0x5c, 0x54, 0x5f, 0x56, 0x48, 0xe0, 0x04, 0x41, 0xf4, + 0x5d, 0x70, 0x4d, 0xfe, 0xa6, 0x5f, 0xd9, 0xb3, 0xd2, 0x8c, 0x62, 0x84, 0xfb, 0x98, 0x2e, 0x17, + 0x55, 0xc2, 0xc5, 0xed, 0xd1, 0x2f, 0x68, 0x70, 0x35, 0x82, 0xda, 0xae, 0xdd, 0xee, 0xb6, 0x31, + 0x31, 0x1d, 0xc3, 0x6e, 0x0b, 0x01, 0xfd, 0x95, 0x53, 0x1b, 0x68, 0x12, 0x3d, 0x67, 0x56, 0xf9, + 0x30, 0x5c, 0xd0, 0x25, 0xf4, 0x45, 0x0d, 0x6e, 0x4a, 0xd0, 0xba, 0x4f, 0x82, 0xa0, 0xeb, 0x93, + 0xd8, 0x91, 0x4c, 0x4c, 0xc9, 0x58, 0x29, 0xde, 0xc9, 0x24, 0x95, 0xe5, 0x23, 0x70, 0xe3, 0x23, + 0xa9, 0xab, 0xcb, 0xa5, 0xe9, 0x6d, 0x87, 0x42, 0xa2, 0x3f, 0xab, 0xe5, 0x42, 0x49, 0xe0, 0x04, + 0x41, 0xf4, 0xcf, 0x35, 0x78, 0x4c, 0x2d, 0x50, 0x57, 0x0b, 0x17, 0xe5, 0x5f, 0x3d, 0xb5, 0xce, + 0xa4, 0xf0, 0x73, 0x5d, 0x70, 0x01, 0x10, 0x17, 0xf5, 0x8a, 0xb2, 
0xed, 0x36, 0x5b, 0x98, 0x5c, + 0xdc, 0x1f, 0xe1, 0x6c, 0x9b, 0xaf, 0xd5, 0x00, 0x4b, 0x18, 0xbd, 0xe8, 0x76, 0x3c, 0x6b, 0xdd, + 0xb6, 0x82, 0x15, 0xbb, 0x6d, 0x87, 0x4c, 0x28, 0x1f, 0xe2, 0xd3, 0xb1, 0xee, 0x59, 0xeb, 0x8d, + 0x3a, 0x2f, 0xc7, 0x89, 0x5a, 0xcc, 0xa5, 0xdb, 0x6e, 0x1b, 0x2d, 0xb2, 0xde, 0x75, 0x9c, 0x75, + 0xdf, 0x63, 0x0a, 0xc3, 0x3a, 0x31, 0x2c, 0xc7, 0x76, 0x49, 0x49, 0x21, 0x9c, 0x6d, 0xb7, 0x46, + 0x11, 0x52, 0x5c, 0x4c, 0x0f, 0x2d, 0x00, 0x6c, 0x1b, 0xb6, 0xd3, 0x7c, 0x68, 0x74, 0x1e, 0xb8, + 0x4c, 0x52, 0x1f, 0xe7, 0x57, 0xd8, 0x3b, 0x51, 0x29, 0x56, 0x6a, 0xd0, 0xd5, 0x44, 0xb9, 0x20, + 0x26, 0x3c, 0x94, 0x10, 0x93, 0xaa, 0x4f, 0x63, 0x35, 0x49, 0x84, 0x7c, 0xfa, 0xee, 0x2b, 0x24, + 0x70, 0x82, 0x20, 0xfa, 0xb4, 0x06, 0x33, 0x41, 0x2f, 0x08, 0x49, 0x3b, 0xea, 0xc3, 0x85, 0xd3, + 0xee, 0x03, 0x53, 0xa5, 0x36, 0x13, 0x44, 0x70, 0x8a, 0x28, 0x32, 0xe0, 0x3a, 0x9b, 0xd5, 0xbb, + 0xb5, 0x7b, 0x76, 0x6b, 0x27, 0x72, 0xd4, 0x5e, 0x27, 0xbe, 0x49, 0xdc, 0x70, 0x6e, 0x96, 0xad, + 0x1b, 0x66, 0x4a, 0xd3, 0x28, 0xae, 0x86, 0xfb, 0xe1, 0x40, 0xaf, 0xc3, 0xbc, 0x00, 0xaf, 0x78, + 0x0f, 0x33, 0x14, 0x2e, 0x32, 0x0a, 0xcc, 0x74, 0xa8, 0x51, 0x58, 0x0b, 0xf7, 0xc1, 0x80, 0x1a, + 0x70, 0x29, 0x20, 0x3e, 0x7b, 0x09, 0x21, 0xd1, 0xe2, 0x09, 0xe6, 0x50, 0x6c, 0x35, 0xdc, 0xcc, + 0x82, 0x71, 0x5e, 0x1b, 0xf4, 0x42, 0xe4, 0x98, 0xd4, 0xa3, 0x05, 0x2f, 0xad, 0x37, 0xe7, 0x2e, + 0xb1, 0xfe, 0x5d, 0x52, 0xfc, 0x8d, 0x24, 0x08, 0xa7, 0xeb, 0x52, 0xd9, 0x42, 0x16, 0x2d, 0x75, + 0xfd, 0x20, 0x9c, 0xbb, 0xcc, 0x1a, 0x33, 0xd9, 0x02, 0xab, 0x00, 0x9c, 0xac, 0x87, 0x6e, 0xc3, + 0x4c, 0x40, 0x4c, 0xd3, 0x6b, 0x77, 0xc4, 0xf5, 0x6a, 0xee, 0x0a, 0xeb, 0x3d, 0xff, 0x82, 0x09, + 0x08, 0x4e, 0xd5, 0x44, 0x3d, 0xb8, 0x14, 0x05, 0xd6, 0x59, 0xf1, 0x5a, 0xab, 0xc6, 0x3e, 0x13, + 0xd5, 0xaf, 0x1e, 0xbd, 0x03, 0x17, 0xe4, 0xd3, 0xf6, 0xc2, 0x4b, 0x5d, 0xc3, 0x0d, 0xed, 0xb0, + 0xc7, 0xa7, 0xab, 0x96, 0x45, 0x87, 0xf3, 0x68, 0xa0, 0x15, 0xb8, 0x9c, 0x2a, 0xbe, 0x63, 0x3b, + 0x24, 0x98, 0x7b, 0x8c, 0x0d, 0x9b, 0xe9, 0x48, 0x6a, 0x39, 0x70, 0x9c, 0xdb, 0x0a, 0x3d, 0x80, + 0x2b, 0x1d, 0xdf, 0x0b, 0x89, 0x19, 0xde, 0xa7, 0xe2, 0x89, 0x23, 0x06, 0x18, 0xcc, 0xcd, 0xb1, + 0xb9, 0x60, 0xaf, 0x40, 0xeb, 0x79, 0x15, 0x70, 0x7e, 0x3b, 0xf4, 0x05, 0x0d, 0x6e, 0x04, 0xa1, + 0x4f, 0x8c, 0xb6, 0xed, 0xb6, 0x6a, 0x9e, 0xeb, 0x12, 0xc6, 0x26, 0x1b, 0x56, 0x6c, 0x74, 0x7f, + 0xad, 0x14, 0x9f, 0xd2, 0x0f, 0x0f, 0xaa, 0x37, 0x9a, 0x7d, 0x31, 0xe3, 0x23, 0x28, 0xa3, 0xb7, + 0x01, 0xda, 0xa4, 0xed, 0xf9, 0x3d, 0xca, 0x91, 0xe6, 0xe6, 0xcb, 0x1b, 0x31, 0xad, 0x46, 0x58, + 0xf8, 0xf6, 0x4f, 0xbc, 0x5f, 0xc5, 0x40, 0xac, 0x90, 0xd3, 0x0f, 0x2a, 0x70, 0x25, 0xf7, 0xe0, + 0xa1, 0x3b, 0x80, 0xd7, 0x5b, 0x94, 0x41, 0x76, 0xc5, 0x93, 0x0f, 0xdb, 0x01, 0xab, 0x49, 0x10, + 0x4e, 0xd7, 0xa5, 0x62, 0x21, 0xdb, 0xa9, 0x77, 0x9a, 0x71, 0xfb, 0x4a, 0x2c, 0x16, 0x36, 0x52, + 0x30, 0x9c, 0xa9, 0x8d, 0x6a, 0x70, 0x51, 0x94, 0x35, 0xe8, 0xcd, 0x2a, 0xb8, 0xe3, 0x13, 0x29, + 0x70, 0xd3, 0x3b, 0xca, 0xc5, 0x46, 0x1a, 0x88, 0xb3, 0xf5, 0xe9, 0x28, 0xe8, 0x0f, 0xb5, 0x17, + 0xc3, 0xf1, 0x28, 0xd6, 0x92, 0x20, 0x9c, 0xae, 0x2b, 0xaf, 0xbe, 0x89, 0x2e, 0x8c, 0xc4, 0xa3, + 0x58, 0x4b, 0xc1, 0x70, 0xa6, 0xb6, 0xfe, 0x1f, 0x87, 0xe1, 0xc9, 0x63, 0x08, 0x6b, 0xa8, 0x9d, + 0x3f, 0xdd, 0x27, 0xdf, 0xb8, 0xc7, 0xfb, 0x3c, 0x9d, 0x82, 0xcf, 0x73, 0x72, 0x7a, 0xc7, 0xfd, + 0x9c, 0x41, 0xd1, 0xe7, 0x3c, 0x39, 0xc9, 0xe3, 0x7f, 0xfe, 0x76, 0xfe, 0xe7, 0x2f, 0x39, 0xab, + 0x47, 0x2e, 0x97, 0x4e, 0xc1, 0x72, 0x29, 0x39, 0xab, 0xc7, 0x58, 0x5e, 0x7f, 0x34, 0x0c, 
0x4f, + 0x1d, 0x47, 0x70, 0x2c, 0xb9, 0xbe, 0x72, 0x58, 0xde, 0x99, 0xae, 0xaf, 0x22, 0xbf, 0xa6, 0x33, + 0x5c, 0x5f, 0x39, 0x24, 0xcf, 0x7a, 0x7d, 0x15, 0xcd, 0xea, 0x59, 0xad, 0xaf, 0xa2, 0x59, 0x3d, + 0xc6, 0xfa, 0xfa, 0x8b, 0xf4, 0xf9, 0x10, 0xc9, 0x8b, 0x0d, 0x18, 0x32, 0x3b, 0xdd, 0x92, 0x4c, + 0x8a, 0x19, 0x08, 0xd5, 0xd6, 0x37, 0x31, 0xc5, 0x81, 0x30, 0x8c, 0xf2, 0xf5, 0x53, 0x92, 0x05, + 0x31, 0x0f, 0x19, 0xbe, 0x24, 0xb1, 0xc0, 0x44, 0xa7, 0x8a, 0x74, 0x76, 0x48, 0x9b, 0xf8, 0x86, + 0xd3, 0x0c, 0x3d, 0xdf, 0x68, 0x95, 0xe5, 0x36, 0x6c, 0xaa, 0x96, 0x53, 0xb8, 0x70, 0x06, 0x3b, + 0x9d, 0x90, 0x8e, 0x6d, 0x95, 0xe4, 0x2f, 0x6c, 0x42, 0xd6, 0x1b, 0x75, 0x4c, 0x71, 0xe8, 0xff, + 0x70, 0x02, 0x94, 0xc0, 0x75, 0xe8, 0xbb, 0xe0, 0x9a, 0xe1, 0x38, 0xde, 0xc3, 0x75, 0xdf, 0xde, + 0xb3, 0x1d, 0xd2, 0x22, 0x56, 0x24, 0x4c, 0x05, 0xc2, 0x8c, 0x8c, 0x5d, 0x98, 0x16, 0x8b, 0x2a, + 0xe1, 0xe2, 0xf6, 0xe8, 0x1d, 0x0d, 0x2e, 0x9a, 0xe9, 0x60, 0x61, 0x83, 0x18, 0x9a, 0x64, 0x22, + 0x8f, 0xf1, 0xfd, 0x94, 0x29, 0xc6, 0x59, 0xb2, 0xe8, 0x07, 0x35, 0xae, 0x94, 0x8b, 0x9e, 0x49, + 0xc4, 0x37, 0xbb, 0x7b, 0x4a, 0x0f, 0x8a, 0xb1, 0x76, 0x2f, 0x7e, 0xbb, 0x4a, 0x12, 0x44, 0x5f, + 0xd4, 0xe0, 0xca, 0x6e, 0xde, 0x5b, 0x82, 0xf8, 0xb2, 0x0f, 0xca, 0x76, 0xa5, 0xe0, 0x71, 0x82, + 0x8b, 0xb3, 0xb9, 0x15, 0x70, 0x7e, 0x47, 0xa2, 0x59, 0x8a, 0xd4, 0xab, 0x82, 0x09, 0x94, 0x9e, + 0xa5, 0x94, 0x9e, 0x36, 0x9e, 0xa5, 0x08, 0x80, 0x93, 0x04, 0x51, 0x07, 0x26, 0x76, 0xa5, 0x4e, + 0x5b, 0xe8, 0xb1, 0x6a, 0x65, 0xa9, 0x2b, 0x8a, 0x71, 0x6e, 0x48, 0x13, 0x15, 0xe2, 0x98, 0x08, + 0xda, 0x81, 0xb1, 0x5d, 0xce, 0x88, 0x84, 0xfe, 0x69, 0x71, 0xe0, 0xfb, 0x31, 0x57, 0x83, 0x88, + 0x22, 0x2c, 0xd1, 0xab, 0x56, 0xb4, 0xe3, 0x47, 0x38, 0x77, 0x7c, 0x41, 0x83, 0x2b, 0x7b, 0xc4, + 0x0f, 0x6d, 0x33, 0xfd, 0x92, 0x33, 0x51, 0xfe, 0x0e, 0xff, 0x72, 0x1e, 0x42, 0xbe, 0x4c, 0x72, + 0x41, 0x38, 0xbf, 0x0b, 0xf4, 0x46, 0xcf, 0x15, 0xf2, 0xcd, 0xd0, 0x08, 0x6d, 0x73, 0xc3, 0xdb, + 0x25, 0x6e, 0x9c, 0x5f, 0x85, 0x69, 0x82, 0xc6, 0xf9, 0x8d, 0x7e, 0xb9, 0xb8, 0x1a, 0xee, 0x87, + 0x43, 0xff, 0x53, 0x0d, 0x32, 0x6a, 0x65, 0xf4, 0xe3, 0x1a, 0x4c, 0x6d, 0x13, 0x23, 0xec, 0xfa, + 0xe4, 0xae, 0x11, 0x46, 0x1e, 0xe7, 0x2f, 0x9f, 0x86, 0x36, 0x7b, 0xe1, 0x8e, 0x82, 0x98, 0x1b, + 0x04, 0x44, 0x41, 0x2f, 0x55, 0x10, 0x4e, 0xf4, 0x60, 0xfe, 0x45, 0xb8, 0x98, 0x69, 0x78, 0xa2, + 0x17, 0xc6, 0x7f, 0xa5, 0x41, 0x5e, 0x4a, 0x20, 0xf4, 0x3a, 0x8c, 0x18, 0x96, 0x15, 0xc5, 0xf8, + 0x7f, 0xbe, 0x9c, 0x6d, 0x8a, 0xa5, 0x3a, 0xf6, 0xb3, 0x9f, 0x98, 0xa3, 0x45, 0x77, 0x00, 0x19, + 0x89, 0x17, 0xee, 0xd5, 0xd8, 0x5d, 0x95, 0xbd, 0x84, 0x2d, 0x66, 0xa0, 0x38, 0xa7, 0x85, 0xfe, + 0x23, 0x1a, 0xa0, 0x6c, 0x98, 0x54, 0xe4, 0xc3, 0xb8, 0x58, 0xca, 0xf2, 0x2b, 0xd5, 0x4b, 0xba, + 0x94, 0x24, 0xfc, 0xa3, 0x62, 0x43, 0x27, 0x51, 0x10, 0xe0, 0x88, 0x8e, 0xfe, 0x97, 0x1a, 0xc4, + 0x71, 0xc0, 0xd1, 0x87, 0x61, 0xd2, 0x22, 0x81, 0xe9, 0xdb, 0x9d, 0x30, 0xf6, 0xa6, 0x8a, 0xbc, + 0x32, 0xea, 0x31, 0x08, 0xab, 0xf5, 0x90, 0x0e, 0xa3, 0xa1, 0x11, 0xec, 0x36, 0xea, 0xe2, 0x52, + 0xc9, 0x44, 0x80, 0x0d, 0x56, 0x82, 0x05, 0x24, 0x0e, 0x19, 0x36, 0x74, 0x8c, 0x90, 0x61, 0x68, + 0xfb, 0x14, 0xe2, 0xa3, 0xa1, 0xa3, 0x63, 0xa3, 0xe9, 0x3f, 0x5f, 0x81, 0x0b, 0xb4, 0xca, 0xaa, + 0x61, 0xbb, 0x21, 0x71, 0x99, 0xef, 0x40, 0xc9, 0x49, 0x68, 0xc1, 0x74, 0x98, 0xf0, 0x8d, 0x3b, + 0xb9, 0x67, 0x59, 0x64, 0x4d, 0x93, 0xf4, 0x88, 0x4b, 0xe2, 0x45, 0xcf, 0x4b, 0xe7, 0x0d, 0x7e, + 0xfd, 0x7e, 0x52, 0x2e, 0x55, 0xe6, 0x91, 0xf1, 0x48, 0x38, 0x1a, 0x46, 0xc1, 0xe3, 0x13, 0x7e, + 0x1a, 0x1f, 0x81, 
0x69, 0x61, 0x44, 0xcd, 0x63, 0xbf, 0x89, 0xeb, 0x37, 0x3b, 0x61, 0xee, 0xa8, + 0x00, 0x9c, 0xac, 0xa7, 0xff, 0x7e, 0x05, 0x92, 0x21, 0xea, 0xcb, 0xce, 0x52, 0x36, 0xf0, 0x5d, + 0xe5, 0xcc, 0x02, 0xdf, 0x7d, 0x90, 0xe5, 0x77, 0xe1, 0x89, 0xc0, 0xf8, 0x13, 0xb9, 0x9a, 0x95, + 0x85, 0xa7, 0xf1, 0x8a, 0x6a, 0xc4, 0xd3, 0x3a, 0x7c, 0xe2, 0x69, 0xfd, 0xb0, 0xb0, 0xae, 0x1c, + 0x49, 0x84, 0x1f, 0x94, 0xd6, 0x95, 0x17, 0x13, 0x0d, 0x15, 0x57, 0x93, 0x2f, 0x6b, 0x30, 0x26, + 0x62, 0x03, 0x1f, 0xc3, 0x95, 0x69, 0x1b, 0x46, 0xd8, 0x95, 0x67, 0x10, 0x69, 0xb0, 0xb9, 0xe3, + 0x79, 0x61, 0x22, 0x42, 0x32, 0xf3, 0x1d, 0x60, 0xff, 0x62, 0x8e, 0x9e, 0x19, 0xd8, 0xf9, 0xe6, + 0x8e, 0x1d, 0x12, 0x33, 0x94, 0x71, 0x57, 0xa5, 0x81, 0x9d, 0x52, 0x8e, 0x13, 0xb5, 0xf4, 0x9f, + 0x1a, 0x86, 0x9b, 0x02, 0x71, 0x46, 0x44, 0x8a, 0x18, 0x5c, 0x0f, 0x2e, 0x89, 0x6f, 0x5b, 0xf7, + 0x0d, 0x3b, 0x32, 0x3d, 0x28, 0x77, 0xf5, 0x15, 0xc9, 0xee, 0x32, 0xe8, 0x70, 0x1e, 0x0d, 0x1e, + 0x41, 0x94, 0x15, 0xdf, 0x23, 0x86, 0x13, 0xee, 0x48, 0xda, 0x95, 0x41, 0x22, 0x88, 0x66, 0xf1, + 0xe1, 0x5c, 0x2a, 0xcc, 0xf4, 0x41, 0x00, 0x6a, 0x3e, 0x31, 0x54, 0xbb, 0x8b, 0x01, 0xcc, 0xff, + 0x57, 0x73, 0x31, 0xe2, 0x02, 0x4a, 0x4c, 0x87, 0x68, 0xec, 0x33, 0x95, 0x04, 0x26, 0xa1, 0x6f, + 0xb3, 0x48, 0xd7, 0x91, 0x16, 0x7d, 0x35, 0x09, 0xc2, 0xe9, 0xba, 0xe8, 0x36, 0xcc, 0x30, 0x53, + 0x92, 0x38, 0xd4, 0xd5, 0x48, 0x1c, 0x4d, 0x61, 0x2d, 0x01, 0xc1, 0xa9, 0x9a, 0xfa, 0x27, 0x2b, + 0x30, 0xa5, 0x2e, 0xbb, 0x63, 0xf8, 0x35, 0x75, 0x95, 0xc3, 0x70, 0x00, 0x9f, 0x1b, 0x95, 0xea, + 0x31, 0xce, 0x43, 0xf4, 0x2a, 0xcc, 0x74, 0x19, 0x07, 0x91, 0xe1, 0x3a, 0xc4, 0xfa, 0xff, 0x16, + 0x3a, 0xca, 0xcd, 0x04, 0xe4, 0xd1, 0x41, 0x75, 0x5e, 0x45, 0x9f, 0x84, 0xe2, 0x14, 0x1e, 0xfd, + 0xb3, 0x43, 0x70, 0x29, 0xa7, 0x37, 0xcc, 0xe4, 0x80, 0xa4, 0x8e, 0xec, 0x41, 0x4c, 0x0e, 0x32, + 0xc7, 0x7f, 0x64, 0x72, 0x90, 0x86, 0xe0, 0x0c, 0x5d, 0xf4, 0x32, 0x0c, 0x99, 0xbe, 0x2d, 0x26, + 0xfc, 0x23, 0xa5, 0x2e, 0x9c, 0xb8, 0xb1, 0x34, 0x29, 0x28, 0x0e, 0xd5, 0x70, 0x03, 0x53, 0x84, + 0xf4, 0xe0, 0x51, 0xd9, 0x85, 0x94, 0x02, 0xd8, 0xc1, 0xa3, 0x72, 0x95, 0x00, 0x27, 0xeb, 0xa1, + 0x57, 0x61, 0x4e, 0xdc, 0x04, 0xa4, 0x8f, 0xb4, 0xe7, 0x06, 0x21, 0xdd, 0xd9, 0xa1, 0x60, 0xd4, + 0x8f, 0x1f, 0x1e, 0x54, 0xe7, 0xee, 0x17, 0xd4, 0xc1, 0x85, 0xad, 0xf5, 0x3f, 0x1f, 0x82, 0x49, + 0x25, 0x32, 0x3b, 0x5a, 0x1d, 0x44, 0x85, 0x12, 0x8f, 0x58, 0xaa, 0x51, 0x56, 0x61, 0xa8, 0xd5, + 0xe9, 0x96, 0xd4, 0xa1, 0x44, 0xe8, 0xee, 0x52, 0x74, 0xad, 0x4e, 0x17, 0xbd, 0x1c, 0x69, 0x65, + 0xca, 0xe9, 0x4d, 0x22, 0x8f, 0x96, 0x94, 0x66, 0x46, 0x6e, 0xc4, 0xe1, 0xc2, 0x8d, 0xd8, 0x86, + 0xb1, 0x40, 0xa8, 0x6c, 0x46, 0xca, 0x47, 0xa5, 0x51, 0x66, 0x5a, 0xa8, 0x68, 0xf8, 0x7d, 0x4f, + 0x6a, 0x70, 0x24, 0x0d, 0x2a, 0x4b, 0x76, 0x99, 0x9f, 0x2c, 0xbb, 0xc8, 0x8e, 0x73, 0x59, 0x72, + 0x93, 0x95, 0x60, 0x01, 0xc9, 0x1c, 0x51, 0x63, 0xc7, 0x3a, 0xa2, 0xfe, 0x4e, 0x05, 0x50, 0xb6, + 0x1b, 0xe8, 0x49, 0x18, 0x61, 0x7e, 0xf6, 0x82, 0x17, 0x45, 0x92, 0x3f, 0xf3, 0xb4, 0xc6, 0x1c, + 0x86, 0x9a, 0x22, 0xc6, 0x46, 0xb9, 0xcf, 0xc9, 0x6c, 0x76, 0x04, 0x3d, 0x25, 0x20, 0xc7, 0xcd, + 0x84, 0x53, 0x46, 0xde, 0x99, 0xbf, 0x09, 0x63, 0x6d, 0xdb, 0x65, 0x0f, 0x87, 0xe5, 0x34, 0x59, + 0xdc, 0xb4, 0x80, 0xa3, 0xc0, 0x12, 0x97, 0xfe, 0x47, 0x15, 0xba, 0xf4, 0x63, 0x89, 0xb7, 0x07, + 0x60, 0x74, 0x43, 0x8f, 0x33, 0x30, 0xb1, 0x03, 0x1a, 0xe5, 0xbe, 0x72, 0x84, 0x74, 0x31, 0x42, + 0xc8, 0x9f, 0xbc, 0xe2, 0xdf, 0x58, 0x21, 0x46, 0x49, 0x87, 0x76, 0x9b, 0xbc, 0x62, 0xbb, 0x96, + 0xf7, 0x50, 0x4c, 0xef, 0xa0, 0xa4, 0x37, 
0x22, 0x84, 0x9c, 0x74, 0xfc, 0x1b, 0x2b, 0xc4, 0x28, + 0x6b, 0x61, 0x17, 0x67, 0x97, 0xa5, 0xca, 0x10, 0x7d, 0xf3, 0x1c, 0x47, 0x9e, 0xca, 0xe3, 0x9c, + 0xb5, 0xd4, 0x0a, 0xea, 0xe0, 0xc2, 0xd6, 0xfa, 0x2f, 0x68, 0x70, 0x25, 0x77, 0x2a, 0xd0, 0x5d, + 0xb8, 0x18, 0x9b, 0x79, 0xa9, 0xcc, 0x7e, 0x3c, 0x4e, 0xd1, 0x72, 0x3f, 0x5d, 0x01, 0x67, 0xdb, + 0xf0, 0x3c, 0xc0, 0x99, 0xc3, 0x44, 0xd8, 0x88, 0xa9, 0xa2, 0x91, 0x0a, 0xc6, 0x79, 0x6d, 0xf4, + 0xef, 0x4a, 0x74, 0x36, 0x9e, 0x2c, 0xba, 0x33, 0xb6, 0x48, 0x2b, 0x72, 0x8a, 0x8b, 0x76, 0xc6, + 0x12, 0x2d, 0xc4, 0x1c, 0x86, 0x9e, 0x50, 0x5d, 0x4d, 0x23, 0xbe, 0x25, 0xdd, 0x4d, 0xf5, 0xef, + 0x81, 0xc7, 0x0a, 0x5e, 0x42, 0x51, 0x1d, 0xa6, 0x82, 0x87, 0x46, 0x67, 0x89, 0xec, 0x18, 0x7b, + 0xb6, 0x08, 0x5d, 0xc0, 0xcd, 0xf7, 0xa6, 0x9a, 0x4a, 0xf9, 0xa3, 0xd4, 0x6f, 0x9c, 0x68, 0xa5, + 0x87, 0x00, 0xc2, 0xcc, 0xd3, 0x76, 0x5b, 0x68, 0x1b, 0xc6, 0x0d, 0x91, 0x86, 0x56, 0xac, 0xe3, + 0x6f, 0x2f, 0xa5, 0x04, 0x10, 0x38, 0xb8, 0xfd, 0xb9, 0xfc, 0x85, 0x23, 0xdc, 0xfa, 0x3f, 0xd1, + 0xe0, 0x6a, 0xbe, 0xb3, 0xfa, 0x31, 0x44, 0x9b, 0x36, 0x4c, 0xfa, 0x71, 0x33, 0xb1, 0xe8, 0xbf, + 0x4d, 0x8d, 0x56, 0xaa, 0x84, 0xe7, 0xa2, 0x62, 0x5f, 0xcd, 0xf7, 0x02, 0xf9, 0xe5, 0xd3, 0x01, + 0x4c, 0xa3, 0x2b, 0x97, 0xd2, 0x13, 0xac, 0xe2, 0xd7, 0x7f, 0xad, 0x02, 0xb0, 0x46, 0xc2, 0x87, + 0x9e, 0xbf, 0x4b, 0xa7, 0xe8, 0xf1, 0xc4, 0x4d, 0x63, 0xfc, 0xeb, 0x17, 0x30, 0xe1, 0x71, 0x18, + 0xee, 0x78, 0x56, 0x20, 0xd8, 0x1f, 0xeb, 0x08, 0xb3, 0x80, 0x62, 0xa5, 0xa8, 0x0a, 0x23, 0xec, + 0xe1, 0x43, 0x9c, 0x4c, 0xec, 0x9e, 0x42, 0xa5, 0xcc, 0x00, 0xf3, 0x72, 0x9e, 0x5c, 0x8c, 0xf9, + 0x74, 0x04, 0xe2, 0xe2, 0x25, 0x92, 0x8b, 0xf1, 0x32, 0x1c, 0x41, 0xd1, 0x6d, 0x00, 0xbb, 0x73, + 0xc7, 0x68, 0xdb, 0x0e, 0x95, 0x79, 0x47, 0xa3, 0x5c, 0xb6, 0xd0, 0x58, 0x97, 0xa5, 0x8f, 0x0e, + 0xaa, 0xe3, 0xe2, 0x57, 0x0f, 0x2b, 0xb5, 0xf5, 0xbf, 0x1a, 0x82, 0x44, 0xde, 0xe7, 0x58, 0xc7, + 0xa4, 0x9d, 0x8d, 0x8e, 0xe9, 0x55, 0x98, 0x73, 0x3c, 0xc3, 0x5a, 0x32, 0x1c, 0xba, 0x1b, 0xfd, + 0x26, 0xff, 0x8c, 0x86, 0xdb, 0x8a, 0x92, 0xfb, 0x32, 0xae, 0xb4, 0x52, 0x50, 0x07, 0x17, 0xb6, + 0x46, 0x61, 0x94, 0x6d, 0x7a, 0xa8, 0xbc, 0xfb, 0xa3, 0x3a, 0x17, 0x0b, 0xaa, 0x27, 0x50, 0x24, + 0x60, 0xa4, 0x12, 0x52, 0x7f, 0x4a, 0x83, 0x2b, 0x64, 0x9f, 0x7b, 0xc2, 0x6d, 0xf8, 0xc6, 0xf6, + 0xb6, 0x6d, 0x0a, 0xbb, 0x54, 0xfe, 0x61, 0x57, 0x0e, 0x0f, 0xaa, 0x57, 0x96, 0xf3, 0x2a, 0x3c, + 0x3a, 0xa8, 0xde, 0xca, 0x75, 0x4c, 0x64, 0x9f, 0x35, 0xb7, 0x09, 0xce, 0x27, 0x35, 0xff, 0x3c, + 0x4c, 0x9e, 0xc0, 0x9b, 0x21, 0xe1, 0x7e, 0xf8, 0xeb, 0x15, 0x98, 0xa2, 0xeb, 0x6e, 0xc5, 0x33, + 0x0d, 0xa7, 0xbe, 0xd6, 0x3c, 0x41, 0xb6, 0x74, 0xb4, 0x02, 0x97, 0xb7, 0x3d, 0xdf, 0x24, 0x1b, + 0xb5, 0xf5, 0x0d, 0x4f, 0x3c, 0xb9, 0xd4, 0xd7, 0x9a, 0x82, 0x4b, 0xb3, 0x4b, 0xe4, 0x9d, 0x1c, + 0x38, 0xce, 0x6d, 0x85, 0x1e, 0xc0, 0x95, 0xb8, 0x7c, 0xb3, 0xc3, 0x0d, 0x59, 0x28, 0xba, 0xa1, + 0xd8, 0x10, 0xe7, 0x4e, 0x5e, 0x05, 0x9c, 0xdf, 0x0e, 0x19, 0x70, 0x5d, 0xc4, 0x24, 0xb9, 0xe3, + 0xf9, 0x0f, 0x0d, 0xdf, 0x4a, 0xa2, 0x1d, 0x8e, 0x55, 0xd2, 0xf5, 0xe2, 0x6a, 0xb8, 0x1f, 0x0e, + 0xfd, 0xa7, 0x47, 0x41, 0x71, 0x57, 0x3b, 0x41, 0x3a, 0xaa, 0x9f, 0xd3, 0xe0, 0xb2, 0xe9, 0xd8, + 0xc4, 0x0d, 0x53, 0xbe, 0x49, 0x9c, 0x1d, 0x6d, 0x96, 0xf2, 0xa3, 0xeb, 0x10, 0xb7, 0x51, 0x17, + 0x76, 0x3f, 0xb5, 0x1c, 0xe4, 0xc2, 0x36, 0x2a, 0x07, 0x82, 0x73, 0x3b, 0xc3, 0xc6, 0xc3, 0xca, + 0x1b, 0x75, 0x35, 0x98, 0x42, 0x4d, 0x94, 0xe1, 0x08, 0x8a, 0x9e, 0x85, 0xc9, 0x96, 0xef, 0x75, + 0x3b, 0x41, 0x8d, 0x19, 0x1b, 0xf3, 0xb5, 0xcf, 0xe4, 0xc2, 0xbb, 
0x71, 0x31, 0x56, 0xeb, 0x50, + 0x29, 0x97, 0xff, 0x5c, 0xf7, 0xc9, 0xb6, 0xbd, 0x2f, 0x98, 0x1c, 0x93, 0x72, 0xef, 0x2a, 0xe5, + 0x38, 0x51, 0x8b, 0xf9, 0x43, 0x07, 0x41, 0x97, 0xf8, 0x9b, 0x78, 0x45, 0xe4, 0x71, 0xe0, 0xfe, + 0xd0, 0xb2, 0x10, 0xc7, 0x70, 0xf4, 0x13, 0x1a, 0xcc, 0xf8, 0xe4, 0xcd, 0xae, 0xed, 0x13, 0x8b, + 0x11, 0x0d, 0x84, 0xcf, 0x20, 0x1e, 0xcc, 0x4f, 0x71, 0x01, 0x27, 0x90, 0x72, 0x0e, 0x11, 0xa9, + 0xed, 0x92, 0x40, 0x9c, 0xea, 0x01, 0x9d, 0xaa, 0xc0, 0x6e, 0xb9, 0xb6, 0xdb, 0x5a, 0x74, 0x5a, + 0xc1, 0xdc, 0x38, 0x63, 0x7a, 0x5c, 0x84, 0x8e, 0x8b, 0xb1, 0x5a, 0x87, 0x5e, 0x2f, 0xbb, 0x01, + 0xdd, 0xf7, 0x6d, 0xc2, 0xe7, 0x77, 0x22, 0xd6, 0x6b, 0x6e, 0xaa, 0x00, 0x9c, 0xac, 0x87, 0x6e, + 0xc3, 0x8c, 0x2c, 0x10, 0xb3, 0x0c, 0x3c, 0x8a, 0x1e, 0xbb, 0xee, 0x27, 0x20, 0x38, 0x55, 0x73, + 0x7e, 0x11, 0x2e, 0xe5, 0x0c, 0xf3, 0x44, 0xcc, 0xe5, 0xff, 0x6a, 0x70, 0x85, 0xe7, 0xd2, 0x94, + 0x19, 0x20, 0x64, 0xb8, 0xbc, 0xfc, 0xc8, 0x73, 0xda, 0x99, 0x46, 0x9e, 0xfb, 0x3a, 0x44, 0xd8, + 0xd3, 0xff, 0x51, 0x05, 0xde, 0x7b, 0xe4, 0xbe, 0x44, 0x7f, 0x5f, 0x83, 0x49, 0xb2, 0x1f, 0xfa, + 0x46, 0xe4, 0x91, 0x41, 0x17, 0xe9, 0xf6, 0x99, 0x30, 0x81, 0x85, 0xe5, 0x98, 0x10, 0x5f, 0xb8, + 0x91, 0x88, 0xa5, 0x40, 0xb0, 0xda, 0x1f, 0x7a, 0x69, 0xe5, 0x51, 0x26, 0xd5, 0x07, 0x10, 0x91, + 0xe2, 0x58, 0x40, 0xe6, 0x3f, 0x0e, 0xb3, 0x69, 0xcc, 0x27, 0x5a, 0x2b, 0xbf, 0x5a, 0x81, 0xb1, + 0x75, 0xdf, 0xa3, 0xd2, 0xdf, 0x39, 0x84, 0x55, 0x30, 0x12, 0x91, 0xd7, 0x4b, 0x79, 0x4a, 0x8b, + 0xce, 0x16, 0x66, 0x7d, 0xb0, 0x53, 0x59, 0x1f, 0x16, 0x07, 0x21, 0xd2, 0x3f, 0xcd, 0xc3, 0xef, + 0x6a, 0x30, 0x29, 0x6a, 0x9e, 0x43, 0xf0, 0x80, 0xef, 0x4d, 0x06, 0x0f, 0xf8, 0xd8, 0x00, 0xe3, + 0x2a, 0x88, 0x1a, 0xf0, 0x05, 0x0d, 0xa6, 0x45, 0x8d, 0x55, 0xd2, 0xde, 0x22, 0x3e, 0xba, 0x03, + 0x63, 0x41, 0x97, 0x7d, 0x48, 0x31, 0xa0, 0xeb, 0xea, 0x7d, 0xc2, 0xdf, 0x32, 0x4c, 0x96, 0xa7, + 0x9b, 0x57, 0x51, 0x72, 0x29, 0xf0, 0x02, 0x2c, 0x1b, 0xd3, 0xdb, 0x8b, 0xef, 0x39, 0x99, 0x70, + 0x52, 0xd8, 0x73, 0x08, 0x66, 0x10, 0x2a, 0x98, 0xd3, 0xbf, 0x52, 0x85, 0xc7, 0x04, 0x73, 0x0a, + 0x0e, 0x30, 0x2f, 0xd7, 0x3f, 0x3d, 0x1c, 0x4d, 0x36, 0x8b, 0x77, 0x7e, 0x0f, 0x26, 0x4c, 0x9f, + 0x18, 0x21, 0xb1, 0x96, 0x7a, 0xc7, 0xe9, 0x1c, 0x3b, 0xae, 0x6a, 0xb2, 0x05, 0x8e, 0x1b, 0xd3, + 0x93, 0x41, 0x7d, 0x73, 0xaa, 0xc4, 0x87, 0x68, 0xe1, 0x7b, 0xd3, 0xb7, 0xc3, 0x88, 0xf7, 0xd0, + 0x8d, 0x4c, 0x57, 0xfa, 0x12, 0x66, 0x43, 0x79, 0x40, 0x6b, 0x63, 0xde, 0x48, 0x0d, 0xa7, 0x36, + 0xdc, 0x27, 0x9c, 0x9a, 0x03, 0x63, 0x6d, 0xf6, 0x19, 0x06, 0x0a, 0xad, 0x9f, 0xf8, 0xa0, 0x6a, + 0xf2, 0x25, 0x86, 0x19, 0x4b, 0x12, 0xf4, 0x84, 0xa7, 0xa7, 0x50, 0xd0, 0x31, 0x4c, 0xa2, 0x9e, + 0xf0, 0x6b, 0xb2, 0x10, 0xc7, 0x70, 0xd4, 0x4b, 0xc6, 0xe9, 0x1b, 0x2b, 0xaf, 0xc1, 0x13, 0xdd, + 0x53, 0x42, 0xf3, 0xf1, 0xa9, 0x2f, 0x8c, 0xd5, 0xf7, 0xa3, 0xc3, 0xd1, 0x22, 0x15, 0x99, 0x32, + 0xf2, 0x73, 0x4b, 0x6b, 0xa5, 0x72, 0x4b, 0x7f, 0xab, 0x8c, 0x27, 0x5b, 0x49, 0x24, 0x0a, 0x8b, + 0xe2, 0xc9, 0x4e, 0x09, 0xd2, 0x89, 0x18, 0xb2, 0x5d, 0xb8, 0x14, 0x84, 0x86, 0x43, 0x9a, 0xb6, + 0xd0, 0x74, 0x04, 0xa1, 0xd1, 0xee, 0x94, 0x08, 0xe8, 0xca, 0xfd, 0x17, 0xb2, 0xa8, 0x70, 0x1e, + 0x7e, 0xf4, 0x43, 0x1a, 0xcc, 0xb1, 0xf2, 0xc5, 0x6e, 0xe8, 0xf1, 0xc8, 0xe3, 0x31, 0xf1, 0x93, + 0x3f, 0x6c, 0xb3, 0x0b, 0x60, 0xb3, 0x00, 0x1f, 0x2e, 0xa4, 0x84, 0xde, 0x86, 0x2b, 0xf4, 0x04, + 0x5e, 0x34, 0x43, 0x7b, 0xcf, 0x0e, 0x7b, 0x71, 0x17, 0x4e, 0x1e, 0xc5, 0x95, 0x5d, 0x36, 0x56, + 0xf2, 0x90, 0xe1, 0x7c, 0x1a, 0xfa, 0x5f, 0x68, 0x80, 0xb2, 0x4b, 0x08, 0x39, 0x30, 0x6e, 
0x49, + 0x87, 0x02, 0xed, 0x54, 0x82, 0x48, 0x46, 0x9c, 0x39, 0xf2, 0x43, 0x88, 0x28, 0x20, 0x0f, 0x26, + 0x1e, 0xee, 0xd8, 0x21, 0x71, 0xec, 0x20, 0x3c, 0xa5, 0x98, 0x95, 0x51, 0x00, 0xb7, 0x57, 0x24, + 0x62, 0x1c, 0xd3, 0xd0, 0x7f, 0x6c, 0x18, 0xc6, 0xa3, 0x10, 0xda, 0x47, 0xbf, 0xf1, 0x76, 0x01, + 0x99, 0x4a, 0x1a, 0xb2, 0x41, 0x34, 0x30, 0x4c, 0x08, 0xab, 0x65, 0x90, 0xe1, 0x1c, 0x02, 0xe8, + 0x6d, 0xb8, 0x6c, 0xbb, 0xdb, 0xbe, 0x11, 0x84, 0x7e, 0x97, 0xe9, 0xca, 0x07, 0xc9, 0xe6, 0xc5, + 0xee, 0x50, 0x8d, 0x1c, 0x74, 0x38, 0x97, 0x08, 0x22, 0x30, 0xc6, 0x33, 0x05, 0xc8, 0x70, 0x82, + 0xa5, 0xf2, 0xd2, 0xf2, 0x0c, 0x04, 0x31, 0xd7, 0xe4, 0xbf, 0x03, 0x2c, 0x71, 0xf3, 0x50, 0x1f, + 0xfc, 0x7f, 0xf9, 0x1e, 0x2d, 0xd6, 0x7d, 0xad, 0x3c, 0xbd, 0x38, 0xc5, 0x31, 0x0f, 0xf5, 0x91, + 0x2c, 0xc4, 0x69, 0x82, 0xfa, 0x6f, 0x6b, 0x30, 0xc2, 0x1d, 0x75, 0xcf, 0x5e, 0x82, 0xfb, 0x9e, + 0x84, 0x04, 0x57, 0x2a, 0x21, 0x11, 0xeb, 0x6a, 0x61, 0xaa, 0x9c, 0x2f, 0x6b, 0x30, 0xc1, 0x6a, + 0x9c, 0x83, 0x48, 0xf5, 0x7a, 0x52, 0xa4, 0x7a, 0xbe, 0xf4, 0x68, 0x0a, 0x04, 0xaa, 0xdf, 0x1e, + 0x12, 0x63, 0x61, 0x12, 0x4b, 0x03, 0x2e, 0x09, 0x6b, 0xd8, 0x15, 0x7b, 0x9b, 0xd0, 0x25, 0x5e, + 0x37, 0x7a, 0xfc, 0x81, 0x68, 0x44, 0xf8, 0x62, 0x65, 0xc1, 0x38, 0xaf, 0x0d, 0xfa, 0x75, 0x8d, + 0xca, 0x06, 0xa1, 0x6f, 0x9b, 0x03, 0xe5, 0x9f, 0x89, 0xfa, 0xb6, 0xb0, 0xca, 0x91, 0xf1, 0x9b, + 0xc9, 0x66, 0x2c, 0x24, 0xb0, 0xd2, 0x47, 0x07, 0xd5, 0x6a, 0x8e, 0xca, 0x2c, 0xce, 0x45, 0x11, + 0x84, 0x9f, 0xfa, 0xe3, 0xbe, 0x55, 0x98, 0x9a, 0x5a, 0xf6, 0x18, 0xdd, 0x83, 0x91, 0xc0, 0xf4, + 0x3a, 0xe4, 0x24, 0x19, 0xb5, 0xa2, 0x09, 0x6e, 0xd2, 0x96, 0x98, 0x23, 0x98, 0x7f, 0x03, 0xa6, + 0xd4, 0x9e, 0xe7, 0xdc, 0x7c, 0xea, 0xea, 0xcd, 0xe7, 0xc4, 0x2f, 0x5d, 0xea, 0x4d, 0xe9, 0x37, + 0x2a, 0x30, 0xca, 0xf3, 0x52, 0x1f, 0x43, 0x19, 0x6f, 0xcb, 0xa0, 0xff, 0x03, 0xa4, 0xdb, 0x57, + 0x23, 0x64, 0xbe, 0xe6, 0xb9, 0xca, 0x1c, 0xa8, 0x71, 0xff, 0x91, 0x1b, 0xc5, 0x4d, 0x1d, 0x2a, + 0x9f, 0xf5, 0x87, 0x0f, 0xec, 0xac, 0x23, 0xa5, 0xfe, 0x9e, 0x06, 0x53, 0x89, 0x40, 0xb4, 0x6d, + 0x18, 0xf2, 0xa3, 0x7c, 0x70, 0x65, 0xdf, 0x2a, 0xa4, 0x4d, 0xd5, 0xf5, 0x3e, 0x95, 0x30, 0xa5, + 0x13, 0xc5, 0xac, 0xad, 0x9c, 0x52, 0xcc, 0x5a, 0xfd, 0x73, 0x1a, 0x5c, 0x95, 0x03, 0x4a, 0x46, + 0x64, 0x42, 0x4f, 0xc3, 0xb8, 0xd1, 0xb1, 0x99, 0x4a, 0x4d, 0x55, 0x4a, 0x2e, 0xae, 0x37, 0x58, + 0x19, 0x8e, 0xa0, 0xe8, 0x83, 0x30, 0x2e, 0x17, 0x9e, 0x10, 0x3b, 0x23, 0x9e, 0x15, 0xbd, 0xbe, + 0x44, 0x35, 0xd0, 0xfb, 0x94, 0xbc, 0x0c, 0x23, 0xb1, 0x9c, 0x10, 0x11, 0xe6, 0xaf, 0xc0, 0xfa, + 0xb7, 0xc1, 0x44, 0xb3, 0x79, 0x6f, 0xd1, 0x34, 0x49, 0x10, 0x9c, 0x40, 0xb9, 0xac, 0x7f, 0x66, + 0x08, 0xa6, 0x45, 0x68, 0x39, 0xdb, 0xb5, 0x6c, 0xb7, 0x75, 0x0e, 0x67, 0xca, 0x06, 0x4c, 0x70, + 0x6d, 0xc6, 0x11, 0xb9, 0xfb, 0x9a, 0xb2, 0x52, 0x3a, 0x80, 0x73, 0x04, 0xc0, 0x31, 0x22, 0x74, + 0x1f, 0x46, 0xdf, 0xa4, 0xfc, 0x4d, 0xee, 0x8b, 0x63, 0xb1, 0x99, 0x68, 0xd1, 0x33, 0xd6, 0x18, + 0x60, 0x81, 0x02, 0x05, 0xcc, 0xe8, 0x8f, 0x09, 0x5c, 0x83, 0xc4, 0xae, 0x48, 0xcc, 0x6c, 0x94, + 0x95, 0x65, 0x4a, 0xd8, 0x0e, 0xb2, 0x5f, 0x38, 0x22, 0xc4, 0xa2, 0xcf, 0x27, 0x5a, 0xbc, 0x4b, + 0xa2, 0xcf, 0x27, 0xfa, 0x5c, 0x70, 0x34, 0x3e, 0x0f, 0x57, 0x72, 0x27, 0xe3, 0x68, 0x71, 0x56, + 0xff, 0xa5, 0x0a, 0x0c, 0x37, 0x09, 0xb1, 0xce, 0x61, 0x65, 0xbe, 0x9e, 0x90, 0x76, 0xbe, 0xbd, + 0x74, 0xfc, 0xfb, 0x22, 0x65, 0xd5, 0x76, 0x4a, 0x59, 0xf5, 0xf1, 0xd2, 0x14, 0xfa, 0x6b, 0xaa, + 0x7e, 0xa6, 0x02, 0x40, 0xab, 0x2d, 0x19, 0xe6, 0x2e, 0xe7, 0x38, 0xd1, 0x6a, 0xd6, 0x92, 0x1c, + 0x27, 0xbb, 0x0c, 
0xcf, 0xf3, 0xf1, 0x56, 0x87, 0x51, 0x9f, 0x9d, 0x44, 0xe2, 0xdd, 0x03, 0x78, + 0x42, 0x69, 0x5a, 0x82, 0x05, 0x24, 0xc9, 0x2d, 0x86, 0x4f, 0x89, 0x5b, 0xe8, 0xfb, 0xc0, 0x32, + 0x80, 0xd6, 0xd7, 0x9a, 0xa8, 0xad, 0xcc, 0x4e, 0xa5, 0xbc, 0x2c, 0x2f, 0xd0, 0x1d, 0xb9, 0xcb, + 0x3f, 0xa3, 0xc1, 0x85, 0x54, 0xdd, 0x63, 0xdc, 0xe9, 0xce, 0x84, 0x67, 0xea, 0xbf, 0xa5, 0xc1, + 0x38, 0xed, 0xcb, 0x39, 0x30, 0x9a, 0xff, 0x3f, 0xc9, 0x68, 0x3e, 0x5a, 0x76, 0x8a, 0x0b, 0xf8, + 0xcb, 0x9f, 0x55, 0x80, 0x25, 0x9a, 0x10, 0x26, 0x0a, 0xca, 0xcb, 0xbf, 0x56, 0xf0, 0xf2, 0x7f, + 0x53, 0x18, 0x0e, 0xa4, 0x74, 0x94, 0x8a, 0xf1, 0xc0, 0x07, 0x15, 0xdb, 0x80, 0xa1, 0xe4, 0xb6, + 0xc9, 0xb1, 0x0f, 0x78, 0x0b, 0xa6, 0x83, 0x1d, 0xcf, 0x0b, 0xa3, 0xc8, 0x06, 0xc3, 0xe5, 0xf5, + 0xd1, 0xcc, 0xc2, 0x5a, 0x0e, 0x85, 0x3f, 0x40, 0x35, 0x55, 0xdc, 0x38, 0x49, 0x0a, 0x2d, 0x00, + 0x6c, 0x39, 0x9e, 0xb9, 0x5b, 0x6b, 0xd4, 0xb1, 0xb4, 0xa8, 0x65, 0x46, 0x4b, 0x4b, 0x51, 0x29, + 0x56, 0x6a, 0x0c, 0x64, 0xcb, 0xf0, 0x27, 0x1a, 0x9f, 0xe9, 0x13, 0x2c, 0xde, 0x73, 0xe4, 0x28, + 0xef, 0x4f, 0x71, 0x14, 0x25, 0x4d, 0x7d, 0x82, 0xab, 0x54, 0xa5, 0xc0, 0x3e, 0x1c, 0xeb, 0x9f, + 0x13, 0xe9, 0xb5, 0x7e, 0x55, 0x0c, 0x33, 0xca, 0x55, 0xd2, 0x81, 0x69, 0x47, 0x4d, 0x99, 0x2a, + 0xf6, 0x48, 0xa9, 0x6c, 0xab, 0x91, 0x8b, 0x46, 0xa2, 0x18, 0x27, 0x09, 0xa0, 0x8f, 0xc0, 0xb4, + 0x1c, 0x1d, 0x9d, 0x4c, 0x69, 0xb9, 0xc1, 0x96, 0xc3, 0xba, 0x0a, 0xc0, 0xc9, 0x7a, 0xfa, 0xe7, + 0x2b, 0xf0, 0x04, 0xef, 0x3b, 0xd3, 0x18, 0xd4, 0x49, 0x87, 0xb8, 0x16, 0x71, 0xcd, 0x1e, 0x93, + 0x59, 0x2d, 0xaf, 0x85, 0xde, 0x86, 0xd1, 0x87, 0x84, 0x58, 0x91, 0x46, 0xfb, 0x95, 0xf2, 0xa9, + 0x5e, 0x0a, 0x48, 0xbc, 0xc2, 0xd0, 0x73, 0x8e, 0xce, 0xff, 0xc7, 0x82, 0x24, 0x25, 0xde, 0xf1, + 0xbd, 0xad, 0x48, 0xb4, 0x3a, 0x7d, 0xe2, 0xeb, 0x0c, 0x3d, 0x27, 0xce, 0xff, 0xc7, 0x82, 0xa4, + 0xbe, 0x0e, 0x4f, 0x1e, 0xa3, 0xe9, 0x49, 0x44, 0xe8, 0xa3, 0x30, 0xf2, 0xd1, 0x9f, 0x04, 0xe3, + 0x1f, 0x6a, 0xf0, 0x94, 0x82, 0x72, 0x79, 0x9f, 0x4a, 0xf5, 0x35, 0xa3, 0x63, 0x98, 0xf4, 0x8e, + 0xca, 0xbc, 0xb5, 0x4f, 0x94, 0x7a, 0xe2, 0x33, 0x1a, 0x8c, 0x71, 0x43, 0x1a, 0xc9, 0x7e, 0x5f, + 0x1f, 0x70, 0xca, 0x0b, 0xbb, 0x24, 0x63, 0x1a, 0xcb, 0xb1, 0xf1, 0xdf, 0x01, 0x96, 0xf4, 0xf5, + 0x7f, 0x33, 0x02, 0xdf, 0x74, 0x7c, 0x44, 0xe8, 0x4f, 0xb4, 0x6c, 0x9e, 0xdb, 0xf6, 0xd9, 0x76, + 0x3e, 0xd2, 0x62, 0x88, 0x8b, 0xf1, 0x2b, 0x99, 0xbc, 0x31, 0xa7, 0xa4, 0x20, 0x51, 0x92, 0xea, + 0xfe, 0x53, 0x0d, 0xa6, 0xe8, 0xb1, 0x14, 0x31, 0x17, 0xfe, 0x99, 0x3a, 0x67, 0x3c, 0xd2, 0x35, + 0x85, 0x64, 0xca, 0xf3, 0x52, 0x05, 0xe1, 0x44, 0xdf, 0xd0, 0x66, 0xf2, 0x35, 0x88, 0x5f, 0xb7, + 0x6e, 0xe4, 0x49, 0x23, 0x27, 0xc9, 0xca, 0x34, 0xef, 0xc0, 0x4c, 0x72, 0xe6, 0xcf, 0x52, 0xbd, + 0x33, 0xff, 0x22, 0x5c, 0xcc, 0x8c, 0xfe, 0x44, 0xca, 0x8d, 0xbf, 0x3d, 0x0c, 0x55, 0x65, 0xaa, + 0x13, 0xa6, 0x74, 0x52, 0x26, 0xf8, 0x29, 0x0d, 0x26, 0x0d, 0xd7, 0x15, 0xe6, 0x18, 0x72, 0xfd, + 0x5a, 0x03, 0x7e, 0xd5, 0x3c, 0x52, 0x0b, 0x8b, 0x31, 0x99, 0x94, 0xbd, 0x81, 0x02, 0xc1, 0x6a, + 0x6f, 0xfa, 0x18, 0xd5, 0x55, 0xce, 0xcd, 0xa8, 0x0e, 0x7d, 0xbf, 0x3c, 0x88, 0xf9, 0x32, 0x7a, + 0xf5, 0x0c, 0xe6, 0x86, 0x9d, 0xeb, 0xf9, 0xda, 0xb4, 0xf9, 0x8f, 0xc3, 0x6c, 0x7a, 0xe6, 0x4e, + 0xb4, 0x0a, 0x7e, 0x69, 0x28, 0xc1, 0xaa, 0x0b, 0xc9, 0x1f, 0x43, 0x87, 0xf8, 0xc5, 0xd4, 0x62, + 0xe1, 0x2c, 0xc0, 0x3e, 0xab, 0x09, 0x39, 0xdd, 0x15, 0x33, 0x74, 0x7e, 0x66, 0x98, 0x83, 0x7e, + 0xb2, 0x25, 0xb8, 0xa2, 0xcc, 0x8f, 0x92, 0x05, 0xef, 0x19, 0x18, 0xdb, 0xb3, 0x03, 0x5b, 0xc6, + 0xd1, 0x51, 0x4e, 0xe8, 0x97, 0x79, 0x31, 
0x96, 0x70, 0x7d, 0x25, 0xb1, 0xf7, 0x37, 0xbc, 0x8e, + 0xe7, 0x78, 0xad, 0xde, 0xe2, 0x43, 0xc3, 0x27, 0xd8, 0xeb, 0x86, 0x02, 0xdb, 0x71, 0xcf, 0xfb, + 0x55, 0xb8, 0xa9, 0x60, 0xcb, 0x0d, 0x08, 0x70, 0x12, 0x74, 0xbf, 0x3b, 0x26, 0x45, 0x57, 0xe1, + 0x31, 0xf9, 0x2b, 0x1a, 0x5c, 0x23, 0x45, 0x47, 0x81, 0x90, 0x63, 0x5f, 0x3d, 0xab, 0xa3, 0x46, + 0xc4, 0x59, 0x2d, 0x02, 0xe3, 0xe2, 0x9e, 0xa1, 0x5e, 0x22, 0x17, 0x64, 0x65, 0x10, 0x3d, 0x5c, + 0xce, 0xf7, 0xee, 0x97, 0x09, 0x12, 0xfd, 0xac, 0x06, 0x97, 0x9d, 0x9c, 0xad, 0x23, 0x44, 0xd6, + 0xe6, 0x19, 0xec, 0x4a, 0xfe, 0xe6, 0x99, 0x07, 0xc1, 0xb9, 0x5d, 0x41, 0x3f, 0x5f, 0x18, 0xa9, + 0x62, 0xa4, 0x7c, 0x52, 0xfe, 0xa3, 0x16, 0x62, 0x89, 0xa0, 0x15, 0x9f, 0xd7, 0x00, 0x59, 0x19, + 0xb1, 0x58, 0x58, 0x91, 0xbc, 0x74, 0xea, 0xc2, 0x3f, 0x7f, 0xb4, 0xce, 0x96, 0xe3, 0x9c, 0x4e, + 0xb0, 0xef, 0x1c, 0xe6, 0x6c, 0x5f, 0x11, 0x82, 0x76, 0xd0, 0xef, 0x9c, 0xc7, 0x19, 0xf8, 0x77, + 0xce, 0x83, 0xe0, 0xdc, 0xae, 0xe8, 0x9f, 0x1b, 0xe3, 0x5a, 0x1a, 0xf6, 0xaa, 0xb8, 0x05, 0xa3, + 0x5b, 0x4c, 0xab, 0x27, 0xf6, 0x6d, 0x69, 0x15, 0x22, 0xd7, 0x0d, 0xf2, 0x3b, 0x12, 0xff, 0x1f, + 0x0b, 0xcc, 0xe8, 0x35, 0x18, 0xb2, 0xdc, 0x40, 0x6c, 0xb8, 0x8f, 0x0d, 0xa0, 0x0c, 0x8b, 0x5d, + 0x79, 0xea, 0x6b, 0x4d, 0x4c, 0x91, 0x22, 0x17, 0xc6, 0x5d, 0xa1, 0xd8, 0x10, 0x77, 0xcf, 0xd2, + 0x69, 0x46, 0x23, 0x05, 0x49, 0xa4, 0x96, 0x91, 0x25, 0x38, 0xa2, 0x41, 0xe9, 0xa5, 0x34, 0xf9, + 0xa5, 0xe9, 0x45, 0xaa, 0xbd, 0x7e, 0xda, 0xd3, 0x75, 0x55, 0x51, 0x37, 0x72, 0x7c, 0x45, 0xdd, + 0x74, 0xe1, 0xc3, 0x06, 0x81, 0xd1, 0xd0, 0xb0, 0xdd, 0x90, 0x2b, 0x6a, 0x4a, 0x3e, 0xc2, 0xd3, + 0xfe, 0x6f, 0x50, 0x2c, 0xb1, 0x46, 0x84, 0xfd, 0x0c, 0xb0, 0x40, 0x4e, 0x17, 0xd6, 0x1e, 0x4b, + 0xf6, 0x2d, 0x36, 0x66, 0xe9, 0x85, 0xc5, 0x53, 0x86, 0xf3, 0x85, 0xc5, 0xff, 0xc7, 0x02, 0x33, + 0x7a, 0x03, 0xc6, 0x03, 0x69, 0x36, 0x31, 0x3e, 0x68, 0x8e, 0x59, 0x61, 0x33, 0x21, 0xfc, 0x75, + 0x84, 0xb1, 0x44, 0x84, 0x1f, 0x6d, 0xc1, 0x98, 0xcd, 0x3d, 0x4c, 0x44, 0xe0, 0x9e, 0x8f, 0x0d, + 0x90, 0x62, 0x4d, 0x66, 0xc1, 0xe7, 0xe1, 0x1e, 0x24, 0x62, 0xfd, 0x77, 0x81, 0xeb, 0xd9, 0x85, + 0x65, 0xda, 0x36, 0x8c, 0x4b, 0x74, 0x83, 0xf8, 0x8d, 0xc9, 0xa4, 0x96, 0x7c, 0x68, 0x51, 0x8a, + 0xcb, 0x08, 0x37, 0xaa, 0xe5, 0xf9, 0xff, 0xc5, 0xa1, 0xfe, 0x8f, 0xe7, 0xfb, 0xf7, 0x26, 0xcb, + 0x42, 0x27, 0xbd, 0xf0, 0x87, 0xca, 0x2f, 0xad, 0xc8, 0x43, 0x3f, 0x91, 0x7d, 0x4e, 0x3a, 0xf1, + 0x2b, 0x44, 0x0a, 0x2c, 0xf7, 0x86, 0x4b, 0x59, 0xee, 0xbd, 0x00, 0x17, 0x84, 0xa5, 0x44, 0x83, + 0x25, 0x7c, 0x0f, 0x7b, 0xc2, 0xb5, 0x81, 0xd9, 0xd0, 0xd4, 0x92, 0x20, 0x9c, 0xae, 0x8b, 0x7e, + 0x43, 0x83, 0x71, 0x53, 0x88, 0x1c, 0x62, 0x5f, 0xad, 0x0c, 0xf6, 0x18, 0xb3, 0x20, 0x25, 0x18, + 0x2e, 0x4c, 0xbf, 0x2c, 0x79, 0x84, 0x2c, 0x3e, 0x25, 0xa5, 0x41, 0xd4, 0x6b, 0xf4, 0x3b, 0xf4, + 0xbe, 0xe0, 0xb0, 0x44, 0x9b, 0xcc, 0xd3, 0x99, 0xfb, 0x5c, 0x3c, 0x18, 0x70, 0x14, 0x8b, 0x31, + 0x46, 0x3e, 0x90, 0xef, 0x8c, 0x6e, 0x05, 0x31, 0xe4, 0x94, 0xc6, 0xa2, 0x76, 0x1f, 0xfd, 0x63, + 0x0d, 0x9e, 0xe2, 0x8e, 0x2e, 0x35, 0x2a, 0x45, 0xb0, 0x7c, 0xe5, 0x24, 0x4e, 0x90, 0x1e, 0xdb, + 0x19, 0x8e, 0x9f, 0xd8, 0xce, 0xf0, 0xe9, 0xc3, 0x83, 0xea, 0x53, 0xb5, 0x63, 0xe0, 0xc6, 0xc7, + 0xea, 0x01, 0x7a, 0x0b, 0xa6, 0x1d, 0x35, 0x1a, 0x8b, 0x60, 0x30, 0xa5, 0x54, 0xfd, 0x89, 0xb0, + 0x2e, 0x5c, 0xb7, 0x9b, 0x28, 0xc2, 0x49, 0x52, 0xf3, 0xbb, 0x30, 0x9d, 0x58, 0x68, 0x67, 0xaa, + 0x24, 0x71, 0x61, 0x36, 0xbd, 0x1e, 0xce, 0xd4, 0xe6, 0xe6, 0x3e, 0x4c, 0x44, 0x07, 0x15, 0x7a, + 0x42, 0x21, 0x14, 0x0b, 0x12, 0xf7, 0x49, 0x8f, 0x53, 0xad, 0x26, 
0x2e, 0x78, 0x5c, 0x83, 0xff, + 0x32, 0x2d, 0x10, 0x08, 0xf5, 0xaf, 0x08, 0x0d, 0xfe, 0x06, 0x69, 0x77, 0x1c, 0x23, 0x24, 0xef, + 0xfe, 0xf7, 0x63, 0xfd, 0xbf, 0x6a, 0xfc, 0xbc, 0xe1, 0xc7, 0x2a, 0x32, 0x60, 0xb2, 0xcd, 0x43, + 0x0e, 0x33, 0xe7, 0x7e, 0xad, 0x7c, 0x58, 0x81, 0xd5, 0x18, 0x0d, 0x56, 0x71, 0xa2, 0x87, 0x30, + 0x21, 0x45, 0x1b, 0xa9, 0x91, 0xb8, 0x33, 0x98, 0x60, 0x10, 0x49, 0x51, 0xd1, 0xd3, 0xa4, 0x2c, + 0x09, 0x70, 0x4c, 0x4b, 0x37, 0x00, 0x65, 0xdb, 0xd0, 0x5b, 0xb0, 0x34, 0xa5, 0xd7, 0x92, 0x71, + 0xfc, 0x32, 0xe6, 0xf4, 0x47, 0xa6, 0xd6, 0xd6, 0x7f, 0xb3, 0x02, 0xb9, 0x69, 0xde, 0x90, 0x0e, + 0xa3, 0xdc, 0xbb, 0x4d, 0x66, 0xed, 0xa6, 0xa2, 0x0c, 0x77, 0x7d, 0xc3, 0x02, 0x82, 0x1e, 0x70, + 0x4d, 0x88, 0x6b, 0xb1, 0xf8, 0x79, 0x31, 0x97, 0x50, 0xfd, 0x28, 0x97, 0xf3, 0x2a, 0xe0, 0xfc, + 0x76, 0x68, 0x0f, 0x50, 0xdb, 0xd8, 0x4f, 0x63, 0x1b, 0x20, 0xa1, 0xd2, 0x6a, 0x06, 0x1b, 0xce, + 0xa1, 0x40, 0x0f, 0x52, 0xc3, 0x34, 0x49, 0x27, 0x24, 0x16, 0x1f, 0xa2, 0x7c, 0x40, 0x64, 0x07, + 0xe9, 0x62, 0x12, 0x84, 0xd3, 0x75, 0xf5, 0xaf, 0x0d, 0xc3, 0xb5, 0xe4, 0x24, 0xd2, 0x1d, 0x2a, + 0x1d, 0xd0, 0x5e, 0x94, 0xf6, 0xf5, 0x7c, 0x22, 0x9f, 0x49, 0xdb, 0xd7, 0xcf, 0xd5, 0x7c, 0xc2, + 0x8e, 0x64, 0xc3, 0x09, 0x64, 0xa3, 0x84, 0xad, 0xfd, 0xd7, 0xc1, 0x9b, 0xac, 0xc0, 0x6b, 0x6e, + 0xe8, 0x4c, 0xbd, 0xe6, 0xde, 0xd1, 0x60, 0x3e, 0x59, 0x7c, 0xc7, 0x76, 0xed, 0x60, 0x47, 0x44, + 0x81, 0x3b, 0xb9, 0x79, 0x3f, 0x4b, 0xba, 0xb0, 0x52, 0x88, 0x11, 0xf7, 0xa1, 0x86, 0x3e, 0xab, + 0xc1, 0xf5, 0xd4, 0xbc, 0x24, 0x62, 0xd2, 0x9d, 0xdc, 0xd2, 0x9f, 0xf9, 0xff, 0xae, 0x14, 0xa3, + 0xc4, 0xfd, 0xe8, 0xe9, 0xff, 0xa2, 0x02, 0x23, 0xec, 0xfd, 0xfb, 0xdd, 0x61, 0xf0, 0xcc, 0xba, + 0x5a, 0x68, 0x03, 0xd4, 0x4a, 0xd9, 0x00, 0xbd, 0x58, 0x9e, 0x44, 0x7f, 0x23, 0xa0, 0xef, 0x84, + 0xab, 0xac, 0xda, 0xa2, 0xc5, 0xd4, 0x32, 0x01, 0xb1, 0x16, 0x2d, 0x8b, 0x45, 0x1f, 0x38, 0x5a, + 0x17, 0xfd, 0x04, 0x0c, 0x75, 0x7d, 0x27, 0x1d, 0x8f, 0x63, 0x13, 0xaf, 0x60, 0x5a, 0xae, 0xbf, + 0xa3, 0xc1, 0x2c, 0xc3, 0xad, 0x6c, 0x5f, 0xb4, 0x07, 0xe3, 0xbe, 0xd8, 0xc2, 0xe2, 0xdb, 0xac, + 0x94, 0x1e, 0x5a, 0x0e, 0x5b, 0x10, 0x89, 0x28, 0xc5, 0x2f, 0x1c, 0xd1, 0xd2, 0xbf, 0x3a, 0x0a, + 0x73, 0x45, 0x8d, 0xd0, 0x4f, 0x68, 0x70, 0xd5, 0x8c, 0xa5, 0xb9, 0xc5, 0x6e, 0xb8, 0xe3, 0xf9, + 0x76, 0x68, 0x0b, 0xc3, 0x90, 0x92, 0xd7, 0xdc, 0xda, 0x62, 0xd4, 0x2b, 0x16, 0x43, 0xad, 0x96, + 0x4b, 0x01, 0x17, 0x50, 0x46, 0x6f, 0x03, 0xec, 0xc6, 0x41, 0x5b, 0x2b, 0xe5, 0xd3, 0x43, 0xb0, + 0x61, 0x2b, 0x81, 0x5d, 0x65, 0xa7, 0x98, 0x66, 0x53, 0x29, 0x57, 0xc8, 0x51, 0xe2, 0x41, 0xb0, + 0x73, 0x9f, 0xf4, 0x3a, 0x86, 0x2d, 0x9f, 0xff, 0xcb, 0x13, 0x6f, 0x36, 0xef, 0x09, 0x54, 0x49, + 0xe2, 0x4a, 0xb9, 0x42, 0x0e, 0x7d, 0x4a, 0x83, 0x69, 0x4f, 0x75, 0x55, 0x1e, 0xc4, 0xba, 0x32, + 0xd7, 0xe7, 0x99, 0x8b, 0xd0, 0x49, 0x50, 0x92, 0x24, 0x5d, 0x13, 0x17, 0x83, 0xf4, 0x91, 0x25, + 0x98, 0xda, 0xea, 0xe0, 0x59, 0x64, 0x95, 0xf3, 0x8f, 0x5f, 0xc7, 0xb3, 0xe0, 0x2c, 0x79, 0xd6, + 0x29, 0x12, 0x9a, 0xd6, 0xb2, 0x6b, 0xfa, 0x3d, 0xe6, 0x75, 0x48, 0x3b, 0x35, 0x5a, 0xbe, 0x53, + 0xcb, 0x1b, 0xb5, 0x7a, 0x02, 0x59, 0xb2, 0x53, 0x59, 0x70, 0x96, 0xbc, 0xfe, 0xc9, 0x0a, 0x3c, + 0x56, 0xb0, 0xc6, 0xfe, 0xda, 0xf8, 0x96, 0x7f, 0x59, 0x83, 0x09, 0x36, 0x07, 0xef, 0x12, 0x07, + 0x15, 0xd6, 0xd7, 0x02, 0x2b, 0xb9, 0xdf, 0xd2, 0xe0, 0x62, 0x26, 0x7a, 0xe7, 0xb1, 0xdc, 0x1b, + 0xce, 0xcd, 0x80, 0xeb, 0x7d, 0x71, 0xa4, 0xee, 0xa1, 0xd8, 0x59, 0x36, 0x1d, 0xa5, 0x5b, 0x7f, + 0x05, 0xa6, 0x13, 0x46, 0x72, 0x51, 0x1c, 0x20, 0x2d, 0x37, 0x0e, 0x90, 0x1a, 0xe6, 0xa7, 
0xd2, + 0x2f, 0xcc, 0x4f, 0xbc, 0xe4, 0xb3, 0x9c, 0xed, 0xaf, 0xcd, 0x92, 0xff, 0xc3, 0x0b, 0x62, 0xc9, + 0xb3, 0x17, 0x87, 0xd7, 0x61, 0x94, 0x05, 0x15, 0x92, 0x27, 0xe6, 0xed, 0xd2, 0xc1, 0x8a, 0x02, + 0x7e, 0x93, 0xe2, 0xff, 0x63, 0x81, 0x15, 0xd5, 0x61, 0xd6, 0x74, 0xbc, 0xae, 0x25, 0x12, 0x6b, + 0xae, 0xc5, 0x97, 0xb6, 0x28, 0xe6, 0x64, 0x2d, 0x05, 0xc7, 0x99, 0x16, 0x08, 0xf3, 0x37, 0x0b, + 0x7e, 0x9e, 0x95, 0x8a, 0x39, 0x59, 0x5f, 0x6b, 0xf2, 0x9c, 0x0d, 0xd1, 0x5b, 0xc5, 0x9b, 0x00, + 0x44, 0x2e, 0x5e, 0xe9, 0x57, 0xf8, 0x42, 0xb9, 0x68, 0x9a, 0xd1, 0x16, 0x90, 0xc2, 0x67, 0x54, + 0x14, 0x60, 0x85, 0x08, 0xf2, 0x61, 0x72, 0xc7, 0xde, 0x22, 0xbe, 0xcb, 0xe5, 0xa8, 0x91, 0xf2, + 0x22, 0xe2, 0xbd, 0x18, 0x0d, 0xbf, 0xe3, 0x2b, 0x05, 0x58, 0x25, 0x82, 0x7c, 0x2e, 0x8e, 0x70, + 0xf5, 0xf0, 0x20, 0x29, 0xe6, 0x63, 0xbd, 0x73, 0x3c, 0xce, 0xb8, 0x0c, 0x2b, 0x54, 0x90, 0x0b, + 0xe0, 0x46, 0xd1, 0xc4, 0x06, 0x79, 0x71, 0x88, 0x63, 0x92, 0x71, 0xc1, 0x23, 0xfe, 0x8d, 0x15, + 0x0a, 0x74, 0x5e, 0xdb, 0x71, 0x78, 0x3a, 0xa1, 0x43, 0x7c, 0x71, 0xc0, 0x10, 0x81, 0x42, 0x77, + 0x12, 0x17, 0x60, 0x95, 0x08, 0x1d, 0x63, 0x3b, 0x0a, 0x2a, 0x27, 0x74, 0x84, 0xa5, 0xc6, 0x18, + 0x87, 0xa6, 0x13, 0x89, 0xbf, 0xa2, 0xdf, 0x58, 0xa1, 0x80, 0xde, 0x50, 0x9e, 0xba, 0xa0, 0xbc, + 0x06, 0xea, 0x58, 0xcf, 0x5c, 0x1f, 0x8e, 0x15, 0x31, 0x93, 0x6c, 0xaf, 0x5e, 0x57, 0x94, 0x30, + 0x2c, 0xd8, 0x1e, 0xe5, 0x1f, 0x19, 0xa5, 0x4c, 0x6c, 0x9e, 0x3b, 0xd5, 0xd7, 0x3c, 0xb7, 0x46, + 0x25, 0x34, 0xc5, 0x5d, 0x84, 0x31, 0x85, 0xe9, 0xf8, 0x85, 0xa3, 0x99, 0x06, 0xe2, 0x6c, 0x7d, + 0xce, 0xf4, 0x89, 0xc5, 0xda, 0xce, 0xa8, 0x4c, 0x9f, 0x97, 0xe1, 0x08, 0x8a, 0xf6, 0x60, 0x2a, + 0x50, 0x6c, 0x7d, 0x45, 0xb6, 0xc6, 0x01, 0xde, 0xa6, 0x84, 0x9d, 0x2f, 0x0b, 0xb3, 0xa4, 0x96, + 0xe0, 0x04, 0x1d, 0xf4, 0xb6, 0x6a, 0xdc, 0x38, 0x5b, 0xde, 0xb1, 0x33, 0x3f, 0x88, 0x60, 0xac, + 0x61, 0x8b, 0xec, 0xea, 0x54, 0x9b, 0xc3, 0x6e, 0xd2, 0x8c, 0xef, 0xe2, 0xa9, 0x38, 0xb2, 0x1f, + 0x69, 0xe6, 0x47, 0x3f, 0x2d, 0xd9, 0xef, 0x78, 0x41, 0xd7, 0x27, 0x2c, 0x38, 0x2a, 0xfb, 0x3c, + 0x28, 0xfe, 0xb4, 0xcb, 0x69, 0x20, 0xce, 0xd6, 0x47, 0x3f, 0xac, 0xc1, 0x2c, 0x4f, 0x76, 0x49, + 0x8f, 0x2e, 0xcf, 0x25, 0x6e, 0x18, 0xb0, 0x6c, 0x8e, 0x25, 0x7d, 0x2f, 0x9b, 0x29, 0x5c, 0x3c, + 0x43, 0x50, 0xba, 0x14, 0x67, 0x68, 0xd2, 0x95, 0xa3, 0xba, 0xc2, 0xb3, 0xa4, 0x90, 0x25, 0x57, + 0x8e, 0xea, 0x66, 0xcf, 0x57, 0x8e, 0x5a, 0x82, 0x13, 0x74, 0xd0, 0x47, 0x60, 0x3a, 0x90, 0x99, + 0x5b, 0xd8, 0x0c, 0x5e, 0x89, 0x63, 0x55, 0x35, 0x55, 0x00, 0x4e, 0xd6, 0xd3, 0xff, 0xad, 0x06, + 0x10, 0x69, 0x0f, 0xce, 0x43, 0x27, 0x6e, 0x25, 0x14, 0x2a, 0x4b, 0x03, 0x69, 0x3b, 0x48, 0xa1, + 0x66, 0xfc, 0x0f, 0x34, 0x98, 0x89, 0xab, 0x9d, 0x83, 0xa8, 0x6e, 0x26, 0x45, 0xf5, 0x8f, 0x0f, + 0x36, 0xae, 0x02, 0x79, 0xfd, 0xff, 0x54, 0xd4, 0x51, 0x31, 0x69, 0x6c, 0x2f, 0xf1, 0xc6, 0x4c, + 0x49, 0xdf, 0x1b, 0xe4, 0x8d, 0x59, 0x75, 0xcf, 0x8d, 0xc7, 0x9b, 0xf3, 0xe6, 0xfc, 0xb7, 0x12, + 0xb2, 0xd0, 0x00, 0x4e, 0xe8, 0x91, 0xe0, 0x23, 0x49, 0xf3, 0x09, 0x38, 0x4a, 0x30, 0x7a, 0x53, + 0x65, 0x95, 0xfc, 0xb5, 0xfa, 0x13, 0xe5, 0x3c, 0x9f, 0x95, 0x01, 0xf7, 0x65, 0x90, 0xfa, 0x97, + 0xa7, 0x61, 0x52, 0x51, 0xb4, 0xa5, 0x5e, 0xcc, 0xb5, 0xf3, 0x78, 0x31, 0x0f, 0x61, 0xd2, 0x8c, + 0x82, 0x8d, 0xcb, 0x69, 0x1f, 0x90, 0x66, 0xc4, 0xa2, 0xe3, 0x30, 0xe6, 0x01, 0x56, 0xc9, 0x50, + 0x41, 0x22, 0x5a, 0x63, 0x43, 0xa7, 0x60, 0xc7, 0xd0, 0x6f, 0x5d, 0x7d, 0x08, 0x40, 0xca, 0xa2, + 0xc4, 0x12, 0xd1, 0x22, 0x23, 0x23, 0xf4, 0x46, 0x70, 0x2f, 0x82, 0x61, 0xa5, 0x5e, 0xf6, 0x05, + 0x76, 0xe4, 0xdc, 
0x5e, 0x60, 0xe9, 0x32, 0x70, 0x64, 0xae, 0x9b, 0x81, 0x6c, 0x72, 0xa2, 0x8c, + 0x39, 0xf1, 0x32, 0x88, 0x8a, 0x02, 0xac, 0x10, 0x29, 0x30, 0x9c, 0x18, 0x2b, 0x65, 0x38, 0xd1, + 0x85, 0x4b, 0x3e, 0x09, 0xfd, 0x5e, 0xad, 0x67, 0xb2, 0x14, 0x50, 0x7e, 0xc8, 0x6e, 0x94, 0xe3, + 0xe5, 0xa2, 0x17, 0xe1, 0x2c, 0x2a, 0x9c, 0x87, 0x3f, 0x21, 0x8c, 0x4d, 0xf4, 0x15, 0xc6, 0x3e, + 0x0c, 0x93, 0x21, 0x31, 0x77, 0x5c, 0xdb, 0x34, 0x9c, 0x46, 0x5d, 0x84, 0x52, 0x8c, 0xe5, 0x8a, + 0x18, 0x84, 0xd5, 0x7a, 0x68, 0x09, 0x86, 0xba, 0xb6, 0x25, 0xa4, 0xd1, 0x6f, 0x89, 0x54, 0xd6, + 0x8d, 0xfa, 0xa3, 0x83, 0xea, 0x7b, 0x63, 0x4b, 0x84, 0x68, 0x54, 0xb7, 0x3a, 0xbb, 0xad, 0x5b, + 0x61, 0xaf, 0x43, 0x82, 0x85, 0xcd, 0x46, 0x1d, 0xd3, 0xc6, 0x79, 0x46, 0x25, 0x53, 0x27, 0x30, + 0x2a, 0xf9, 0xbc, 0x06, 0x97, 0x8c, 0xb4, 0xb6, 0x9d, 0x04, 0x73, 0xd3, 0xe5, 0xb9, 0x65, 0xbe, + 0x06, 0x7f, 0xe9, 0xba, 0x18, 0xdf, 0xa5, 0xc5, 0x2c, 0x39, 0x9c, 0xd7, 0x07, 0xe4, 0x03, 0x6a, + 0xdb, 0xad, 0x28, 0xed, 0x8c, 0xf8, 0xea, 0x33, 0xe5, 0xf4, 0x08, 0xab, 0x19, 0x4c, 0x38, 0x07, + 0x3b, 0x7a, 0x08, 0x93, 0x66, 0xac, 0x93, 0x17, 0x52, 0x75, 0xfd, 0x34, 0x1e, 0x05, 0xf8, 0xcd, + 0x4b, 0x55, 0xf8, 0xab, 0x94, 0xa2, 0xd7, 0x34, 0xe5, 0xca, 0x2b, 0x5e, 0x94, 0xd8, 0xa8, 0x67, + 0xcb, 0xbf, 0xa6, 0xe5, 0x63, 0xc4, 0x7d, 0xa8, 0xb1, 0x98, 0x41, 0x4e, 0x32, 0x3b, 0x14, 0x4b, + 0x8c, 0x5e, 0xd2, 0xcf, 0x38, 0x95, 0x68, 0x8a, 0x2f, 0xcd, 0x54, 0x21, 0x4e, 0x13, 0xd4, 0x7f, + 0x5f, 0x13, 0x0a, 0xb3, 0x73, 0xb4, 0x86, 0x38, 0xeb, 0xa7, 0x34, 0xfd, 0xcf, 0x35, 0xc8, 0xc8, + 0xe8, 0x68, 0x0b, 0xc6, 0x28, 0x8a, 0xfa, 0x5a, 0x53, 0x0c, 0xeb, 0x63, 0xe5, 0x8e, 0x4b, 0x86, + 0x82, 0x6b, 0x1f, 0xc5, 0x0f, 0x2c, 0x11, 0x53, 0xa9, 0xdf, 0x55, 0xa2, 0x39, 0x8b, 0x11, 0x96, + 0x92, 0x47, 0xd4, 0xa8, 0xd0, 0x5c, 0xea, 0x57, 0x4b, 0x70, 0x82, 0x8e, 0xbe, 0x02, 0x10, 0xdf, + 0xab, 0x06, 0x36, 0x90, 0xf9, 0xd3, 0x11, 0xb8, 0x32, 0xa8, 0xb3, 0x01, 0x4b, 0x4a, 0x44, 0xf6, + 0x6c, 0x33, 0x5c, 0xdc, 0x0e, 0x89, 0xff, 0xe0, 0xc1, 0x6a, 0x94, 0xc2, 0xbf, 0x64, 0x56, 0x24, + 0xf6, 0xa0, 0xb6, 0x9c, 0x8b, 0x11, 0x17, 0x50, 0x62, 0x77, 0x4a, 0x91, 0x24, 0x19, 0x53, 0x61, + 0x92, 0xe5, 0xe7, 0xe7, 0x11, 0x53, 0xf8, 0x9d, 0x32, 0x0d, 0xc4, 0xd9, 0xfa, 0x69, 0x24, 0x2b, + 0x76, 0xdb, 0xe6, 0xd9, 0x61, 0xb4, 0x2c, 0x12, 0x06, 0xc4, 0xd9, 0xfa, 0x2a, 0x12, 0xfe, 0xa5, + 0xe8, 0x6e, 0x1f, 0xc9, 0x22, 0x89, 0x80, 0x38, 0x5b, 0x1f, 0x59, 0xf0, 0xb8, 0x4f, 0x4c, 0xaf, + 0xdd, 0x26, 0xae, 0xc5, 0xf3, 0xfd, 0x19, 0x7e, 0xcb, 0x76, 0xef, 0xf8, 0x06, 0xab, 0xc8, 0x54, + 0x74, 0x1a, 0xcb, 0x71, 0xf0, 0x38, 0xee, 0x53, 0x0f, 0xf7, 0xc5, 0x82, 0xda, 0x70, 0x81, 0x27, + 0x17, 0xf2, 0x1b, 0x6e, 0x48, 0xfc, 0x3d, 0xc3, 0x11, 0x7a, 0xb8, 0x52, 0x89, 0x8e, 0x37, 0x93, + 0xa8, 0x70, 0x1a, 0x37, 0xea, 0x51, 0xb9, 0x43, 0x74, 0x47, 0x21, 0x39, 0x5e, 0x3e, 0x6d, 0x17, + 0xce, 0xa2, 0xc3, 0x79, 0x34, 0xf4, 0xcf, 0x6b, 0x20, 0x2c, 0x91, 0xd1, 0xe3, 0x89, 0xb7, 0x8e, + 0xf1, 0xd4, 0x3b, 0x87, 0xcc, 0x6a, 0x50, 0xc9, 0xcd, 0x6a, 0xf0, 0x7e, 0x25, 0x14, 0xcf, 0x44, + 0xcc, 0xfb, 0x38, 0x66, 0x25, 0x23, 0xcb, 0x07, 0x60, 0x82, 0xf0, 0x67, 0xb4, 0x48, 0xa2, 0x65, + 0xd6, 0xdd, 0xcb, 0xb2, 0x10, 0xc7, 0x70, 0xfd, 0xf7, 0x34, 0x10, 0x18, 0x58, 0xfe, 0xa0, 0x63, + 0xe5, 0x91, 0x39, 0xd2, 0xb4, 0x49, 0xc9, 0x7f, 0x33, 0x54, 0x98, 0xff, 0xe6, 0x8c, 0xd2, 0xc2, + 0xfc, 0x8a, 0x06, 0x17, 0x92, 0xb1, 0x91, 0x02, 0xf4, 0x3e, 0x18, 0x13, 0xd1, 0x13, 0x45, 0xf8, + 0x33, 0xd6, 0x54, 0x84, 0x2f, 0xc0, 0x12, 0x96, 0x54, 0x87, 0x0d, 0x70, 0xc5, 0xcc, 0x0f, 0xd1, + 0x74, 0xc4, 0x6d, 0xef, 0xd3, 0xb3, 0x30, 
0xca, 0x43, 0xef, 0x51, 0x9e, 0x96, 0xe3, 0xb6, 0x79, + 0xbf, 0x7c, 0x84, 0xbf, 0x32, 0xbe, 0x76, 0x6a, 0x94, 0xfb, 0x4a, 0xdf, 0x28, 0xf7, 0x98, 0xa7, + 0xdb, 0x1a, 0xe0, 0xe9, 0xa3, 0x86, 0x1b, 0x22, 0x7f, 0xb7, 0x4c, 0xb5, 0x15, 0x26, 0xde, 0x04, + 0x86, 0xcb, 0x4b, 0x6e, 0x7c, 0x02, 0x94, 0x97, 0x81, 0x99, 0xbe, 0xaf, 0x02, 0x32, 0xb6, 0xd9, + 0x48, 0x79, 0x53, 0x43, 0x31, 0xe5, 0xc7, 0x88, 0x6d, 0x16, 0x6d, 0xa4, 0xd1, 0xc2, 0x8d, 0xb4, + 0x0d, 0x63, 0x62, 0x2b, 0x08, 0xe6, 0xf8, 0xb1, 0x01, 0xf2, 0x56, 0x29, 0xe1, 0x78, 0x79, 0x01, + 0x96, 0xc8, 0xe9, 0x89, 0xdb, 0x36, 0xf6, 0xed, 0x76, 0xb7, 0xcd, 0x38, 0xe2, 0x88, 0x5a, 0x95, + 0x15, 0x63, 0x09, 0x67, 0x55, 0xb9, 0x85, 0x26, 0xbb, 0x48, 0xa9, 0x55, 0x79, 0x31, 0x96, 0x70, + 0xf4, 0x1a, 0x8c, 0xb7, 0x8d, 0xfd, 0x66, 0xd7, 0x6f, 0x11, 0xf1, 0x22, 0x50, 0x2c, 0xe3, 0x75, + 0x43, 0xdb, 0x59, 0xa0, 0xd7, 0xff, 0xd0, 0x5f, 0x68, 0xb8, 0xe1, 0x03, 0xbf, 0x19, 0xfa, 0x51, + 0xf2, 0x9a, 0x55, 0x81, 0x05, 0x47, 0xf8, 0x90, 0x03, 0x33, 0x6d, 0x63, 0x7f, 0xd3, 0x35, 0xa2, + 0x34, 0xf9, 0x93, 0x25, 0x29, 0xb0, 0x67, 0xe1, 0xd5, 0x04, 0x2e, 0x9c, 0xc2, 0x9d, 0xf3, 0x02, + 0x3d, 0x75, 0x56, 0x2f, 0xd0, 0x8b, 0x91, 0xbf, 0x0d, 0xbf, 0xb7, 0x5d, 0xcb, 0xf5, 0x6c, 0xef, + 0xeb, 0x4b, 0xf3, 0x7a, 0xe4, 0x4b, 0x33, 0x53, 0xfe, 0xc9, 0xb4, 0x8f, 0x1f, 0x4d, 0x17, 0x26, + 0xa9, 0x84, 0xcd, 0x4b, 0xe9, 0xc5, 0xaa, 0xb4, 0x0a, 0xb2, 0x1e, 0xa1, 0x51, 0xd2, 0xae, 0xc6, + 0xa8, 0xb1, 0x4a, 0x07, 0x3d, 0xe0, 0x69, 0xd4, 0x1d, 0x12, 0xc6, 0x55, 0xd8, 0x85, 0x7e, 0x96, + 0xed, 0x9f, 0x28, 0xeb, 0x79, 0xa6, 0x02, 0xce, 0x6f, 0x17, 0x47, 0x61, 0xb9, 0x98, 0x1f, 0x85, + 0x05, 0xfd, 0x58, 0x9e, 0x9e, 0x1f, 0xb1, 0x39, 0xfd, 0x8e, 0xf2, 0xbc, 0xa1, 0xb4, 0xb6, 0xff, + 0x5f, 0x6a, 0x30, 0xd7, 0x2e, 0xc8, 0x4f, 0x2a, 0x9e, 0x1f, 0x36, 0x06, 0xe0, 0x0f, 0x85, 0x39, + 0x4f, 0x97, 0x9e, 0x3a, 0x3c, 0xa8, 0x1e, 0x99, 0x19, 0x15, 0x17, 0xf6, 0x0d, 0xf9, 0x30, 0x16, + 0xf4, 0x02, 0x33, 0x74, 0x82, 0xb9, 0xcb, 0xe5, 0xd3, 0x60, 0x0a, 0xce, 0xda, 0xe4, 0x98, 0x38, + 0x6b, 0x8d, 0x83, 0xc0, 0xf3, 0x52, 0x2c, 0x09, 0x0d, 0xea, 0xa7, 0x3d, 0x40, 0xe0, 0xc9, 0xf9, + 0xdb, 0x30, 0xa5, 0x76, 0xf2, 0x44, 0xee, 0xe1, 0x3f, 0xa7, 0xc1, 0x6c, 0xfa, 0xd0, 0x52, 0x33, + 0xd5, 0x6b, 0x67, 0x9b, 0xa9, 0x5e, 0xb1, 0x7f, 0xa9, 0xf4, 0xb1, 0x7f, 0x79, 0x01, 0xae, 0xe6, + 0xaf, 0x65, 0x2a, 0x41, 0x1a, 0x8e, 0xe3, 0x3d, 0x14, 0x37, 0xb7, 0x38, 0x3f, 0x14, 0x2d, 0xc4, + 0x1c, 0xa6, 0x7f, 0x3f, 0xa4, 0xc3, 0x0c, 0xa3, 0x37, 0x60, 0x22, 0x08, 0x76, 0x78, 0x04, 0x49, + 0x31, 0xc8, 0x72, 0x57, 0x76, 0x19, 0x86, 0x52, 0xb8, 0x34, 0xca, 0x9f, 0x38, 0x46, 0xbf, 0xf4, + 0xea, 0x97, 0xbe, 0x76, 0xe3, 0x3d, 0x5f, 0xf9, 0xda, 0x8d, 0xf7, 0x7c, 0xf5, 0x6b, 0x37, 0xde, + 0xf3, 0x83, 0x87, 0x37, 0xb4, 0x2f, 0x1d, 0xde, 0xd0, 0xbe, 0x72, 0x78, 0x43, 0xfb, 0xea, 0xe1, + 0x0d, 0xed, 0x3f, 0x1d, 0xde, 0xd0, 0x7e, 0xfc, 0x3f, 0xdf, 0x78, 0xcf, 0x6b, 0xcf, 0xc5, 0xd4, + 0x6f, 0x49, 0xa2, 0xf1, 0x3f, 0x9d, 0xdd, 0xd6, 0x2d, 0x4a, 0x5d, 0xba, 0x16, 0x31, 0xea, 0xff, + 0x2f, 0x00, 0x00, 0xff, 0xff, 0x46, 0x8c, 0x06, 0x22, 0xf5, 0xe9, 0x00, 0x00, } func (m *APIServerLogging) Marshal() (dAtA []byte, err error) { @@ -6937,6 +6880,23 @@ func (m *ClusterAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.MaxEmptyBulkDelete != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxEmptyBulkDelete)) + i-- + dAtA[i] = 0x60 + } + if m.NewPodScaleUpDelay != nil { + { + size, err := m.NewPodScaleUpDelay.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } if len(m.IgnoreTaints) > 0 { for iNdEx := len(m.IgnoreTaints) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.IgnoreTaints[iNdEx]) @@ -10589,6 +10549,13 @@ func (m *MachineImage) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.UpdateStrategy != nil { + i -= len(*m.UpdateStrategy) + copy(dAtA[i:], *m.UpdateStrategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UpdateStrategy))) + i-- + dAtA[i] = 0x1a + } if len(m.Versions) > 0 { for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { { @@ -12832,92 +12799,6 @@ func (m *SeedSettingDependencyWatchdog) MarshalToSizedBuffer(dAtA []byte) (int, i-- dAtA[i] = 0x1a } - if m.Probe != nil { - { - size, err := m.Probe.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Endpoint != nil { - { - size, err := m.Endpoint.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SeedSettingDependencyWatchdogEndpoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SeedSettingDependencyWatchdogEndpoint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SeedSettingDependencyWatchdogEndpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.Enabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *SeedSettingDependencyWatchdogProbe) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SeedSettingDependencyWatchdogProbe) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SeedSettingDependencyWatchdogProbe) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.Enabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 return len(dAtA) - i, nil } @@ -16286,6 +16167,13 @@ func (m *ClusterAutoscaler) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.NewPodScaleUpDelay != nil { + l = m.NewPodScaleUpDelay.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxEmptyBulkDelete != nil { + n += 1 + sovGenerated(uint64(*m.MaxEmptyBulkDelete)) + } return n } @@ -17621,6 +17509,10 @@ func (m *MachineImage) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.UpdateStrategy != nil { + l = len(*m.UpdateStrategy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -18451,14 +18343,6 @@ func (m *SeedSettingDependencyWatchdog) Size() (n int) { } var l int _ = l - if m.Endpoint != nil { - l = m.Endpoint.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Probe != nil { - l = m.Probe.Size() - n += 1 + l + sovGenerated(uint64(l)) - } if m.Weeder != nil { l = m.Weeder.Size() n += 1 + l + sovGenerated(uint64(l)) @@ -18470,26 +18354,6 @@ func (m *SeedSettingDependencyWatchdog) Size() (n int) { return n } -func (m *SeedSettingDependencyWatchdogEndpoint) 
Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - return n -} - -func (m *SeedSettingDependencyWatchdogProbe) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - return n -} - func (m *SeedSettingDependencyWatchdogProber) Size() (n int) { if m == nil { return 0 @@ -19872,6 +19736,8 @@ func (this *ClusterAutoscaler) String() string { `MaxNodeProvisionTime:` + strings.Replace(fmt.Sprintf("%v", this.MaxNodeProvisionTime), "Duration", "v11.Duration", 1) + `,`, `MaxGracefulTerminationSeconds:` + valueToStringGenerated(this.MaxGracefulTerminationSeconds) + `,`, `IgnoreTaints:` + fmt.Sprintf("%v", this.IgnoreTaints) + `,`, + `NewPodScaleUpDelay:` + strings.Replace(fmt.Sprintf("%v", this.NewPodScaleUpDelay), "Duration", "v11.Duration", 1) + `,`, + `MaxEmptyBulkDelete:` + valueToStringGenerated(this.MaxEmptyBulkDelete) + `,`, `}`, }, "") return s @@ -20759,6 +20625,7 @@ func (this *MachineImage) String() string { s := strings.Join([]string{`&MachineImage{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Versions:` + repeatedStringForVersions + `,`, + `UpdateStrategy:` + valueToStringGenerated(this.UpdateStrategy) + `,`, `}`, }, "") return s @@ -21361,34 +21228,12 @@ func (this *SeedSettingDependencyWatchdog) String() string { return "nil" } s := strings.Join([]string{`&SeedSettingDependencyWatchdog{`, - `Endpoint:` + strings.Replace(this.Endpoint.String(), "SeedSettingDependencyWatchdogEndpoint", "SeedSettingDependencyWatchdogEndpoint", 1) + `,`, - `Probe:` + strings.Replace(this.Probe.String(), "SeedSettingDependencyWatchdogProbe", "SeedSettingDependencyWatchdogProbe", 1) + `,`, `Weeder:` + strings.Replace(this.Weeder.String(), "SeedSettingDependencyWatchdogWeeder", "SeedSettingDependencyWatchdogWeeder", 1) + `,`, `Prober:` + strings.Replace(this.Prober.String(), "SeedSettingDependencyWatchdogProber", "SeedSettingDependencyWatchdogProber", 1) + `,`, `}`, }, "") return s } -func (this *SeedSettingDependencyWatchdogEndpoint) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SeedSettingDependencyWatchdogEndpoint{`, - `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`, - `}`, - }, "") - return s -} -func (this *SeedSettingDependencyWatchdogProbe) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SeedSettingDependencyWatchdogProbe{`, - `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`, - `}`, - }, "") - return s -} func (this *SeedSettingDependencyWatchdogProber) String() string { if this == nil { return "nil" @@ -25738,6 +25583,62 @@ func (m *ClusterAutoscaler) Unmarshal(dAtA []byte) error { } m.IgnoreTaints = append(m.IgnoreTaints, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewPodScaleUpDelay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NewPodScaleUpDelay == nil { + m.NewPodScaleUpDelay = &v11.Duration{} + } + if err := m.NewPodScaleUpDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 
0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxEmptyBulkDelete", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxEmptyBulkDelete = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -36308,6 +36209,39 @@ func (m *MachineImage) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MachineImageUpdateStrategy(dAtA[iNdEx:postIndex]) + m.UpdateStrategy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -42958,78 +42892,6 @@ func (m *SeedSettingDependencyWatchdog) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: SeedSettingDependencyWatchdog: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Endpoint == nil { - m.Endpoint = &SeedSettingDependencyWatchdogEndpoint{} - } - if err := m.Endpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Probe", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Probe == nil { - m.Probe = &SeedSettingDependencyWatchdogProbe{} - } - if err := m.Probe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Weeder", wireType) @@ -43123,146 +42985,6 @@ func (m *SeedSettingDependencyWatchdog) Unmarshal(dAtA []byte) error { } return nil } -func (m *SeedSettingDependencyWatchdogEndpoint) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SeedSettingDependencyWatchdogEndpoint: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SeedSettingDependencyWatchdogEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Enabled = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SeedSettingDependencyWatchdogProbe) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SeedSettingDependencyWatchdogProbe: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SeedSettingDependencyWatchdogProbe: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Enabled = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *SeedSettingDependencyWatchdogProber) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto index 4e71ab062..563b15176 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto @@ -406,6 +406,14 @@ message ClusterAutoscaler { // IgnoreTaints specifies a list of taint keys to ignore in node templates when considering to scale a node group. // +optional repeated string ignoreTaints = 10; + + // NewPodScaleUpDelay specifies how long CA should ignore newly created pods before they have to be considered for scale-up. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration newPodScaleUpDelay = 11; + + // MaxEmptyBulkDelete specifies the maximum number of empty nodes that can be deleted at the same time (default: 10). + // +optional + optional int32 maxEmptyBulkDelete = 12; } // Condition holds the information about the state of a resource. @@ -545,7 +553,7 @@ message ControllerRegistrationDeployment { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector seedSelector = 4; - // DeploymentRefs holds references to `ControllerDeployments`. Only one element is support now. + // DeploymentRefs holds references to `ControllerDeployments`. Only one element is supported currently. // +optional repeated DeploymentRef deploymentRefs = 5; } @@ -1556,6 +1564,13 @@ message MachineImage { // +patchMergeKey=version // +patchStrategy=merge repeated MachineImageVersion versions = 2; + + // UpdateStrategy is the update strategy to use for the machine image. Possible values are: + // - patch: update to the latest patch version of the current minor version. + // - minor: update to the latest minor and patch version. + // - major: always update to the overall latest version (default). + // +optional + optional string updateStrategy = 3; } // MachineImageVersion is an expirable version with list of supported container runtimes and interfaces @@ -2214,16 +2229,6 @@ message SeedSelector { // SeedSettingDependencyWatchdog controls the dependency-watchdog settings for the seed. message SeedSettingDependencyWatchdog { - // Endpoint controls the endpoint settings for the dependency-watchdog for the seed. - // Deprecated: This field is deprecated and will be removed in a future version of Gardener. Use `Weeder` instead. - // +optional - optional SeedSettingDependencyWatchdogEndpoint endpoint = 1; - - // Probe controls the probe settings for the dependency-watchdog for the seed. - // Deprecated: This field is deprecated and will be removed in a future version of Gardener. Use `Prober` instead. - // +optional - optional SeedSettingDependencyWatchdogProbe probe = 2; - // Weeder controls the weeder settings for the dependency-watchdog for the seed. // +optional optional SeedSettingDependencyWatchdogWeeder weeder = 3; @@ -2233,24 +2238,6 @@ message SeedSettingDependencyWatchdog { optional SeedSettingDependencyWatchdogProber prober = 4; } -// SeedSettingDependencyWatchdogEndpoint controls the endpoint settings for the dependency-watchdog for the seed. -// Deprecated: This type is deprecated and will be removed in a future version of Gardener. Use type `SeedSettingDependencyWatchdogWeeder` instead. -message SeedSettingDependencyWatchdogEndpoint { - // Enabled controls whether the endpoint controller of the dependency-watchdog should be enabled. This controller - // helps to alleviate the delay where control plane components remain unavailable by finding the respective pods in - // CrashLoopBackoff status and restarting them once their dependants become ready and available again. - optional bool enabled = 1; -} - -// SeedSettingDependencyWatchdogProbe controls the probe settings for the dependency-watchdog for the seed. -// Deprecated: This type is deprecated and will be removed in a future version of Gardener. Use type `SeedSettingDependencyWatchdogProber` instead. -message SeedSettingDependencyWatchdogProbe { - // Enabled controls whether the probe controller of the dependency-watchdog should be enabled. 
This controller - // scales down the kube-controller-manager, machine-controller-manager and cluster-autoscaler of shoot clusters in case their respective kube-apiserver is not - // reachable via its external ingress in order to avoid melt-down situations. - optional bool enabled = 1; -} - // SeedSettingDependencyWatchdogProber controls the prober settings for the dependency-watchdog for the seed. message SeedSettingDependencyWatchdogProber { // Enabled controls whether the probe controller(prober) of the dependency-watchdog should be enabled. This controller diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/condition.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/condition.go index 7f0bd42fa..b1aeed4c3 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/condition.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/condition.go @@ -21,6 +21,7 @@ import ( apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/utils/clock" gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" @@ -166,14 +167,25 @@ func MergeConditions(oldConditions []gardencorev1beta1.Condition, newConditions // RemoveConditions removes the conditions with the given types from the given conditions slice. func RemoveConditions(conditions []gardencorev1beta1.Condition, conditionTypes ...gardencorev1beta1.ConditionType) []gardencorev1beta1.Condition { - conditionTypesMap := make(map[gardencorev1beta1.ConditionType]struct{}, len(conditionTypes)) - for _, conditionType := range conditionTypes { - conditionTypesMap[conditionType] = struct{}{} + unwantedConditionTypes := sets.New(conditionTypes...) + + var newConditions []gardencorev1beta1.Condition + for _, condition := range conditions { + if !unwantedConditionTypes.Has(condition.Type) { + newConditions = append(newConditions, condition) + } } + return newConditions +} + +// RetainConditions retains all given conditionsTypes from the given conditions slice. +func RetainConditions(conditions []gardencorev1beta1.Condition, conditionTypes ...gardencorev1beta1.ConditionType) []gardencorev1beta1.Condition { + wantedConditionsTypes := sets.New(conditionTypes...) + var newConditions []gardencorev1beta1.Condition for _, condition := range conditions { - if _, ok := conditionTypesMap[condition.Type]; !ok { + if wantedConditionsTypes.Has(condition.Type) { newConditions = append(newConditions, condition) } } diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/helper.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/helper.go index 0736cfcad..d7093445c 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/helper.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/helper.go @@ -16,6 +16,7 @@ package helper import ( "fmt" + "slices" "strconv" "strings" "time" @@ -452,7 +453,8 @@ func ShootMachineImageVersionExists(constraint gardencorev1beta1.MachineImage, i return false, 0 } -func toExpirableVersions(versions []gardencorev1beta1.MachineImageVersion) []gardencorev1beta1.ExpirableVersion { +// ToExpirableVersions returns the expirable versions from the given machine image versions. 
+func ToExpirableVersions(versions []gardencorev1beta1.MachineImageVersion) []gardencorev1beta1.ExpirableVersion { expVersions := []gardencorev1beta1.ExpirableVersion{} for _, version := range versions { expVersions = append(expVersions, version.ExpirableVersion) @@ -460,32 +462,6 @@ func toExpirableVersions(versions []gardencorev1beta1.MachineImageVersion) []gar return expVersions } -// GetLatestQualifyingShootMachineImage determines the latest qualifying version in a machine image and returns that as a ShootMachineImage. -// A version qualifies if its classification is not preview and the version is not expired. -// Older but non-deprecated version is preferred over newer but deprecated one. -func GetLatestQualifyingShootMachineImage(image gardencorev1beta1.MachineImage, predicates ...VersionPredicate) (bool, *gardencorev1beta1.ShootMachineImage, error) { - predicates = append(predicates, FilterExpiredVersion()) - - // Try to find non-deprecated version first - qualifyingVersionFound, latestNonDeprecatedImageVersion, err := GetLatestQualifyingVersion(toExpirableVersions(image.Versions), append(predicates, FilterDeprecatedVersion())...) - if err != nil { - return false, nil, err - } - if qualifyingVersionFound { - return true, &gardencorev1beta1.ShootMachineImage{Name: image.Name, Version: &latestNonDeprecatedImageVersion.Version}, nil - } - - // It looks like there is no non-deprecated version, now look also into the deprecated versions - qualifyingVersionFound, latestImageVersion, err := GetLatestQualifyingVersion(toExpirableVersions(image.Versions), predicates...) - if err != nil { - return false, nil, err - } - if !qualifyingVersionFound { - return false, nil, nil - } - return true, &gardencorev1beta1.ShootMachineImage{Name: image.Name, Version: &latestImageVersion.Version}, nil -} - // FindMachineTypeByName tries to find the machine type details with the given name. If it cannot be found it returns nil. func FindMachineTypeByName(machines []gardencorev1beta1.MachineType, name string) *gardencorev1beta1.MachineType { for _, m := range machines { @@ -542,24 +518,6 @@ func WrapWithLastError(err error, lastError *gardencorev1beta1.LastError) error return fmt.Errorf("last error: %w: %s", err, lastError.Description) } -// IsAPIServerExposureManaged returns true, if the Object is managed by Gardener for API server exposure. -// This indicates to extensions that they should not mutate the object. -// Gardener marks the kube-apiserver Service and Deployment as managed by it when it uses SNI to expose them. -// Deprecated: This function is deprecated and will be removed after Gardener v1.80 has been released. -// TODO(rfranzke): Drop this after v1.80 has been released. -func IsAPIServerExposureManaged(obj metav1.Object) bool { - if obj == nil { - return false - } - - if v, found := obj.GetLabels()[v1beta1constants.LabelAPIServerExposure]; found && - v == v1beta1constants.LabelAPIServerExposureGardenerManaged { - return true - } - - return false -} - // FindPrimaryDNSProvider finds the primary provider among the given `providers`. // It returns the first provider if multiple candidates are found. func FindPrimaryDNSProvider(providers []gardencorev1beta1.DNSProvider) *gardencorev1beta1.DNSProvider { @@ -575,20 +533,89 @@ func FindPrimaryDNSProvider(providers []gardencorev1beta1.DNSProvider) *gardenco // VersionPredicate is a function that evaluates a condition on the given versions. 
type VersionPredicate func(expirableVersion gardencorev1beta1.ExpirableVersion, version *semver.Version) (bool, error) -// GetKubernetesVersionForPatchUpdate finds the latest Kubernetes patch version for its minor version in the compared -// to the given . Preview and expired versions do not qualify for the kubernetes patch update. In case it does not find a newer patch version, it returns false. Otherwise, -// true and the found version will be returned. -func GetKubernetesVersionForPatchUpdate(cloudProfile *gardencorev1beta1.CloudProfile, currentVersion string) (bool, string, error) { +// GetLatestVersionForPatchAutoUpdate finds the latest patch version for a given for the current minor version from a given slice of versions. +// The current version, preview and expired versions do not qualify. +// In case no newer patch version is found, returns false and an empty string. Otherwise, returns true and the found version. +func GetLatestVersionForPatchAutoUpdate(versions []gardencorev1beta1.ExpirableVersion, currentVersion string) (bool, string, error) { + currentSemVerVersion, err := semver.NewVersion(currentVersion) + if err != nil { + return false, "", err + } + + predicates := []VersionPredicate{FilterDifferentMajorMinorVersionAndLowerPatchVersionsOfSameMinor(*currentSemVerVersion)} + + return getVersionForAutoUpdate(versions, currentSemVerVersion, predicates) +} + +// GetLatestVersionForMinorAutoUpdate finds the latest minor with the latest patch version higher than a given for the current major version from a given slice of versions. +// Returns the highest patch version for the current minor in case the current version is not the highest patch version yet. +// The current version, preview and expired versions do not qualify. +// In case no newer version is found, returns false and an empty string. Otherwise, returns true and the found version. +func GetLatestVersionForMinorAutoUpdate(versions []gardencorev1beta1.ExpirableVersion, currentVersion string) (bool, string, error) { + // always first check if there is a higher patch version available + found, version, err := GetLatestVersionForPatchAutoUpdate(versions, currentVersion) + if found { + return found, version, nil + } + if err != nil { + return false, version, err + } + + currentSemVerVersion, err := semver.NewVersion(currentVersion) + if err != nil { + return false, "", err + } + + predicates := []VersionPredicate{FilterDifferentMajorVersion(*currentSemVerVersion)} + + return getVersionForAutoUpdate(versions, currentSemVerVersion, predicates) +} + +// GetOverallLatestVersionForAutoUpdate finds the overall latest version higher than a given for the current major version from a given slice of versions. +// Returns the highest patch version for the current minor in case the current version is not the highest patch version yet. +// The current, preview and expired versions do not qualify. +// In case no newer version is found, returns false and an empty string. Otherwise, returns true and the found version. 
+func GetOverallLatestVersionForAutoUpdate(versions []gardencorev1beta1.ExpirableVersion, currentVersion string) (bool, string, error) { + // always first check if there is a higher patch version available to update to + found, version, err := GetLatestVersionForPatchAutoUpdate(versions, currentVersion) + if found { + return found, version, nil + } + if err != nil { + return false, version, err + } + currentSemVerVersion, err := semver.NewVersion(currentVersion) if err != nil { return false, "", err } - qualifyingVersionFound, latestVersion, err := GetLatestQualifyingVersion(cloudProfile.Spec.Kubernetes.Versions, FilterDifferentMajorMinorVersion(*currentSemVerVersion), FilterSameVersion(*currentSemVerVersion), FilterExpiredVersion()) + // if there is no higher patch version available, get the overall latest + return getVersionForAutoUpdate(versions, currentSemVerVersion, []VersionPredicate{}) +} + +// getVersionForAutoUpdate finds the latest eligible version higher than a given from a slice of versions. +// Versions <= the current version, preview and expired versions do not qualify for patch updates. +// First tries to find a non-deprecated version. +// In case no newer patch version is found, returns false and an empty string. Otherwise, returns true and the found version. +func getVersionForAutoUpdate(versions []gardencorev1beta1.ExpirableVersion, currentSemVerVersion *semver.Version, predicates []VersionPredicate) (bool, string, error) { + versionPredicates := append([]VersionPredicate{FilterExpiredVersion(), FilterSameVersion(*currentSemVerVersion), FilterLowerVersion(*currentSemVerVersion)}, predicates...) + + // Try to find non-deprecated version first + qualifyingVersionFound, latestNonDeprecatedImageVersion, err := GetLatestQualifyingVersion(versions, append(versionPredicates, FilterDeprecatedVersion())...) + if err != nil { + return false, "", err + } + if qualifyingVersionFound { + return true, latestNonDeprecatedImageVersion.Version, nil + } + + // otherwise, also consider deprecated versions + qualifyingVersionFound, latestVersion, err := GetLatestQualifyingVersion(versions, versionPredicates...) if err != nil { return false, "", err } - // latest version cannot be found. Do not return an error, but allow for minor upgrade if Shoot's machine image version is expired. + // latest version cannot be found. Do not return an error, but allow for forceful upgrade if Shoot's version is expired. if !qualifyingVersionFound { return false, "", nil } @@ -596,24 +623,28 @@ func GetKubernetesVersionForPatchUpdate(cloudProfile *gardencorev1beta1.CloudPro return true, latestVersion.Version, nil } -// GetKubernetesVersionForMinorUpdate finds a Kubernetes version in the that qualifies for a Kubernetes minor level update given a . -// A qualifying version is a non-preview version having the minor version increased by exactly one version. -// In case the consecutive minor version has only expired versions, picks the latest expired version (will do another minor update during the next maintenance time) +// GetVersionForForcefulUpdateToConsecutiveMinor finds a version from a slice of expirable versions that qualifies for a minor level update given a . +// A qualifying version is a non-preview version having the minor version increased by exactly one version (required for Kubernetes version upgrades). +// In case the consecutive minor version has only expired versions, picks the latest expired version (will try another update during the next maintenance time). 
// If a version can be found, returns true and the qualifying patch version of the next minor version. -// In case it does not find a version, it returns false. -func GetKubernetesVersionForMinorUpdate(cloudProfile *gardencorev1beta1.CloudProfile, currentVersion string) (bool, string, error) { +// In case it does not find a version, it returns false and an empty string. +func GetVersionForForcefulUpdateToConsecutiveMinor(versions []gardencorev1beta1.ExpirableVersion, currentVersion string) (bool, string, error) { currentSemVerVersion, err := semver.NewVersion(currentVersion) if err != nil { return false, "", err } - qualifyingVersionFound, latestVersion, err := GetLatestQualifyingVersion(cloudProfile.Spec.Kubernetes.Versions, FilterNonConsecutiveMinorVersion(*currentSemVerVersion), FilterSameVersion(*currentSemVerVersion), FilterExpiredVersion()) + // filters out any version that does not have minor version +1 + predicates := []VersionPredicate{FilterDifferentMajorVersion(*currentSemVerVersion), FilterNonConsecutiveMinorVersion(*currentSemVerVersion)} + + qualifyingVersionFound, latestVersion, err := GetLatestQualifyingVersion(versions, append(predicates, FilterExpiredVersion())...) if err != nil { return false, "", err } + + // if no qualifying version is found, allow force update to an expired version if !qualifyingVersionFound { - // in case there are only expired versions in the consecutive minor version, pick the latest expired version - qualifyingVersionFound, latestVersion, err = GetLatestQualifyingVersion(cloudProfile.Spec.Kubernetes.Versions, FilterNonConsecutiveMinorVersion(*currentSemVerVersion), FilterSameVersion(*currentSemVerVersion)) + qualifyingVersionFound, latestVersion, err = GetLatestQualifyingVersion(versions, predicates...) if err != nil { return false, "", err } @@ -625,7 +656,78 @@ func GetKubernetesVersionForMinorUpdate(cloudProfile *gardencorev1beta1.CloudPro return true, latestVersion.Version, nil } -// GetLatestQualifyingVersion returns the latest expirable version from a set of expirable versions +// GetVersionForForcefulUpdateToNextHigherMinor finds a version from a slice of expirable versions that qualifies for a minor level update given a <currentVersion>. +// A qualifying version is the highest non-preview version with the next higher minor version from the given slice of versions. +// In case the consecutive minor version has only expired versions, picks the latest expired version (will try another update during the next maintenance time). +// If a version can be found, returns true and the qualifying version. +// In case it does not find a version, it returns false and an empty string. +func GetVersionForForcefulUpdateToNextHigherMinor(versions []gardencorev1beta1.ExpirableVersion, currentVersion string) (bool, string, error) { + currentSemVerVersion, err := semver.NewVersion(currentVersion) + if err != nil { + return false, "", err + } + + predicates := []VersionPredicate{FilterDifferentMajorVersion(*currentSemVerVersion), FilterEqualAndSmallerMinorVersion(*currentSemVerVersion)} + + // prefer non-expired version + return getVersionForMachineImageForceUpdate(versions, func(v semver.Version) int64 { return int64(v.Minor()) }, currentSemVerVersion, predicates) +} + +// GetVersionForForcefulUpdateToNextHigherMajor finds a version from a slice of expirable versions that qualifies for a major level update given a <currentVersion>. +// A qualifying version is a non-preview version with the next (as defined in the CloudProfile for the image) higher major version.
+// In case the next major version has only expired versions, picks the latest expired version (will try another update during the next maintenance time). +// If a version can be found, returns true and the qualifying version of the next major version. +// In case it does not find a version, it returns false and an empty string. +func GetVersionForForcefulUpdateToNextHigherMajor(versions []gardencorev1beta1.ExpirableVersion, currentVersion string) (bool, string, error) { + currentSemVerVersion, err := semver.NewVersion(currentVersion) + if err != nil { + return false, "", err + } + + predicates := []VersionPredicate{FilterEqualAndSmallerMajorVersion(*currentSemVerVersion)} + + // prefer non-expired version + return getVersionForMachineImageForceUpdate(versions, func(v semver.Version) int64 { return int64(v.Major()) }, currentSemVerVersion, predicates) +} + +// getVersionForMachineImageForceUpdate finds a version from a slice of expirable versions that qualifies for an update given a <currentVersion>. +// In contrast to determining a version for an auto-update, also allows update to an expired version in case a not-expired version cannot be determined. +// Used only for machine image updates, as finds a qualifying version from the next higher minor version, which is not necessarily consecutive (n+1). +func getVersionForMachineImageForceUpdate(versions []gardencorev1beta1.ExpirableVersion, getMajorOrMinor GetMajorOrMinor, currentSemVerVersion *semver.Version, predicates []VersionPredicate) (bool, string, error) { + foundVersion, qualifyingVersion, nextMinorOrMajorVersion, err := GetQualifyingVersionForNextHigher(versions, getMajorOrMinor, currentSemVerVersion, append(predicates, FilterExpiredVersion())...) + if err != nil { + return false, "", err + } + + skippedNextMajorMinor := false + if foundVersion { + parse, err := semver.NewVersion(qualifyingVersion.Version) + if err != nil { + return false, "", err + } + skippedNextMajorMinor = getMajorOrMinor(*parse) > nextMinorOrMajorVersion + } + + // Two options when allowing updates to expired versions + // 1) No higher non-expired qualifying version could be found at all + // 2) Found a qualifying non-expired version, but we skipped the next minor/major. + // Potentially skipped expired versions in the next minor/major that qualify. + // Prefer update to expired version in next minor/major instead of skipping over minor/major altogether. + // Example: current version: 1.1.0, qualifying version : 1.4.1, next minor: 2. We skipped over the next minor which might have qualifying expired versions. + if !foundVersion || skippedNextMajorMinor { + foundVersion, qualifyingVersion, _, err = GetQualifyingVersionForNextHigher(versions, getMajorOrMinor, currentSemVerVersion, predicates...) + if err != nil { + return false, "", err + } + if !foundVersion { + return false, "", nil + } + } + + return true, qualifyingVersion.Version, nil +} + +// GetLatestQualifyingVersion returns the latest expirable version from a set of expirable versions. // A version qualifies if its classification is not preview and the optional predicate does not filter out the version. // If the predicate returns true, the version is not considered for the latest qualifying version.
func GetLatestQualifyingVersion(versions []gardencorev1beta1.ExpirableVersion, predicate ...VersionPredicate) (qualifyingVersionFound bool, latest *gardencorev1beta1.ExpirableVersion, err error) { @@ -670,9 +772,82 @@ OUTER: return true, latestVersion, nil } -// FilterDifferentMajorMinorVersion returns a VersionPredicate(closure) that evaluates whether a given version v has a different same major.minor version compared to the currentSemVerVersion -// returns true if v has a different major.minor version -func FilterDifferentMajorMinorVersion(currentSemVerVersion semver.Version) VersionPredicate { +// GetMajorOrMinor returns either the major or the minor version from a semVer version. +type GetMajorOrMinor func(v semver.Version) int64 + +// GetQualifyingVersionForNextHigher returns the latest expirable version for the next higher {minor/major} (not necessarily consecutive n+1) version from a set of expirable versions. +// A version qualifies if its classification is not preview and the optional predicate does not filter out the version. +// If the predicate returns true, the version is not considered for the latest qualifying version. +func GetQualifyingVersionForNextHigher(versions []gardencorev1beta1.ExpirableVersion, majorOrMinor GetMajorOrMinor, currentSemVerVersion *semver.Version, predicates ...VersionPredicate) (qualifyingVersionFound bool, qualifyingVersion *gardencorev1beta1.ExpirableVersion, nextMinorOrMajor int64, err error) { + // How to find the highest version with the next higher (not necessarily consecutive n+1) minor version (if the next higher minor version has no qualifying version, skip it to avoid consecutive updates) + // 1) Sort the versions in ascending order + // 2) Loop over the sorted array until the minor version changes (select all versions for the next higher minor) + // - predicates filter out version with minor/major <= current_minor/major + // 3) Then select the last version in the array (that's the highest) + + slices.SortFunc(versions, func(a, b gardencorev1beta1.ExpirableVersion) int { + return semver.MustParse(a.Version).Compare(semver.MustParse(b.Version)) + }) + + var ( + highestVersionNextHigherMinorOrMajor *semver.Version + nextMajorOrMinorVersion int64 = -1 + expirableVersionNextHigherMinorOrMajor = gardencorev1beta1.ExpirableVersion{} + ) + +OUTER: + for _, v := range versions { + parse, err := semver.NewVersion(v.Version) + if err != nil { + return false, nil, 0, err + } + + // Determine the next higher minor/major version, even though all versions from that minor/major might be filtered (e.g, all expired) + // That's required so that the caller can determine if the next minor/major version has been skipped or not. 
+ if majorOrMinor(*parse) > majorOrMinor(*currentSemVerVersion) && (majorOrMinor(*parse) < nextMajorOrMinorVersion || nextMajorOrMinorVersion == -1) { + nextMajorOrMinorVersion = majorOrMinor(*parse) + } + + // never update to preview versions + if v.Classification != nil && *v.Classification == gardencorev1beta1.ClassificationPreview { + continue + } + + for _, p := range predicates { + if p == nil { + continue + } + + shouldFilter, err := p(v, parse) + if err != nil { + return false, nil, nextMajorOrMinorVersion, fmt.Errorf("error while evaluation predicate: %w", err) + } + if shouldFilter { + continue OUTER + } + } + + // last version is the highest version for next larger minor/major + if highestVersionNextHigherMinorOrMajor != nil && majorOrMinor(*parse) > majorOrMinor(*highestVersionNextHigherMinorOrMajor) { + break + } + highestVersionNextHigherMinorOrMajor = parse + expirableVersionNextHigherMinorOrMajor = v + } + + // unable to find qualified versions + if highestVersionNextHigherMinorOrMajor == nil { + return false, nil, nextMajorOrMinorVersion, nil + } + return true, &expirableVersionNextHigherMinorOrMajor, nextMajorOrMinorVersion, nil +} + +// FilterDifferentMajorMinorVersionAndLowerPatchVersionsOfSameMinor returns a VersionPredicate(closure) that returns true if a given version v +// - has a different major.minor version compared to the currentSemVerVersion +// - has a lower patch version (acts as >= relational operator) +// +// Uses the tilde range operator. +func FilterDifferentMajorMinorVersionAndLowerPatchVersionsOfSameMinor(currentSemVerVersion semver.Version) VersionPredicate { return func(_ gardencorev1beta1.ExpirableVersion, v *semver.Version) (bool, error) { isWithinRange, err := versionutils.CompareVersions(v.String(), "~", currentSemVerVersion.String()) if err != nil { @@ -683,7 +858,9 @@ func FilterDifferentMajorMinorVersion(currentSemVerVersion semver.Version) Versi } // FilterNonConsecutiveMinorVersion returns a VersionPredicate(closure) that evaluates whether a given version v has a consecutive minor version compared to the currentSemVerVersion -// returns true if v does not have a consecutive minor version +// - implicitly, therefore also versions cannot be smaller than the current version +// +// returns true if v does not have a consecutive minor version. func FilterNonConsecutiveMinorVersion(currentSemVerVersion semver.Version) VersionPredicate { return func(_ gardencorev1beta1.ExpirableVersion, v *semver.Version) (bool, error) { if v.Major() != currentSemVerVersion.Major() { @@ -695,8 +872,32 @@ func FilterNonConsecutiveMinorVersion(currentSemVerVersion semver.Version) Versi } } -// FilterSameVersion returns a VersionPredicate(closure) that evaluates whether a given version v is equal to the currentSemVerVersion -// returns true if it is equal +// FilterDifferentMajorVersion returns a VersionPredicate(closure) that evaluates whether a given version v has the same major version compared to the currentSemVerVersion. +// Returns true if v does not have the same major version. +func FilterDifferentMajorVersion(currentSemVerVersion semver.Version) VersionPredicate { + return func(_ gardencorev1beta1.ExpirableVersion, v *semver.Version) (bool, error) { + return v.Major() != currentSemVerVersion.Major(), nil + } +} + +// FilterEqualAndSmallerMajorVersion returns a VersionPredicate(closure) that evaluates whether a given version v has a smaller major version compared to the currentSemVerVersion. +// Returns true if v has a smaller or equal major version. 
+func FilterEqualAndSmallerMajorVersion(currentSemVerVersion semver.Version) VersionPredicate { + return func(_ gardencorev1beta1.ExpirableVersion, v *semver.Version) (bool, error) { + return v.Major() <= currentSemVerVersion.Major(), nil + } +} + +// FilterEqualAndSmallerMinorVersion returns a VersionPredicate(closure) that evaluates whether a given version v has a smaller or equal minor version compared to the currentSemVerVersion. +// Returns true if v has a smaller or equal minor version. +func FilterEqualAndSmallerMinorVersion(currentSemVerVersion semver.Version) VersionPredicate { + return func(_ gardencorev1beta1.ExpirableVersion, v *semver.Version) (bool, error) { + return v.Minor() <= currentSemVerVersion.Minor(), nil + } +} + +// FilterSameVersion returns a VersionPredicate(closure) that evaluates whether a given version v is equal to the currentSemVerVersion. +// returns true if it is equal. func FilterSameVersion(currentSemVerVersion semver.Version) VersionPredicate { return func(_ gardencorev1beta1.ExpirableVersion, v *semver.Version) (bool, error) { return v.Equal(&currentSemVerVersion), nil @@ -727,7 +928,7 @@ func FilterDeprecatedVersion() func(expirableVersion gardencorev1beta1.Expirable } } -// GetResourceByName returns the first NamedResourceReference with the given name in the given slice, or nil if not found. +// GetResourceByName returns the NamedResourceReference with the given name in the given slice, or nil if not found. func GetResourceByName(resources []gardencorev1beta1.NamedResourceReference, name string) *gardencorev1beta1.NamedResourceReference { for _, resource := range resources { if resource.Name == name { diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_cloudprofile.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_cloudprofile.go index 6617a8bdb..e8071afa2 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_cloudprofile.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_cloudprofile.go @@ -113,6 +113,12 @@ type MachineImage struct { // +patchMergeKey=version // +patchStrategy=merge Versions []MachineImageVersion `json:"versions" patchStrategy:"merge" patchMergeKey:"version" protobuf:"bytes,2,rep,name=versions"` + // UpdateStrategy is the update strategy to use for the machine image. Possible values are: + // - patch: update to the latest patch version of the current minor version. + // - minor: update to the latest minor and patch version. + // - major: always update to the overall latest version (default). + // +optional + UpdateStrategy *MachineImageUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy,casttype=MachineImageUpdateStrategy"` } // MachineImageVersion is an expirable version with list of supported container runtimes and interfaces @@ -245,3 +251,17 @@ const ( // and will eventually expire. ClassificationDeprecated VersionClassification = "deprecated" ) + +// MachineImageUpdateStrategy is the update strategy to use for a machine image +type MachineImageUpdateStrategy string + +const ( + // UpdateStrategyPatch indicates that auto-updates are performed to the latest patch version of the current minor version. + // When using an expired version during the maintenance window, force updates to the latest patch of the next (not necessarily consecutive) minor when using an expired version.
+ UpdateStrategyPatch MachineImageUpdateStrategy = "patch" + // UpdateStrategyMinor indicates that auto-updates are performed to the latest patch and minor version of the current major version. + // When using an expired version during the maintenance window, force updates to the latest minor and patch of the next (not necessarily consecutive) major version. + UpdateStrategyMinor MachineImageUpdateStrategy = "minor" + // UpdateStrategyMajor indicates that auto-updates are performed always to the overall latest version. + UpdateStrategyMajor MachineImageUpdateStrategy = "major" +) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_common.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_common.go index 7f8f0abd2..a18eced5d 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_common.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_common.go @@ -14,7 +14,9 @@ package v1beta1 -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) // ErrorCode is a string alias. type ErrorCode string diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerregistration.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerregistration.go index a80c13707..70e39b0d3 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerregistration.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_controllerregistration.go @@ -104,7 +104,7 @@ type ControllerRegistrationDeployment struct { // An empty list means that all seeds are selected. // +optional SeedSelector *metav1.LabelSelector `json:"seedSelector,omitempty" protobuf:"bytes,4,opt,name=seedSelector"` - // DeploymentRefs holds references to `ControllerDeployments`. Only one element is support now. + // DeploymentRefs holds references to `ControllerDeployments`. Only one element is supported currently. // +optional DeploymentRefs []DeploymentRef `json:"deploymentRefs,omitempty" protobuf:"bytes,5,opt,name=deploymentRefs"` } diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_seed.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_seed.go index b166aada1..847d41612 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_seed.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_seed.go @@ -340,14 +340,11 @@ type SeedSettingVerticalPodAutoscaler struct { // SeedSettingDependencyWatchdog controls the dependency-watchdog settings for the seed. type SeedSettingDependencyWatchdog struct { - // Endpoint controls the endpoint settings for the dependency-watchdog for the seed. - // Deprecated: This field is deprecated and will be removed in a future version of Gardener. Use `Weeder` instead. - // +optional - Endpoint *SeedSettingDependencyWatchdogEndpoint `json:"endpoint,omitempty" protobuf:"bytes,1,opt,name=endpoint"` - // Probe controls the probe settings for the dependency-watchdog for the seed. - // Deprecated: This field is deprecated and will be removed in a future version of Gardener. Use `Prober` instead. - // +optional - Probe *SeedSettingDependencyWatchdogProbe `json:"probe,omitempty" protobuf:"bytes,2,opt,name=probe"` + // Endpoint is tombstoned to show why 1 is reserved protobuf tag. 
+ // Endpoint *SeedSettingDependencyWatchdogEndpoint `json:"endpoint,omitempty" protobuf:"bytes,1,opt,name=endpoint"` + // Probe is tombstoned to show why 2 is reserved protobuf tag. + // Probe *SeedSettingDependencyWatchdogProbe `json:"probe,omitempty" protobuf:"bytes,2,opt,name=probe"` + // Weeder controls the weeder settings for the dependency-watchdog for the seed. // +optional Weeder *SeedSettingDependencyWatchdogWeeder `json:"weeder,omitempty" protobuf:"bytes,3,opt,name=weeder"` @@ -356,24 +353,6 @@ type SeedSettingDependencyWatchdog struct { Prober *SeedSettingDependencyWatchdogProber `json:"prober,omitempty" protobuf:"bytes,4,opt,name=prober"` } -// SeedSettingDependencyWatchdogEndpoint controls the endpoint settings for the dependency-watchdog for the seed. -// Deprecated: This type is deprecated and will be removed in a future version of Gardener. Use type `SeedSettingDependencyWatchdogWeeder` instead. -type SeedSettingDependencyWatchdogEndpoint struct { - // Enabled controls whether the endpoint controller of the dependency-watchdog should be enabled. This controller - // helps to alleviate the delay where control plane components remain unavailable by finding the respective pods in - // CrashLoopBackoff status and restarting them once their dependants become ready and available again. - Enabled bool `json:"enabled" protobuf:"bytes,1,opt,name=enabled"` -} - -// SeedSettingDependencyWatchdogProbe controls the probe settings for the dependency-watchdog for the seed. -// Deprecated: This type is deprecated and will be removed in a future version of Gardener. Use type `SeedSettingDependencyWatchdogProber` instead. -type SeedSettingDependencyWatchdogProbe struct { - // Enabled controls whether the probe controller of the dependency-watchdog should be enabled. This controller - // scales down the kube-controller-manager, machine-controller-manager and cluster-autoscaler of shoot clusters in case their respective kube-apiserver is not - // reachable via its external ingress in order to avoid melt-down situations. - Enabled bool `json:"enabled" protobuf:"bytes,1,opt,name=enabled"` -} - // SeedSettingDependencyWatchdogWeeder controls the weeder settings for the dependency-watchdog for the seed. type SeedSettingDependencyWatchdogWeeder struct { // Enabled controls whether the endpoint controller(weeder) of the dependency-watchdog should be enabled. This controller diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go index 967e5b166..ebb9c2921 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go @@ -593,6 +593,12 @@ type ClusterAutoscaler struct { // IgnoreTaints specifies a list of taint keys to ignore in node templates when considering to scale a node group. // +optional IgnoreTaints []string `json:"ignoreTaints,omitempty" protobuf:"bytes,10,opt,name=ignoreTaints"` + // NewPodScaleUpDelay specifies how long CA should ignore newly created pods before they have to be considered for scale-up. + // +optional + NewPodScaleUpDelay *metav1.Duration `json:"newPodScaleUpDelay,omitempty" protobuf:"bytes,11,opt,name=newPodScaleUpDelay"` + // MaxEmptyBulkDelete specifies the maximum number of empty nodes that can be deleted at the same time (default: 10). 
+ // +optional + MaxEmptyBulkDelete *int32 `json:"maxEmptyBulkDelete,omitempty" protobuf:"varint,12,opt,name=maxEmptyBulkDelete"` } // ExpanderMode is type used for Expander values diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.conversion.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.conversion.go index 573685ddf..c0aef342e 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.conversion.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.conversion.go @@ -1267,26 +1267,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*SeedSettingDependencyWatchdogEndpoint)(nil), (*core.SeedSettingDependencyWatchdogEndpoint)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_SeedSettingDependencyWatchdogEndpoint_To_core_SeedSettingDependencyWatchdogEndpoint(a.(*SeedSettingDependencyWatchdogEndpoint), b.(*core.SeedSettingDependencyWatchdogEndpoint), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*core.SeedSettingDependencyWatchdogEndpoint)(nil), (*SeedSettingDependencyWatchdogEndpoint)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_SeedSettingDependencyWatchdogEndpoint_To_v1beta1_SeedSettingDependencyWatchdogEndpoint(a.(*core.SeedSettingDependencyWatchdogEndpoint), b.(*SeedSettingDependencyWatchdogEndpoint), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*SeedSettingDependencyWatchdogProbe)(nil), (*core.SeedSettingDependencyWatchdogProbe)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_SeedSettingDependencyWatchdogProbe_To_core_SeedSettingDependencyWatchdogProbe(a.(*SeedSettingDependencyWatchdogProbe), b.(*core.SeedSettingDependencyWatchdogProbe), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*core.SeedSettingDependencyWatchdogProbe)(nil), (*SeedSettingDependencyWatchdogProbe)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_core_SeedSettingDependencyWatchdogProbe_To_v1beta1_SeedSettingDependencyWatchdogProbe(a.(*core.SeedSettingDependencyWatchdogProbe), b.(*SeedSettingDependencyWatchdogProbe), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*SeedSettingDependencyWatchdogProber)(nil), (*core.SeedSettingDependencyWatchdogProber)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_SeedSettingDependencyWatchdogProber_To_core_SeedSettingDependencyWatchdogProber(a.(*SeedSettingDependencyWatchdogProber), b.(*core.SeedSettingDependencyWatchdogProber), scope) }); err != nil { @@ -2328,6 +2308,8 @@ func autoConvert_v1beta1_ClusterAutoscaler_To_core_ClusterAutoscaler(in *Cluster out.MaxNodeProvisionTime = (*metav1.Duration)(unsafe.Pointer(in.MaxNodeProvisionTime)) out.MaxGracefulTerminationSeconds = (*int32)(unsafe.Pointer(in.MaxGracefulTerminationSeconds)) out.IgnoreTaints = *(*[]string)(unsafe.Pointer(&in.IgnoreTaints)) + out.NewPodScaleUpDelay = (*metav1.Duration)(unsafe.Pointer(in.NewPodScaleUpDelay)) + out.MaxEmptyBulkDelete = (*int32)(unsafe.Pointer(in.MaxEmptyBulkDelete)) return nil } @@ -2347,6 +2329,8 @@ func autoConvert_core_ClusterAutoscaler_To_v1beta1_ClusterAutoscaler(in *core.Cl out.MaxNodeProvisionTime = (*metav1.Duration)(unsafe.Pointer(in.MaxNodeProvisionTime)) 
out.MaxGracefulTerminationSeconds = (*int32)(unsafe.Pointer(in.MaxGracefulTerminationSeconds)) out.IgnoreTaints = *(*[]string)(unsafe.Pointer(&in.IgnoreTaints)) + out.NewPodScaleUpDelay = (*metav1.Duration)(unsafe.Pointer(in.NewPodScaleUpDelay)) + out.MaxEmptyBulkDelete = (*int32)(unsafe.Pointer(in.MaxEmptyBulkDelete)) return nil } @@ -4010,6 +3994,7 @@ func Convert_core_MachineControllerManagerSettings_To_v1beta1_MachineControllerM func autoConvert_v1beta1_MachineImage_To_core_MachineImage(in *MachineImage, out *core.MachineImage, s conversion.Scope) error { out.Name = in.Name out.Versions = *(*[]core.MachineImageVersion)(unsafe.Pointer(&in.Versions)) + out.UpdateStrategy = (*core.MachineImageUpdateStrategy)(unsafe.Pointer(in.UpdateStrategy)) return nil } @@ -4021,6 +4006,7 @@ func Convert_v1beta1_MachineImage_To_core_MachineImage(in *MachineImage, out *co func autoConvert_core_MachineImage_To_v1beta1_MachineImage(in *core.MachineImage, out *MachineImage, s conversion.Scope) error { out.Name = in.Name out.Versions = *(*[]MachineImageVersion)(unsafe.Pointer(&in.Versions)) + out.UpdateStrategy = (*MachineImageUpdateStrategy)(unsafe.Pointer(in.UpdateStrategy)) return nil } @@ -5093,8 +5079,6 @@ func Convert_core_SeedSelector_To_v1beta1_SeedSelector(in *core.SeedSelector, ou } func autoConvert_v1beta1_SeedSettingDependencyWatchdog_To_core_SeedSettingDependencyWatchdog(in *SeedSettingDependencyWatchdog, out *core.SeedSettingDependencyWatchdog, s conversion.Scope) error { - out.Endpoint = (*core.SeedSettingDependencyWatchdogEndpoint)(unsafe.Pointer(in.Endpoint)) - out.Probe = (*core.SeedSettingDependencyWatchdogProbe)(unsafe.Pointer(in.Probe)) out.Weeder = (*core.SeedSettingDependencyWatchdogWeeder)(unsafe.Pointer(in.Weeder)) out.Prober = (*core.SeedSettingDependencyWatchdogProber)(unsafe.Pointer(in.Prober)) return nil @@ -5106,8 +5090,6 @@ func Convert_v1beta1_SeedSettingDependencyWatchdog_To_core_SeedSettingDependency } func autoConvert_core_SeedSettingDependencyWatchdog_To_v1beta1_SeedSettingDependencyWatchdog(in *core.SeedSettingDependencyWatchdog, out *SeedSettingDependencyWatchdog, s conversion.Scope) error { - out.Endpoint = (*SeedSettingDependencyWatchdogEndpoint)(unsafe.Pointer(in.Endpoint)) - out.Probe = (*SeedSettingDependencyWatchdogProbe)(unsafe.Pointer(in.Probe)) out.Weeder = (*SeedSettingDependencyWatchdogWeeder)(unsafe.Pointer(in.Weeder)) out.Prober = (*SeedSettingDependencyWatchdogProber)(unsafe.Pointer(in.Prober)) return nil @@ -5118,46 +5100,6 @@ func Convert_core_SeedSettingDependencyWatchdog_To_v1beta1_SeedSettingDependency return autoConvert_core_SeedSettingDependencyWatchdog_To_v1beta1_SeedSettingDependencyWatchdog(in, out, s) } -func autoConvert_v1beta1_SeedSettingDependencyWatchdogEndpoint_To_core_SeedSettingDependencyWatchdogEndpoint(in *SeedSettingDependencyWatchdogEndpoint, out *core.SeedSettingDependencyWatchdogEndpoint, s conversion.Scope) error { - out.Enabled = in.Enabled - return nil -} - -// Convert_v1beta1_SeedSettingDependencyWatchdogEndpoint_To_core_SeedSettingDependencyWatchdogEndpoint is an autogenerated conversion function. 
-func Convert_v1beta1_SeedSettingDependencyWatchdogEndpoint_To_core_SeedSettingDependencyWatchdogEndpoint(in *SeedSettingDependencyWatchdogEndpoint, out *core.SeedSettingDependencyWatchdogEndpoint, s conversion.Scope) error { - return autoConvert_v1beta1_SeedSettingDependencyWatchdogEndpoint_To_core_SeedSettingDependencyWatchdogEndpoint(in, out, s) -} - -func autoConvert_core_SeedSettingDependencyWatchdogEndpoint_To_v1beta1_SeedSettingDependencyWatchdogEndpoint(in *core.SeedSettingDependencyWatchdogEndpoint, out *SeedSettingDependencyWatchdogEndpoint, s conversion.Scope) error { - out.Enabled = in.Enabled - return nil -} - -// Convert_core_SeedSettingDependencyWatchdogEndpoint_To_v1beta1_SeedSettingDependencyWatchdogEndpoint is an autogenerated conversion function. -func Convert_core_SeedSettingDependencyWatchdogEndpoint_To_v1beta1_SeedSettingDependencyWatchdogEndpoint(in *core.SeedSettingDependencyWatchdogEndpoint, out *SeedSettingDependencyWatchdogEndpoint, s conversion.Scope) error { - return autoConvert_core_SeedSettingDependencyWatchdogEndpoint_To_v1beta1_SeedSettingDependencyWatchdogEndpoint(in, out, s) -} - -func autoConvert_v1beta1_SeedSettingDependencyWatchdogProbe_To_core_SeedSettingDependencyWatchdogProbe(in *SeedSettingDependencyWatchdogProbe, out *core.SeedSettingDependencyWatchdogProbe, s conversion.Scope) error { - out.Enabled = in.Enabled - return nil -} - -// Convert_v1beta1_SeedSettingDependencyWatchdogProbe_To_core_SeedSettingDependencyWatchdogProbe is an autogenerated conversion function. -func Convert_v1beta1_SeedSettingDependencyWatchdogProbe_To_core_SeedSettingDependencyWatchdogProbe(in *SeedSettingDependencyWatchdogProbe, out *core.SeedSettingDependencyWatchdogProbe, s conversion.Scope) error { - return autoConvert_v1beta1_SeedSettingDependencyWatchdogProbe_To_core_SeedSettingDependencyWatchdogProbe(in, out, s) -} - -func autoConvert_core_SeedSettingDependencyWatchdogProbe_To_v1beta1_SeedSettingDependencyWatchdogProbe(in *core.SeedSettingDependencyWatchdogProbe, out *SeedSettingDependencyWatchdogProbe, s conversion.Scope) error { - out.Enabled = in.Enabled - return nil -} - -// Convert_core_SeedSettingDependencyWatchdogProbe_To_v1beta1_SeedSettingDependencyWatchdogProbe is an autogenerated conversion function. 
-func Convert_core_SeedSettingDependencyWatchdogProbe_To_v1beta1_SeedSettingDependencyWatchdogProbe(in *core.SeedSettingDependencyWatchdogProbe, out *SeedSettingDependencyWatchdogProbe, s conversion.Scope) error { - return autoConvert_core_SeedSettingDependencyWatchdogProbe_To_v1beta1_SeedSettingDependencyWatchdogProbe(in, out, s) -} - func autoConvert_v1beta1_SeedSettingDependencyWatchdogProber_To_core_SeedSettingDependencyWatchdogProber(in *SeedSettingDependencyWatchdogProber, out *core.SeedSettingDependencyWatchdogProber, s conversion.Scope) error { out.Enabled = in.Enabled return nil diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.deepcopy.go index 2cc2615bf..98df27cd5 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.deepcopy.go @@ -729,6 +729,16 @@ func (in *ClusterAutoscaler) DeepCopyInto(out *ClusterAutoscaler) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.NewPodScaleUpDelay != nil { + in, out := &in.NewPodScaleUpDelay, &out.NewPodScaleUpDelay + *out = new(metav1.Duration) + **out = **in + } + if in.MaxEmptyBulkDelete != nil { + in, out := &in.MaxEmptyBulkDelete, &out.MaxEmptyBulkDelete + *out = new(int32) + **out = **in + } return } @@ -2631,6 +2641,11 @@ func (in *MachineImage) DeepCopyInto(out *MachineImage) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.UpdateStrategy != nil { + in, out := &in.UpdateStrategy, &out.UpdateStrategy + *out = new(MachineImageUpdateStrategy) + **out = **in + } return } @@ -3801,16 +3816,6 @@ func (in *SeedSelector) DeepCopy() *SeedSelector { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SeedSettingDependencyWatchdog) DeepCopyInto(out *SeedSettingDependencyWatchdog) { *out = *in - if in.Endpoint != nil { - in, out := &in.Endpoint, &out.Endpoint - *out = new(SeedSettingDependencyWatchdogEndpoint) - **out = **in - } - if in.Probe != nil { - in, out := &in.Probe, &out.Probe - *out = new(SeedSettingDependencyWatchdogProbe) - **out = **in - } if in.Weeder != nil { in, out := &in.Weeder, &out.Weeder *out = new(SeedSettingDependencyWatchdogWeeder) @@ -3834,38 +3839,6 @@ func (in *SeedSettingDependencyWatchdog) DeepCopy() *SeedSettingDependencyWatchd return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SeedSettingDependencyWatchdogEndpoint) DeepCopyInto(out *SeedSettingDependencyWatchdogEndpoint) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingDependencyWatchdogEndpoint. -func (in *SeedSettingDependencyWatchdogEndpoint) DeepCopy() *SeedSettingDependencyWatchdogEndpoint { - if in == nil { - return nil - } - out := new(SeedSettingDependencyWatchdogEndpoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SeedSettingDependencyWatchdogProbe) DeepCopyInto(out *SeedSettingDependencyWatchdogProbe) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingDependencyWatchdogProbe. 
-func (in *SeedSettingDependencyWatchdogProbe) DeepCopy() *SeedSettingDependencyWatchdogProbe { - if in == nil { - return nil - } - out := new(SeedSettingDependencyWatchdogProbe) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SeedSettingDependencyWatchdogProber) DeepCopyInto(out *SeedSettingDependencyWatchdogProber) { *out = *in diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.defaults.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.defaults.go index 752ae794d..64aeb9796 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.defaults.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/zz_generated.defaults.go @@ -47,6 +47,7 @@ func RegisterDefaults(scheme *runtime.Scheme) error { func SetObjectDefaults_CloudProfile(in *CloudProfile) { for i := range in.Spec.MachineImages { a := &in.Spec.MachineImages[i] + SetDefaults_MachineImage(a) for j := range a.Versions { b := &a.Versions[j] SetDefaults_MachineImageVersion(b) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/core/zz_generated.deepcopy.go index f31320801..1c213ccc6 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/zz_generated.deepcopy.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/zz_generated.deepcopy.go @@ -729,6 +729,16 @@ func (in *ClusterAutoscaler) DeepCopyInto(out *ClusterAutoscaler) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.NewPodScaleUpDelay != nil { + in, out := &in.NewPodScaleUpDelay, &out.NewPodScaleUpDelay + *out = new(metav1.Duration) + **out = **in + } + if in.MaxEmptyBulkDelete != nil { + in, out := &in.MaxEmptyBulkDelete, &out.MaxEmptyBulkDelete + *out = new(int32) + **out = **in + } return } @@ -2626,6 +2636,11 @@ func (in *MachineImage) DeepCopyInto(out *MachineImage) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.UpdateStrategy != nil { + in, out := &in.UpdateStrategy, &out.UpdateStrategy + *out = new(MachineImageUpdateStrategy) + **out = **in + } return } @@ -3796,16 +3811,6 @@ func (in *SeedSelector) DeepCopy() *SeedSelector { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SeedSettingDependencyWatchdog) DeepCopyInto(out *SeedSettingDependencyWatchdog) { *out = *in - if in.Endpoint != nil { - in, out := &in.Endpoint, &out.Endpoint - *out = new(SeedSettingDependencyWatchdogEndpoint) - **out = **in - } - if in.Probe != nil { - in, out := &in.Probe, &out.Probe - *out = new(SeedSettingDependencyWatchdogProbe) - **out = **in - } if in.Weeder != nil { in, out := &in.Weeder, &out.Weeder *out = new(SeedSettingDependencyWatchdogWeeder) @@ -3829,38 +3834,6 @@ func (in *SeedSettingDependencyWatchdog) DeepCopy() *SeedSettingDependencyWatchd return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SeedSettingDependencyWatchdogEndpoint) DeepCopyInto(out *SeedSettingDependencyWatchdogEndpoint) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingDependencyWatchdogEndpoint. 
-func (in *SeedSettingDependencyWatchdogEndpoint) DeepCopy() *SeedSettingDependencyWatchdogEndpoint { - if in == nil { - return nil - } - out := new(SeedSettingDependencyWatchdogEndpoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SeedSettingDependencyWatchdogProbe) DeepCopyInto(out *SeedSettingDependencyWatchdogProbe) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeedSettingDependencyWatchdogProbe. -func (in *SeedSettingDependencyWatchdogProbe) DeepCopy() *SeedSettingDependencyWatchdogProbe { - if in == nil { - return nil - } - out := new(SeedSettingDependencyWatchdogProbe) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SeedSettingDependencyWatchdogProber) DeepCopyInto(out *SeedSettingDependencyWatchdogProber) { *out = *in diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/helper/filecodec.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/helper/filecodec.go index e085276e3..bb07d173a 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/helper/filecodec.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/helper/filecodec.go @@ -15,20 +15,15 @@ package helper import ( - "bytes" - "compress/gzip" "encoding/base64" "fmt" - "io" extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" ) var validFileCodecIDs = map[extensionsv1alpha1.FileCodecID]struct{}{ - extensionsv1alpha1.PlainFileCodecID: {}, - extensionsv1alpha1.B64FileCodecID: {}, - extensionsv1alpha1.GZIPFileCodecID: {}, - extensionsv1alpha1.GZIPB64FileCodecID: {}, + extensionsv1alpha1.PlainFileCodecID: {}, + extensionsv1alpha1.B64FileCodecID: {}, } // FileCodec is a codec to en- and decode data in cloud-init scripts with.j @@ -42,8 +37,6 @@ var ( PlainFileCodec FileCodec = plainFileCodec{} // B64FileCodec is the base64 FileCodec. B64FileCodec FileCodec = b64FileCodec{} - // GZIPFileCodec is the gzip FileCodec. - GZIPFileCodec FileCodec = gzipFileCodec{} ) type plainFileCodec struct{} @@ -71,29 +64,6 @@ func (b64FileCodec) Decode(data []byte) ([]byte, error) { return dst[:n], err } -type gzipFileCodec struct{} - -func (gzipFileCodec) Encode(data []byte) ([]byte, error) { - var out bytes.Buffer - w := gzip.NewWriter(&out) - if _, err := w.Write(data); err != nil { - return nil, err - } - if err := w.Close(); err != nil { - return nil, err - } - return out.Bytes(), nil -} - -func (gzipFileCodec) Decode(data []byte) ([]byte, error) { - r, err := gzip.NewReader(bytes.NewReader(data)) - if err != nil { - return nil, err - } - defer func() { _ = r.Close() }() - return io.ReadAll(r) -} - // ParseFileCodecID tries to parse a string into a FileCodecID. func ParseFileCodecID(s string) (extensionsv1alpha1.FileCodecID, error) { id := extensionsv1alpha1.FileCodecID(s) @@ -106,7 +76,6 @@ func ParseFileCodecID(s string) (extensionsv1alpha1.FileCodecID, error) { var fileCodecIDToFileCodec = map[extensionsv1alpha1.FileCodecID]FileCodec{ extensionsv1alpha1.PlainFileCodecID: PlainFileCodec, extensionsv1alpha1.B64FileCodecID: B64FileCodec, - extensionsv1alpha1.GZIPFileCodecID: GZIPFileCodec, } // FileCodecForID retrieves the FileCodec for the given FileCodecID. 
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_operatingsystemconfig.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_operatingsystemconfig.go index 8ec7c161f..9eb156e30 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_operatingsystemconfig.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/types_operatingsystemconfig.go @@ -53,7 +53,6 @@ func (o *OperatingSystemConfig) GetExtensionSpec() Spec { // GetExtensionPurpose implements Object. func (o *OperatingSystemConfigSpec) GetExtensionPurpose() *string { return (*string)(&o.Purpose) - } // GetExtensionStatus implements Object. @@ -89,6 +88,7 @@ type OperatingSystemConfigSpec struct { // are asked to use it when determining the .status.command of this resource. For example, if for CoreOS // the reload-path might be "/var/lib/config"; then the controller shall set .status.command to // "/usr/bin/coreos-cloudinit --from-file=/var/lib/config". + // TODO(rfranzke): Deprecate this field once UseGardenerNodeAgent feature gate is promoted to GA. // +optional ReloadConfigFilePath *string `json:"reloadConfigFilePath,omitempty"` // Units is a list of unit for the operating system configuration (usually, a systemd unit). @@ -109,7 +109,7 @@ type Unit struct { Name string `json:"name"` // Command is the unit's command. // +optional - Command *string `json:"command,omitempty"` + Command *UnitCommand `json:"command,omitempty"` // Enable describes whether the unit is enabled or not. // +optional Enable *bool `json:"enable,omitempty"` @@ -121,6 +121,26 @@ type Unit struct { // +patchStrategy=merge // +optional DropIns []DropIn `json:"dropIns,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + // FilePaths is a list of files the unit depends on. If any file changes a restart of the dependent unit will be + // triggered. For each FilePath there must exist a File with matching Path in OperatingSystemConfig.Spec.Files. + FilePaths []string `json:"filePaths,omitempty"` +} + +// UnitCommand is a string alias. +type UnitCommand string + +const ( + // CommandStart is the 'start' command for a unit. + CommandStart UnitCommand = "start" + // CommandRestart is the 'restart' command for a unit. + CommandRestart UnitCommand = "restart" + // CommandStop is the 'stop' command for a unit. + CommandStop UnitCommand = "stop" +) + +// UnitCommandPtr returns a pointer to the provided unit command. +func UnitCommandPtr(c UnitCommand) *UnitCommand { + return &c } // DropIn is a drop-in configuration for a systemd unit. @@ -156,6 +176,9 @@ type FileContent struct { // This for example can be used to manipulate the clear-text content before it reaches the node. // +optional TransmitUnencoded *bool `json:"transmitUnencoded,omitempty"` + // ImageRef describes a container image which contains a file. + // +optional + ImageRef *FileContentImageRef `json:"imageRef,omitempty"` } // FileContentSecretRef contains keys for referencing a file content's data from a secret in the same namespace. @@ -174,10 +197,28 @@ type FileContentInline struct { Data string `json:"data"` } +// FileContentImageRef describes a container image which contains a file +type FileContentImageRef struct { + // Image contains the container image repository with tag. + Image string `json:"image"` + // FilePathInImage contains the path in the image to the file that should be extracted. 
+ FilePathInImage string `json:"filePathInImage"` +} + // OperatingSystemConfigStatus is the status for a OperatingSystemConfig resource. type OperatingSystemConfigStatus struct { // DefaultStatus is a structure containing common fields used by all extension resources. DefaultStatus `json:",inline"` + // ExtensionUnits is a list of additional systemd units provided by the extension. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + ExtensionUnits []Unit `json:"extensionUnits,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + // ExtensionFiles is a list of additional files provided by the extension. + // +patchMergeKey=path + // +patchStrategy=merge + // +optional + ExtensionFiles []File `json:"extensionFiles,omitempty" patchStrategy:"merge" patchMergeKey:"path"` // CloudConfig is a structure for containing the generated output for the given operating system // config spec. It contains a reference to a secret as the result may contain confidential data. // +optional @@ -185,14 +226,17 @@ type OperatingSystemConfigStatus struct { // Command is the command whose execution renews/reloads the cloud config on an existing VM, e.g. // "/usr/bin/reload-cloud-config -from-file=". The is optionally provided by Gardener // in the .spec.reloadConfigFilePath field. + // TODO(rfranzke): Deprecate this field once UseGardenerNodeAgent feature gate is promoted to GA. // +optional Command *string `json:"command,omitempty"` // Units is a list of systemd unit names that are part of the generated Cloud Config and shall be // restarted when a new version has been downloaded. + // TODO(rfranzke): Deprecate this field once UseGardenerNodeAgent feature gate is promoted to GA. // +optional Units []string `json:"units,omitempty"` // Files is a list of file paths that are part of the generated Cloud Config and shall be // written to the host's file system. + // TODO(rfranzke): Deprecate this field once UseGardenerNodeAgent feature gate is promoted to GA. // +optional Files []string `json:"files,omitempty"` } @@ -249,8 +293,4 @@ const ( PlainFileCodecID FileCodecID = "" // B64FileCodecID is the base64 file codec id. B64FileCodecID FileCodecID = "b64" - // GZIPFileCodecID is the gzip file codec id. - GZIPFileCodecID FileCodecID = "gzip" - // GZIPB64FileCodecID is the gzip combined with base64 codec id. - GZIPB64FileCodecID FileCodecID = "gzip+b64" ) diff --git a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go index 3b949bd6b..b60aa5ea6 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go @@ -1056,6 +1056,11 @@ func (in *FileContent) DeepCopyInto(out *FileContent) { *out = new(bool) **out = **in } + if in.ImageRef != nil { + in, out := &in.ImageRef, &out.ImageRef + *out = new(FileContentImageRef) + **out = **in + } return } @@ -1069,6 +1074,22 @@ func (in *FileContent) DeepCopy() *FileContent { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileContentImageRef) DeepCopyInto(out *FileContentImageRef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileContentImageRef. 
+func (in *FileContentImageRef) DeepCopy() *FileContentImageRef { + if in == nil { + return nil + } + out := new(FileContentImageRef) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FileContentInline) DeepCopyInto(out *FileContentInline) { *out = *in @@ -1468,6 +1489,20 @@ func (in *OperatingSystemConfigSpec) DeepCopy() *OperatingSystemConfigSpec { func (in *OperatingSystemConfigStatus) DeepCopyInto(out *OperatingSystemConfigStatus) { *out = *in in.DefaultStatus.DeepCopyInto(&out.DefaultStatus) + if in.ExtensionUnits != nil { + in, out := &in.ExtensionUnits, &out.ExtensionUnits + *out = make([]Unit, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExtensionFiles != nil { + in, out := &in.ExtensionFiles, &out.ExtensionFiles + *out = make([]File, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.CloudConfig != nil { in, out := &in.CloudConfig, &out.CloudConfig *out = new(CloudConfig) @@ -1506,7 +1541,7 @@ func (in *Unit) DeepCopyInto(out *Unit) { *out = *in if in.Command != nil { in, out := &in.Command, &out.Command - *out = new(string) + *out = new(UnitCommand) **out = **in } if in.Enable != nil { @@ -1524,6 +1559,11 @@ func (in *Unit) DeepCopyInto(out *Unit) { *out = make([]DropIn, len(*in)) copy(*out, *in) } + if in.FilePaths != nil { + in, out := &in.FilePaths, &out.FilePaths + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/github.com/gardener/gardener/pkg/apis/operator/v1alpha1/types.go b/vendor/github.com/gardener/gardener/pkg/apis/operator/v1alpha1/types.go index 2fccce0ff..19b94151a 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/operator/v1alpha1/types.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/operator/v1alpha1/types.go @@ -373,7 +373,7 @@ type KubeControllerManagerConfig struct { CertificateSigningDuration *metav1.Duration `json:"certificateSigningDuration,omitempty"` } -// Gardener contains the configuration settings for the Gardener componenets. +// Gardener contains the configuration settings for the Gardener components. type Gardener struct { // ClusterIdentity is the identity of the garden cluster. This field is immutable. // +kubebuilder:validation:MinLength=1 diff --git a/vendor/github.com/gardener/gardener/pkg/apis/resources/v1alpha1/types.go b/vendor/github.com/gardener/gardener/pkg/apis/resources/v1alpha1/types.go index 65b5d79ad..7e63aaabd 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/resources/v1alpha1/types.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/resources/v1alpha1/types.go @@ -52,6 +52,9 @@ const ( // It is set by the ManagedResource controller to the key of the owning ManagedResource, optionally prefixed with the // clusterID. OriginAnnotation = "resources.gardener.cloud/origin" + // FinalizeDeletionAfter is an annotation on an object part of a ManagedResource that whose value states the + // duration after which a deletion should be finalized (i.e., removal of `.metadata.finalizers[]`). + FinalizeDeletionAfter = "resources.gardener.cloud/finalize-deletion-after" // ManagedBy is a constant for a label on an object managed by a ManagedResource. // It is set by the ManagedResource controller depending on its configuration. By default it is set to "gardener". 
diff --git a/vendor/github.com/gardener/gardener/pkg/chartrenderer/default.go b/vendor/github.com/gardener/gardener/pkg/chartrenderer/default.go index ee5c96421..cb14e6c4e 100644 --- a/vendor/github.com/gardener/gardener/pkg/chartrenderer/default.go +++ b/vendor/github.com/gardener/gardener/pkg/chartrenderer/default.go @@ -69,29 +69,7 @@ func NewWithServerVersion(serverVersion *version.Info) Interface { } } -// DiscoverCapabilities discovers the capabilities required for chart renderers using the given -// DiscoveryInterface. -func DiscoverCapabilities(disc discovery.DiscoveryInterface) (*chartutil.Capabilities, error) { - sv, err := disc.ServerVersion() - if err != nil { - return nil, fmt.Errorf("failed to get kubernetes server version %w", err) - } - - return &chartutil.Capabilities{KubeVersion: sv}, nil -} - -// Render loads the chart from the given location and calls the Render() function -// to convert it into a ChartRelease object. -// Deprecated: Use RenderEmbeddedFS for new code! -func (r *chartRenderer) Render(chartPath, releaseName, namespace string, values interface{}) (*RenderedChart, error) { - chart, err := chartutil.Load(chartPath) - if err != nil { - return nil, fmt.Errorf("can't load chart from path %s:, %s", chartPath, err) - } - return r.renderRelease(chart, releaseName, namespace, values) -} - -// RenderArchive loads the chart from the given location and calls the Render() function +// RenderArchive loads the chart from the given location and calls the renderRelease() function // to convert it into a ChartRelease object. func (r *chartRenderer) RenderArchive(archive []byte, releaseName, namespace string, values interface{}) (*RenderedChart, error) { chart, err := chartutil.LoadArchive(bytes.NewReader(archive)) @@ -101,7 +79,7 @@ func (r *chartRenderer) RenderArchive(archive []byte, releaseName, namespace str return r.renderRelease(chart, releaseName, namespace, values) } -// RenderEmbeddedFS loads the chart from the given embed.FS and calls the Render() function +// RenderEmbeddedFS loads the chart from the given embed.FS and calls the renderRelease() function // to convert it into a ChartRelease object. func (r *chartRenderer) RenderEmbeddedFS(embeddedFS embed.FS, chartPath, releaseName, namespace string, values interface{}) (*RenderedChart, error) { chart, err := loadEmbeddedFS(embeddedFS, chartPath) diff --git a/vendor/github.com/gardener/gardener/pkg/chartrenderer/renderer.go b/vendor/github.com/gardener/gardener/pkg/chartrenderer/renderer.go index d28c905c6..c9f2fd52f 100644 --- a/vendor/github.com/gardener/gardener/pkg/chartrenderer/renderer.go +++ b/vendor/github.com/gardener/gardener/pkg/chartrenderer/renderer.go @@ -21,10 +21,7 @@ import ( ) // Interface is an interface for rendering Helm Charts from path, name, namespace and values. -// TODO(rfranzke): Drop the deprecated method after Gardener v1.80 has been released. type Interface interface { - // Deprecated: Use RenderEmbeddedFS for new code! 
- Render(chartPath, releaseName, namespace string, values interface{}) (*RenderedChart, error) RenderEmbeddedFS(embeddedFS embed.FS, chartPath, releaseName, namespace string, values interface{}) (*RenderedChart, error) RenderArchive(archive []byte, releaseName, namespace string, values interface{}) (*RenderedChart, error) } diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/cache/aggregator.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/cache/aggregator.go index 7a2669cf8..db21f977a 100644 --- a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/cache/aggregator.go +++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/cache/aggregator.go @@ -54,32 +54,16 @@ func (c *aggregator) cacheForKind(kind schema.GroupVersionKind) cache.Cache { return cache } -func processError(err error) error { - if !IsAPIError(err) { - // Return every other, unspecified error as a `CacheError` to allow users to follow up with a proper error handling. - // For instance, a `Multinamespace` cache returns an unspecified error for unknown namespaces. - // https://github.com/kubernetes-sigs/controller-runtime/blob/b5065bd85190e92864522fcc85aa4f6a3cce4f82/pkg/cache/multi_namespace_cache.go#L132 - return NewCacheError(err) - } - return err -} - // Get retrieves an obj for the given object key from the Kubernetes Cluster. // Every non-API related error is returned as a `CacheError`. func (c *aggregator) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { - if err := c.cacheForObject(obj).Get(ctx, key, obj, opts...); err != nil { - return processError(err) - } - return nil + return c.cacheForObject(obj).Get(ctx, key, obj, opts...) } // List retrieves list of objects for a given namespace and list options. // Every non-API related error is returned as a `CacheError`. func (c *aggregator) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - if err := c.cacheForObject(list).List(ctx, list, opts...); err != nil { - return processError(err) - } - return nil + return c.cacheForObject(list).List(ctx, list, opts...) } func (c *aggregator) GetInformer(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) { diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/cache/errors.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/cache/errors.go deleted file mode 100644 index fc49d35c2..000000000 --- a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/cache/errors.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cache - -import ( - "fmt" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" -) - -// IsAPIError checks if the given error is API related. 
-func IsAPIError(err error) bool { - if _, ok := err.(apierrors.APIStatus); ok { - return true - } - if meta.IsNoMatchError(err) || meta.IsAmbiguousError(err) { - return true - } - return false -} - -// CacheError is an error type indicating that an cache error occurred. -type CacheError struct { - cause error -} - -// Unwrap returns the next error in the error chain. -func (e *CacheError) Unwrap() error { - return e.cause -} - -// Error returns the error string with the underlying error. -func (e *CacheError) Error() string { - return fmt.Errorf("an underlying cache error occurred: %w", e.cause).Error() -} - -// NewCacheError returns a new instance of `CacheError`. -func NewCacheError(err error) error { - return &CacheError{err} -} diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartapplier.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartapplier.go index eafb07003..f536e8e61 100644 --- a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartapplier.go +++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/chartapplier.go @@ -25,13 +25,8 @@ import ( // ChartApplier is an interface that describes needed methods that render and apply // Helm charts in Kubernetes clusters. -// TODO(rfranzke): Drop the deprecated methods after Gardener v1.80 has been released. type ChartApplier interface { chartrenderer.Interface - // Deprecated: Use ApplyFromEmbeddedFS for new code! - Apply(ctx context.Context, chartPath, namespace, name string, opts ...ApplyOption) error - // Deprecated: Use DeleteFromEmbeddedFS for new code! - Delete(ctx context.Context, chartPath, namespace, name string, opts ...DeleteOption) error ApplyFromEmbeddedFS(ctx context.Context, embeddedFS embed.FS, chartPath, namespace, name string, opts ...ApplyOption) error DeleteFromEmbeddedFS(ctx context.Context, embeddedFS embed.FS, chartPath, namespace, name string, opts ...DeleteOption) error } @@ -60,20 +55,7 @@ func NewChartApplierForConfig(config *rest.Config) (ChartApplier, error) { return NewChartApplier(renderer, applier), nil } -// Apply takes a path to a chart , name of the release , -// release's namespace and renders the template based value. -// The resulting manifest will be applied to the cluster the Kubernetes client has been created for. -// can be used to enchance the existing functionality. -// Deprecated: Use ApplyFromEmbeddedFS for new code! -func (c *chartApplier) Apply(ctx context.Context, chartPath, namespace, name string, opts ...ApplyOption) error { - return c.apply(ctx, nil, chartPath, namespace, name, opts...) -} - func (c *chartApplier) ApplyFromEmbeddedFS(ctx context.Context, embeddedFS embed.FS, chartPath, namespace, name string, opts ...ApplyOption) error { - return c.apply(ctx, &embeddedFS, chartPath, namespace, name, opts...) -} - -func (c *chartApplier) apply(ctx context.Context, embeddedFS *embed.FS, chartPath, namespace, name string, opts ...ApplyOption) error { applyOpts := &ApplyOptions{} for _, o := range opts { @@ -98,19 +80,7 @@ func (c *chartApplier) apply(ctx context.Context, embeddedFS *embed.FS, chartPat return c.ApplyManifest(ctx, manifestReader, applyOpts.MergeFuncs) } -// Delete takes a path to a chart , name of the release , -// release's namespace and renders the template. -// The resulting manifest will be deleted from the cluster the Kubernetes client has been created for. -// Deprecated: Use DeleteFromEmbeddedFS for new code! 
-func (c *chartApplier) Delete(ctx context.Context, chartPath, namespace, name string, opts ...DeleteOption) error { - return c.delete(ctx, nil, chartPath, namespace, name, opts...) -} - func (c *chartApplier) DeleteFromEmbeddedFS(ctx context.Context, embeddedFS embed.FS, chartPath, namespace, name string, opts ...DeleteOption) error { - return c.delete(ctx, &embeddedFS, chartPath, namespace, name, opts...) -} - -func (c *chartApplier) delete(ctx context.Context, embeddedFS *embed.FS, chartPath, namespace, name string, opts ...DeleteOption) error { deleteOpts := &DeleteOptions{} for _, o := range opts { @@ -139,22 +109,15 @@ func (c *chartApplier) delete(ctx context.Context, embeddedFS *embed.FS, chartPa return c.DeleteManifest(ctx, manifestReader, deleteManifestOpts...) } -func (c *chartApplier) newManifestReader(embeddedFS *embed.FS, chartPath, namespace, name string, values interface{}) (UnstructuredReader, error) { +func (c *chartApplier) newManifestReader(embeddedFS embed.FS, chartPath, namespace, name string, values interface{}) (UnstructuredReader, error) { var ( release *chartrenderer.RenderedChart err error ) - if embeddedFS != nil { - release, err = c.RenderEmbeddedFS(*embeddedFS, chartPath, name, namespace, values) - if err != nil { - return nil, err - } - } else { - release, err = c.Render(chartPath, name, namespace, values) - if err != nil { - return nil, err - } + release, err = c.RenderEmbeddedFS(embeddedFS, chartPath, name, namespace, values) + if err != nil { + return nil, err } return NewManifestReader(release.Manifest()), nil diff --git a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/client.go b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/client.go index fd9e3ed95..4fb96fac2 100644 --- a/vendor/github.com/gardener/gardener/pkg/client/kubernetes/client.go +++ b/vendor/github.com/gardener/gardener/pkg/client/kubernetes/client.go @@ -31,12 +31,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - logf "sigs.k8s.io/controller-runtime/pkg/log" gardencoreinstall "github.com/gardener/gardener/pkg/apis/core/install" seedmanagementinstall "github.com/gardener/gardener/pkg/apis/seedmanagement/install" settingsinstall "github.com/gardener/gardener/pkg/apis/settings/install" - kubernetescache "github.com/gardener/gardener/pkg/client/kubernetes/cache" "github.com/gardener/gardener/pkg/utils" ) @@ -380,17 +378,15 @@ func newClient(conf *Config, reader client.Reader) (client.Client, error) { var _ client.Client = &FallbackClient{} // FallbackClient holds a `client.Client` and a `client.Reader` which is meant as a fallback -// in case Get/List requests with the ordinary `client.Client` fail (e.g. because of cache errors). +// in case the kind of an object is configured in `KindToNamespaces` but the namespace isn't. type FallbackClient struct { client.Client Reader client.Reader KindToNamespaces map[string]sets.Set[string] } -var cacheError = &kubernetescache.CacheError{} - // Get retrieves an obj for a given object key from the Kubernetes Cluster. -// In case of a cache error, the underlying API reader is used to execute the request again. +// `client.Reader` is used in case the kind of an object is configured in `KindToNamespaces` but the namespace isn't. 
func (d *FallbackClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { gvk, err := apiutil.GVKForObject(obj, GardenScheme) if err != nil { @@ -407,23 +403,10 @@ func (d *FallbackClient) Get(ctx context.Context, key client.ObjectKey, obj clie } // Otherwise, try to get the object from the cache. - err = d.Client.Get(ctx, key, obj, opts...) - - // If an error occurs and it's a cache error, log it and use the API reader as a fallback. - if err != nil && errors.As(err, &cacheError) { - logf.Log.V(1).Info("Falling back to API reader because a cache error occurred", "error", err) - return d.Reader.Get(ctx, key, obj, opts...) - } - return err + return d.Client.Get(ctx, key, obj, opts...) } // List retrieves list of objects for a given namespace and list options. -// In case of a cache error, the underlying API reader is used to execute the request again. func (d *FallbackClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - err := d.Client.List(ctx, list, opts...) - if err != nil && errors.As(err, &cacheError) { - logf.Log.V(1).Info("Falling back to API reader because a cache error occurred", "error", err) - return d.Reader.List(ctx, list, opts...) - } - return err + return d.Client.List(ctx, list, opts...) } diff --git a/vendor/github.com/gardener/gardener/pkg/extensions/customresources.go b/vendor/github.com/gardener/gardener/pkg/extensions/customresources.go index 16989e1a6..78f198b2d 100644 --- a/vendor/github.com/gardener/gardener/pkg/extensions/customresources.go +++ b/vendor/github.com/gardener/gardener/pkg/extensions/customresources.go @@ -223,7 +223,7 @@ func WaitUntilExtensionObjectDeleted( } if lastErr := obj.GetExtensionStatus().GetLastError(); lastErr != nil { - log.Error(fmt.Errorf(lastErr.Description), "Object did not get deleted yet") + log.Info("Object did not get deleted yet", "description", lastErr.Description) lastObservedError = v1beta1helper.NewErrorWithCodes(errors.New(lastErr.Description), lastErr.Codes...) } diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/types.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/types.go index 6a002a06c..18e431649 100644 --- a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/types.go +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/types.go @@ -461,6 +461,8 @@ type ETCDConfig struct { // "github.com/gardener/etcd-druid/pkg/features/features.go". // Default: nil FeatureGates map[string]bool + // DeltaSnapshotRetentionPeriod defines the duration for which delta snapshots will be retained, excluding the latest snapshot set. + DeltaSnapshotRetentionPeriod *metav1.Duration } // ETCDController contains config specific to ETCD controller @@ -491,6 +493,9 @@ type BackupCompactionController struct { // ActiveDeadlineDuration defines duration after which a running backup compaction job will be killed // Defaults to 3 hours ActiveDeadlineDuration *metav1.Duration + // MetricsScrapeWaitDuration is the duration to wait for after compaction job is completed, to allow Prometheus metrics to be scraped + // Defaults to 60 seconds + MetricsScrapeWaitDuration *metav1.Duration } // ETCDBackupLeaderElection contains configuration for the leader election for the etcd backup-restore sidecar. 
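Note on the chart renderer/applier hunks above: with chartRenderer.Render and ChartApplier.Apply/Delete removed, callers now have to embed their charts and use the *FromEmbeddedFS variants. A minimal migration sketch, not part of this patch; the charts directory, chart path, namespace, and release name are placeholders and assume a charts/ directory embedded at build time:

package example

import (
	"context"
	"embed"

	"github.com/gardener/gardener/pkg/client/kubernetes"
)

// charts is assumed to contain the Helm charts previously loaded from disk.
//go:embed charts
var charts embed.FS

// applyChart renders and applies an embedded chart; path, namespace, and
// release name below are placeholder values.
func applyChart(ctx context.Context, applier kubernetes.ChartApplier) error {
	// Before this revendor: applier.Apply(ctx, "charts/my-chart", "my-namespace", "my-release")
	return applier.ApplyFromEmbeddedFS(ctx, charts, "charts/my-chart", "my-namespace", "my-release")
}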
diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/defaults.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/defaults.go index d4ba15388..6fce6a77b 100644 --- a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/defaults.go +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/defaults.go @@ -497,4 +497,7 @@ func SetDefaults_ETCDConfig(obj *ETCDConfig) { if obj.BackupCompactionController.EventsThreshold == nil { obj.BackupCompactionController.EventsThreshold = pointer.Int64(1000000) } + if obj.BackupCompactionController.MetricsScrapeWaitDuration == nil { + obj.BackupCompactionController.MetricsScrapeWaitDuration = &metav1.Duration{Duration: 60 * time.Second} + } } diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/types.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/types.go index d109abc33..e0f50d3de 100644 --- a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/types.go +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/types.go @@ -560,6 +560,9 @@ type ETCDConfig struct { // Default: nil // +optional FeatureGates map[string]bool `json:"featureGates,omitempty"` + // DeltaSnapshotRetentionPeriod defines the duration for which delta snapshots will be retained, excluding the latest snapshot set. + // +optional + DeltaSnapshotRetentionPeriod *metav1.Duration `json:"deltaSnapshotRetentionPeriod,omitempty"` } // ETCDController contains config specific to ETCD controller @@ -596,6 +599,10 @@ type BackupCompactionController struct { // Defaults to 3 hours // +optional ActiveDeadlineDuration *metav1.Duration `json:"activeDeadlineDuration,omitempty"` + // MetricsScrapeWaitDuration is the duration to wait for after compaction job is completed, to allow Prometheus metrics to be scraped + // Defaults to 60 seconds + // +optional + MetricsScrapeWaitDuration *metav1.Duration `json:"metricsScrapeWaitDuration,omitempty"` } // ETCDBackupLeaderElection contains configuration for the leader election for the etcd backup-restore sidecar. 
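A minimal sketch of how the two new ETCD fields above could be set when constructing a gardenlet component configuration in Go; this is illustrative only, the retention period is an arbitrary example value, and the scrape wait duration simply mirrors the new 60s default from SetDefaults_ETCDConfig:

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	gardenletv1alpha1 "github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1"
)

// etcdConfig is an illustrative value; both durations are example choices.
var etcdConfig = &gardenletv1alpha1.ETCDConfig{
	// Retain delta snapshots for three days (excluding the latest snapshot set); example value.
	DeltaSnapshotRetentionPeriod: &metav1.Duration{Duration: 72 * time.Hour},
	BackupCompactionController: &gardenletv1alpha1.BackupCompactionController{
		// Mirrors the new default applied by SetDefaults_ETCDConfig.
		MetricsScrapeWaitDuration: &metav1.Duration{Duration: 60 * time.Second},
	},
}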
diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.conversion.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.conversion.go index 27677309a..9b0a82075 100644 --- a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.conversion.go +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.conversion.go @@ -521,6 +521,7 @@ func autoConvert_v1alpha1_BackupCompactionController_To_config_BackupCompactionC out.EnableBackupCompaction = (*bool)(unsafe.Pointer(in.EnableBackupCompaction)) out.EventsThreshold = (*int64)(unsafe.Pointer(in.EventsThreshold)) out.ActiveDeadlineDuration = (*v1.Duration)(unsafe.Pointer(in.ActiveDeadlineDuration)) + out.MetricsScrapeWaitDuration = (*v1.Duration)(unsafe.Pointer(in.MetricsScrapeWaitDuration)) return nil } @@ -534,6 +535,7 @@ func autoConvert_config_BackupCompactionController_To_v1alpha1_BackupCompactionC out.EnableBackupCompaction = (*bool)(unsafe.Pointer(in.EnableBackupCompaction)) out.EventsThreshold = (*int64)(unsafe.Pointer(in.EventsThreshold)) out.ActiveDeadlineDuration = (*v1.Duration)(unsafe.Pointer(in.ActiveDeadlineDuration)) + out.MetricsScrapeWaitDuration = (*v1.Duration)(unsafe.Pointer(in.MetricsScrapeWaitDuration)) return nil } @@ -718,6 +720,7 @@ func autoConvert_v1alpha1_ETCDConfig_To_config_ETCDConfig(in *ETCDConfig, out *c out.BackupCompactionController = (*config.BackupCompactionController)(unsafe.Pointer(in.BackupCompactionController)) out.BackupLeaderElection = (*config.ETCDBackupLeaderElection)(unsafe.Pointer(in.BackupLeaderElection)) out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) + out.DeltaSnapshotRetentionPeriod = (*v1.Duration)(unsafe.Pointer(in.DeltaSnapshotRetentionPeriod)) return nil } @@ -732,6 +735,7 @@ func autoConvert_config_ETCDConfig_To_v1alpha1_ETCDConfig(in *config.ETCDConfig, out.BackupCompactionController = (*BackupCompactionController)(unsafe.Pointer(in.BackupCompactionController)) out.BackupLeaderElection = (*ETCDBackupLeaderElection)(unsafe.Pointer(in.BackupLeaderElection)) out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) + out.DeltaSnapshotRetentionPeriod = (*v1.Duration)(unsafe.Pointer(in.DeltaSnapshotRetentionPeriod)) return nil } diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.deepcopy.go index d7dd019d7..15302cf09 100644 --- a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1/zz_generated.deepcopy.go @@ -73,6 +73,11 @@ func (in *BackupCompactionController) DeepCopyInto(out *BackupCompactionControll *out = new(v1.Duration) **out = **in } + if in.MetricsScrapeWaitDuration != nil { + in, out := &in.MetricsScrapeWaitDuration, &out.MetricsScrapeWaitDuration + *out = new(v1.Duration) + **out = **in + } return } @@ -300,6 +305,11 @@ func (in *ETCDConfig) DeepCopyInto(out *ETCDConfig) { (*out)[key] = val } } + if in.DeltaSnapshotRetentionPeriod != nil { + in, out := &in.DeltaSnapshotRetentionPeriod, &out.DeltaSnapshotRetentionPeriod + *out = new(v1.Duration) + **out = **in + } return } diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/zz_generated.deepcopy.go 
b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/zz_generated.deepcopy.go index 0143aee43..5d6d95943 100644 --- a/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/zz_generated.deepcopy.go +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/apis/config/zz_generated.deepcopy.go @@ -73,6 +73,11 @@ func (in *BackupCompactionController) DeepCopyInto(out *BackupCompactionControll *out = new(v1.Duration) **out = **in } + if in.MetricsScrapeWaitDuration != nil { + in, out := &in.MetricsScrapeWaitDuration, &out.MetricsScrapeWaitDuration + *out = new(v1.Duration) + **out = **in + } return } @@ -300,6 +305,11 @@ func (in *ETCDConfig) DeepCopyInto(out *ETCDConfig) { (*out)[key] = val } } + if in.DeltaSnapshotRetentionPeriod != nil { + in, out := &in.DeltaSnapshotRetentionPeriod, &out.DeltaSnapshotRetentionPeriod + *out = new(v1.Duration) + **out = **in + } return } diff --git a/vendor/github.com/gardener/gardener/pkg/utils/errors/unwrap.go b/vendor/github.com/gardener/gardener/pkg/utils/errors/unwrap.go index d79b43d47..f751dda45 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/errors/unwrap.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/errors/unwrap.go @@ -14,7 +14,9 @@ package errors -import "errors" +import ( + "errors" +) // Unwrap unwraps and returns the root error. Multiple wrappings via `fmt.Errorf` implementations are properly taken into account. func Unwrap(err error) error { diff --git a/vendor/github.com/gardener/gardener/pkg/utils/flow/flow.go b/vendor/github.com/gardener/gardener/pkg/utils/flow/flow.go index 935d3ab90..03d9c54bc 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/flow/flow.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/flow/flow.go @@ -79,6 +79,7 @@ type node struct { targetIDs TaskIDs required int fn TaskFn + skip bool } func (n *node) String() string { @@ -114,8 +115,9 @@ func (f *Flow) Run(ctx context.Context, opts Opts) error { } type nodeResult struct { - TaskID TaskID - Error error + TaskID TaskID + Error error + skipped bool } // Stats are the statistics of a Flow execution. 
@@ -125,6 +127,7 @@ type Stats struct { Succeeded TaskIDs Failed TaskIDs Running TaskIDs + Skipped TaskIDs Pending TaskIDs } @@ -142,6 +145,7 @@ func (s *Stats) Copy() *Stats { s.Succeeded.Copy(), s.Failed.Copy(), s.Running.Copy(), + s.Skipped.Copy(), s.Pending.Copy(), } } @@ -155,6 +159,7 @@ func InitialStats(flowName string, all TaskIDs) *Stats { NewTaskIDs(), NewTaskIDs(), NewTaskIDs(), + NewTaskIDs(), all.Copy(), } } @@ -162,8 +167,10 @@ func InitialStats(flowName string, all TaskIDs) *Stats { func newExecution(flow *Flow, opts Opts) *execution { all := NewTaskIDs() - for name := range flow.nodes { - all.Insert(name) + for name, task := range flow.nodes { + if !task.skip { + all.Insert(name) + } } log := logf.Log.WithName("flow").WithValues(logKeyFlow, flow.name) @@ -200,6 +207,19 @@ type execution struct { } func (e *execution) runNode(ctx context.Context, id TaskID) { + log := e.log.WithValues(logKeyTask, id) + + node := e.flow.nodes[id] + if node.skip { + log.V(1).Info("Skipped") + e.stats.Skipped.Insert(id) + go func() { + e.done <- &nodeResult{TaskID: id, Error: nil, skipped: true} + }() + + return + } + if e.errorContext != nil { e.errorContext.AddErrorID(string(id)) } @@ -207,17 +227,16 @@ func (e *execution) runNode(ctx context.Context, id TaskID) { e.stats.Running.Insert(id) go func() { start := time.Now().UTC() - - e.log.WithValues(logKeyTask, id).V(1).Info("Started") - err := e.flow.nodes[id].fn(ctx) + log.V(1).Info("Started") + err := node.fn(ctx) end := time.Now().UTC() - e.log.WithValues(logKeyTask, id).V(1).Info("Finished", "duration", end.Sub(start)) + log.V(1).Info("Finished", "duration", end.Sub(start)) if err != nil { - e.log.WithValues(logKeyTask, id).Error(err, "Error") + log.Error(err, "Error") err = fmt.Errorf("task %q failed: %w", id, err) } else { - e.log.WithValues(logKeyTask, id).Info("Succeeded") + log.Info("Succeeded") } e.done <- &nodeResult{TaskID: id, Error: err} @@ -280,19 +299,26 @@ func (e *execution) run(ctx context.Context) error { } e.reportProgress(ctx) - for e.stats.Running.Len() > 0 { + for e.stats.Running.Len() > 0 || e.stats.Skipped.Len() > 0 { result := <-e.done - if result.Error != nil { - e.taskErrors = append(e.taskErrors, errorsutils.WithID(string(result.TaskID), result.Error)) - e.updateFailure(result.TaskID) - } else { - e.updateSuccess(result.TaskID) - if e.errorContext != nil && e.errorContext.HasLastErrorWithID(string(result.TaskID)) { - e.cleanErrors(ctx, result.TaskID) - } + if result.skipped { + e.stats.Skipped.Delete(result.TaskID) if cancelErr = ctx.Err(); cancelErr == nil { e.processTriggers(ctx, result.TaskID) } + } else { + if result.Error != nil { + e.taskErrors = append(e.taskErrors, errorsutils.WithID(string(result.TaskID), result.Error)) + e.updateFailure(result.TaskID) + } else { + e.updateSuccess(result.TaskID) + if e.errorContext != nil && e.errorContext.HasLastErrorWithID(string(result.TaskID)) { + e.cleanErrors(ctx, result.TaskID) + } + if cancelErr = ctx.Err(); cancelErr == nil { + e.processTriggers(ctx, result.TaskID) + } + } } e.reportProgress(ctx) } diff --git a/vendor/github.com/gardener/gardener/pkg/utils/flow/graph.go b/vendor/github.com/gardener/gardener/pkg/utils/flow/graph.go index 8d757dce7..8cba75c52 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/flow/graph.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/flow/graph.go @@ -23,6 +23,7 @@ import ( type Task struct { Name string Fn TaskFn + SkipIf bool Dependencies TaskIDs } @@ -30,6 +31,7 @@ type Task struct { func (t *Task) Spec() 
*TaskSpec { return &TaskSpec{ t.Fn, + t.SkipIf, t.Dependencies.Copy(), } } @@ -38,6 +40,7 @@ func (t *Task) Spec() *TaskSpec { // the dependencies of the Task. type TaskSpec struct { Fn TaskFn + Skip bool Dependencies TaskIDs } @@ -91,6 +94,7 @@ func (g *Graph) Compile() *Flow { node := nodes.getOrCreate(taskName) node.fn = taskSpec.Fn + node.skip = taskSpec.Skip node.required = taskSpec.Dependencies.Len() } diff --git a/vendor/github.com/gardener/gardener/pkg/utils/flow/taskfn.go b/vendor/github.com/gardener/gardener/pkg/utils/flow/taskfn.go index 60288f052..1db945334 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/flow/taskfn.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/flow/taskfn.go @@ -35,24 +35,6 @@ type TaskFn func(ctx context.Context) error // RecoverFn is a function that can recover an error. type RecoverFn func(ctx context.Context, err error) error -// EmptyTaskFn is a TaskFn that does nothing (returns nil). -var EmptyTaskFn TaskFn = func(ctx context.Context) error { return nil } - -// SkipIf returns a TaskFn that does nothing if the condition is true, otherwise the function -// will be executed once called. -func (t TaskFn) SkipIf(condition bool) TaskFn { - if condition { - return EmptyTaskFn - } - return t -} - -// DoIf returns a TaskFn that will be executed if the condition is true when it is called. -// Otherwise, it will do nothing when called. -func (t TaskFn) DoIf(condition bool) TaskFn { - return t.SkipIf(!condition) -} - // Timeout returns a TaskFn that is bound to a context which times out. func (t TaskFn) Timeout(timeout time.Duration) TaskFn { return func(ctx context.Context) error { diff --git a/vendor/github.com/gardener/gardener/pkg/utils/gardener/machines.go b/vendor/github.com/gardener/gardener/pkg/utils/gardener/machines.go new file mode 100644 index 000000000..ca2b557c3 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/gardener/machines.go @@ -0,0 +1,170 @@ +// Copyright 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gardener + +import ( + "context" + "fmt" + "strings" + "time" + + machinev1alpha1 "github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1" + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" + retryutils "github.com/gardener/gardener/pkg/utils/retry" +) + +const ( + nameLabel = "name" + // MachineSetKind is the kind of the owner reference of a machine set + MachineSetKind = "MachineSet" + // MachineDeploymentKind is the kind of the owner reference of a machine deployment + MachineDeploymentKind = "MachineDeployment" +) + +// BuildOwnerToMachinesMap returns a map that associates `MachineSet` names to the given `machines`. 
+func BuildOwnerToMachinesMap(machines []machinev1alpha1.Machine) map[string][]machinev1alpha1.Machine { + ownerToMachines := make(map[string][]machinev1alpha1.Machine) + for index, machine := range machines { + if len(machine.OwnerReferences) > 0 { + for _, reference := range machine.OwnerReferences { + if reference.Kind == MachineSetKind { + ownerToMachines[reference.Name] = append(ownerToMachines[reference.Name], machines[index]) + } + } + } else if len(machine.Labels) > 0 { + if machineDeploymentName, ok := machine.Labels[nameLabel]; ok { + ownerToMachines[machineDeploymentName] = append(ownerToMachines[machineDeploymentName], machines[index]) + } + } + } + return ownerToMachines +} + +// BuildOwnerToMachineSetsMap returns a map that associates `MachineDeployment` names to the given `machineSets`. +func BuildOwnerToMachineSetsMap(machineSets []machinev1alpha1.MachineSet) map[string][]machinev1alpha1.MachineSet { + ownerToMachineSets := make(map[string][]machinev1alpha1.MachineSet) + for index, machineSet := range machineSets { + if len(machineSet.OwnerReferences) > 0 { + for _, reference := range machineSet.OwnerReferences { + if reference.Kind == MachineDeploymentKind { + ownerToMachineSets[reference.Name] = append(ownerToMachineSets[reference.Name], machineSets[index]) + } + } + } else if len(machineSet.Labels) > 0 { + if machineDeploymentName, ok := machineSet.Labels[nameLabel]; ok { + ownerToMachineSets[machineDeploymentName] = append(ownerToMachineSets[machineDeploymentName], machineSets[index]) + } + } + } + return ownerToMachineSets +} + +// WaitUntilMachineResourcesDeleted waits for a maximum of 30 minutes until all machine resources have been properly +// deleted by the machine-controller-manager. It polls the status every 5 seconds. +func WaitUntilMachineResourcesDeleted(ctx context.Context, log logr.Logger, reader client.Reader, namespace string) error { + var ( + countMachines = -1 + countMachineSets = -1 + countMachineDeployments = -1 + countMachineClasses = -1 + countMachineClassSecrets = -1 + ) + log.Info("Waiting until all machine resources have been deleted") + + return retryutils.UntilTimeout(ctx, 5*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) { + var msg string + + // Check whether all machines have been deleted. + if countMachines != 0 { + machineList := &metav1.PartialObjectMetadataList{} + machineList.SetGroupVersionKind(machinev1alpha1.SchemeGroupVersion.WithKind("MachineList")) + if err := reader.List(ctx, machineList, client.InNamespace(namespace)); err != nil { + return retryutils.SevereError(err) + } + countMachines = len(machineList.Items) + msg += fmt.Sprintf("%d machines, ", countMachines) + } + + // Check whether all machine sets have been deleted. + if countMachineSets != 0 { + machineSetList := &metav1.PartialObjectMetadataList{} + machineSetList.SetGroupVersionKind(machinev1alpha1.SchemeGroupVersion.WithKind("MachineSetList")) + if err := reader.List(ctx, machineSetList, client.InNamespace(namespace)); err != nil { + return retryutils.SevereError(err) + } + countMachineSets = len(machineSetList.Items) + msg += fmt.Sprintf("%d machine sets, ", countMachineSets) + } + + // Check whether all machine deployments have been deleted. 
+ if countMachineDeployments != 0 { + machineDeploymentList := &machinev1alpha1.MachineDeploymentList{} + if err := reader.List(ctx, machineDeploymentList, client.InNamespace(namespace)); err != nil { + return retryutils.SevereError(err) + } + countMachineDeployments = len(machineDeploymentList.Items) + msg += fmt.Sprintf("%d machine deployments, ", countMachineDeployments) + + // Check whether an operation failed during the deletion process. + for _, existingMachineDeployment := range machineDeploymentList.Items { + for _, failedMachine := range existingMachineDeployment.Status.FailedMachines { + return retryutils.SevereError(fmt.Errorf("machine %s failed: %s", failedMachine.Name, failedMachine.LastOperation.Description)) + } + } + } + + // Check whether all machine classes have been deleted. + if countMachineClasses != 0 { + machineClassList := &metav1.PartialObjectMetadataList{} + machineClassList.SetGroupVersionKind(machinev1alpha1.SchemeGroupVersion.WithKind("MachineClassList")) + if err := reader.List(ctx, machineClassList, client.InNamespace(namespace)); err != nil { + return retryutils.SevereError(err) + } + countMachineClasses = len(machineClassList.Items) + msg += fmt.Sprintf("%d machine classes, ", countMachineClasses) + } + + // Check whether all machine class secrets have been deleted. + if countMachineClassSecrets != 0 { + count := 0 + machineClassSecretsList := &metav1.PartialObjectMetadataList{} + machineClassSecretsList.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("SecretList")) + if err := reader.List(ctx, machineClassSecretsList, client.InNamespace(namespace), client.MatchingLabels(map[string]string{v1beta1constants.GardenerPurpose: v1beta1constants.GardenPurposeMachineClass})); err != nil { + return retryutils.SevereError(err) + } + for _, machineClassSecret := range machineClassSecretsList.Items { + if len(machineClassSecret.Finalizers) != 0 { + count++ + } + } + countMachineClassSecrets = count + msg += fmt.Sprintf("%d machine class secrets, ", countMachineClassSecrets) + } + + if countMachines != 0 || countMachineSets != 0 || countMachineDeployments != 0 || countMachineClasses != 0 || countMachineClassSecrets != 0 { + log.Info("Waiting until machine resources have been deleted", + "machines", countMachines, "machineSets", countMachineSets, "machineDeployments", countMachineDeployments, + "machineClasses", countMachineClasses, "machineClassSecrets", countMachineClassSecrets) + return retryutils.MinorError(fmt.Errorf("waiting until the following machine resources have been deleted: %s", strings.TrimSuffix(msg, ", "))) + } + + return retryutils.Ok() + }) +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/gardener/seed.go b/vendor/github.com/gardener/gardener/pkg/utils/gardener/seed.go index 769ee7c7a..eacfb23d1 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/gardener/seed.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/gardener/seed.go @@ -23,7 +23,6 @@ import ( certificatesv1 "k8s.io/api/certificates/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" @@ -32,8 +31,6 @@ import ( v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper" extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" - operatorv1alpha1 "github.com/gardener/gardener/pkg/apis/operator/v1alpha1" - kubernetesutils 
"github.com/gardener/gardener/pkg/utils/kubernetes" ) const ( @@ -128,18 +125,6 @@ func GetWildcardCertificate(ctx context.Context, c client.Client) (*corev1.Secre return nil, nil } -// SeedIsGarden returns 'true' if the cluster is registered as a Garden cluster. -func SeedIsGarden(ctx context.Context, seedClient client.Reader) (bool, error) { - seedIsGarden, err := kubernetesutils.ResourcesExist(ctx, seedClient, operatorv1alpha1.SchemeGroupVersion.WithKind("GardenList")) - if err != nil { - if !meta.IsNoMatchError(err) { - return false, err - } - seedIsGarden = false - } - return seedIsGarden, nil -} - // ComputeRequiredExtensionsForSeed computes the extension kind/type combinations that are required for the // seed reconciliation flow. func ComputeRequiredExtensionsForSeed(seed *gardencorev1beta1.Seed) sets.Set[string] { diff --git a/vendor/github.com/gardener/gardener/pkg/utils/gardener/shoot.go b/vendor/github.com/gardener/gardener/pkg/utils/gardener/shoot.go index 9c65e2a36..bd7272a93 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/gardener/shoot.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/gardener/shoot.go @@ -681,3 +681,18 @@ func ComputeTechnicalID(projectName string, shoot *gardencorev1beta1.Shoot) stri // New clusters shall be created with the new technical id (double hyphens). return fmt.Sprintf("%s-%s--%s", v1beta1constants.TechnicalIDPrefix, projectName, shoot.Name) } + +// GetShootConditionTypes returns all known shoot condition types. +func GetShootConditionTypes(workerless bool) []gardencorev1beta1.ConditionType { + shootConditionTypes := []gardencorev1beta1.ConditionType{ + gardencorev1beta1.ShootAPIServerAvailable, + gardencorev1beta1.ShootControlPlaneHealthy, + gardencorev1beta1.ShootObservabilityComponentsHealthy, + } + + if !workerless { + shootConditionTypes = append(shootConditionTypes, gardencorev1beta1.ShootEveryNodeReady) + } + + return append(shootConditionTypes, gardencorev1beta1.ShootSystemComponentsHealthy) +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/controllers.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/controllers.go new file mode 100644 index 000000000..fea76268e --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/controllers.go @@ -0,0 +1,118 @@ +// Copyright 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import versionutils "github.com/gardener/gardener/pkg/utils/version" + +// APIGroupControllerMap is a map for the Kubernetes API groups and the corresponding controllers for them. 
+var APIGroupControllerMap = map[string]map[string]versionutils.VersionRange{ + "internal/v1alpha1": { + "storage-version-gc": {}, + }, + "apps/v1": { + "daemonset": {}, + "deployment": {}, + "replicaset": {}, + "statefulset": {}, + }, + "apps/v1beta1": { + "disruption": {}, + }, + "authentication/v1": { + "attachdetach": {}, + "persistentvolume-expander": {}, + }, + "authorization/v1": { + "csrapproving": {}, + }, + "autoscaling/v1": { + "horizontalpodautoscaling": {}, + }, + "autoscaling/v2": { + "horizontalpodautoscaling": {}, + }, + "batch/v1": { + "cronjob": {}, + "job": {}, + "ttl-after-finished": {}, + }, + "certificates/v1": { + "csrapproving": {}, + "csrcleaner": {}, + "csrsigning": {}, + }, + "certificates/v1beta1": { + "csrsigning": {}, + }, + "coordination/v1": { + "nodelifecycle": {}, + "storage-version-gc": {}, + }, + "discovery/v1": { + "endpointslice": {}, + "endpointslicemirroring": {}, + }, + "extensions/v1beta1": { + "disruption": {}, + }, + "policy/v1": { + "disruption": {}, + }, + "rbac/v1": { + "clusterrole-aggregation": {}, + }, + "resource/v1alpha2": { + "resource-claim-controller": {AddedInVersion: "1.27"}, + }, + "v1": { + "attachdetach": {}, + "bootstrapsigner": {}, + "cloud-node-lifecycle": {}, + "cronjob": {}, + "csrapproving": {}, + "csrsigning": {}, + "daemonset": {}, + "deployment": {}, + "disruption": {}, + "endpoint": {}, + "endpointslice": {}, + "endpointslicemirroring": {}, + "ephemeral-volume": {}, + "garbagecollector": {}, + "horizontalpodautoscaling": {}, + "job": {}, + "legacy-service-account-token-cleaner": {AddedInVersion: "1.28"}, + "namespace": {}, + "nodelifecycle": {}, + "persistentvolume-binder": {}, + "persistentvolume-expander": {}, + "podgc": {}, + "pv-protection": {}, + "pvc-protection": {}, + "replicaset": {}, + "replicationcontroller": {}, + "resource-claim-controller": {AddedInVersion: "1.27"}, + "resourcequota": {}, + "root-ca-cert-publisher": {}, + "route": {}, + "service": {}, + "serviceaccount": {}, + "serviceaccount-token": {}, + "statefulset": {}, + "tokencleaner": {}, + "ttl": {}, + "ttl-after-finished": {}, + }, +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/daemonset.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/daemonset.go index 577259707..e3f6985ed 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/daemonset.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/health/daemonset.go @@ -31,7 +31,7 @@ func daemonSetMaxUnavailable(daemonSet *appsv1.DaemonSet) int32 { return 0 } - maxUnavailable, err := intstr.GetValueFromIntOrPercent(rollingUpdate.MaxUnavailable, int(daemonSet.Status.DesiredNumberScheduled), false) + maxUnavailable, err := intstr.GetScaledValueFromIntOrPercent(rollingUpdate.MaxUnavailable, int(daemonSet.Status.DesiredNumberScheduled), false) if err != nil { return 0 } diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/object.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/object.go index 148eab9f8..8080383d3 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/object.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/object.go @@ -23,10 +23,11 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" + 
"sigs.k8s.io/controller-runtime/pkg/client/apiutil" "github.com/gardener/gardener/pkg/resourcemanager/controller/garbagecollector/references" "github.com/gardener/gardener/pkg/utils" @@ -87,17 +88,54 @@ func DeleteObjectsFromListConditionally(ctx context.Context, c client.Client, li return flow.Parallel(fns...)(ctx) } -// ResourcesExist checks if there is at least one object of the given gvk. The kind in the gvk must be the list kind, -// for example corev1.SchemeGroupVersion.WithKind("PodList"). -func ResourcesExist(ctx context.Context, reader client.Reader, gvk schema.GroupVersionKind, listOpts ...client.ListOption) (bool, error) { - objects := &metav1.PartialObjectMetadataList{} - objects.SetGroupVersionKind(gvk) +// ResourcesExist checks if there is at least one object of the given objList. +func ResourcesExist(ctx context.Context, reader client.Reader, objList client.ObjectList, scheme *runtime.Scheme, listOpts ...client.ListOption) (bool, error) { + objects := objList + + // Use `PartialObjectMetadata` if no or metadata only field selectors are passed (informer's indexers only have access to metadata fields). + if hasNoOrMetadataOnlyFieldSelector(listOpts...) { + gvk, err := apiutil.GVKForObject(objList, scheme) + if err != nil { + return false, err + } + + objects = &metav1.PartialObjectMetadataList{} + objects.(*metav1.PartialObjectMetadataList).SetGroupVersionKind(gvk) + } if err := reader.List(ctx, objects, append(listOpts, client.Limit(1))...); err != nil { return true, err } - return len(objects.Items) > 0, nil + switch o := objects.(type) { + case *metav1.PartialObjectMetadataList: + return len(o.Items) > 0, nil + default: + items, err := meta.ExtractList(objList) + if err != nil { + return false, err + } + return len(items) > 0, err + } +} + +func hasNoOrMetadataOnlyFieldSelector(listOpts ...client.ListOption) bool { + listOptions := &client.ListOptions{} + for _, opt := range listOpts { + opt.ApplyToList(listOptions) + } + + if listOptions.FieldSelector == nil { + return true + } + + for _, req := range listOptions.FieldSelector.Requirements() { + if !strings.HasPrefix(req.Field, "metadata") && req.Field != cache.NamespaceIndex { + return false + } + } + + return true } // MakeUnique takes either a *corev1.ConfigMap or a *corev1.Secret object and makes it immutable, i.e., it sets diff --git a/vendor/github.com/gardener/gardener/pkg/utils/secrets/certificate.go b/vendor/github.com/gardener/gardener/pkg/utils/secrets/certificate.go index 6e6fc53bd..26d315e14 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/secrets/certificate.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/secrets/certificate.go @@ -59,7 +59,7 @@ const ( ) // CertificateSecretConfig contains the specification a to-be-generated CA, server, or client certificate. -// It always contains a 2048-bit RSA private key. +// It always contains a 3072-bit RSA private key. type CertificateSecretConfig struct { Name string @@ -116,7 +116,7 @@ func (s *CertificateSecretConfig) GenerateCertificate() (*Certificate, error) { // If no cert type is given then we only return a certificate object that contains the CA. 
if s.CertType != "" { - privateKey, err := GenerateKey(rand.Reader, 2048) + privateKey, err := GenerateKey(rand.Reader, 3072) if err != nil { return nil, err } diff --git a/vendor/github.com/gardener/gardener/pkg/utils/secrets/vpn_tlsauth.go b/vendor/github.com/gardener/gardener/pkg/utils/secrets/vpn_tlsauth.go index 2d879ae9c..9674b2229 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/secrets/vpn_tlsauth.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/secrets/vpn_tlsauth.go @@ -71,7 +71,6 @@ func (v *VPNTLSAuth) SecretData() map[string][]byte { // generateVPNKey generates PSK for OpenVPN similar as generated by `openvpn --genkey` command. func generateVPNKey() ([]byte, error) { - allowedCharacters := "0123456789abcdef" keyString, err := utils.GenerateRandomStringFromCharset(512, allowedCharacters) if err != nil { diff --git a/vendor/github.com/gardener/gardener/pkg/utils/version/version.go b/vendor/github.com/gardener/gardener/pkg/utils/version/version.go index 5ba29768b..8ac379018 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/version/version.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/version/version.go @@ -97,3 +97,41 @@ func normalize(version string) string { } return v } + +// VersionRange represents a version range of type [AddedInVersion, RemovedInVersion). +type VersionRange struct { + AddedInVersion string + RemovedInVersion string +} + +// Contains returns true if the range contains the given version, false otherwise. +// The range contains the given version only if it's greater or equal than AddedInVersion (always true if AddedInVersion is empty), +// and less than RemovedInVersion (always true if RemovedInVersion is empty). +func (r *VersionRange) Contains(version string) (bool, error) { + var constraint string + switch { + case r.AddedInVersion != "" && r.RemovedInVersion == "": + constraint = fmt.Sprintf(">= %s", r.AddedInVersion) + case r.AddedInVersion == "" && r.RemovedInVersion != "": + constraint = fmt.Sprintf("< %s", r.RemovedInVersion) + case r.AddedInVersion != "" && r.RemovedInVersion != "": + constraint = fmt.Sprintf(">= %s, < %s", r.AddedInVersion, r.RemovedInVersion) + default: + constraint = "*" + } + return CheckVersionMeetsConstraint(version, constraint) +} + +// SupportedVersionRange returns the supported version range for the given API. +func (r *VersionRange) SupportedVersionRange() string { + switch { + case r.AddedInVersion != "" && r.RemovedInVersion == "": + return fmt.Sprintf("versions >= %s", r.AddedInVersion) + case r.AddedInVersion == "" && r.RemovedInVersion != "": + return fmt.Sprintf("versions < %s", r.RemovedInVersion) + case r.AddedInVersion != "" && r.RemovedInVersion != "": + return fmt.Sprintf("versions >= %s, < %s", r.AddedInVersion, r.RemovedInVersion) + default: + return "all kubernetes versions" + } +} diff --git a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/register.go b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/register.go index 136a0ec43..f5fa1c552 100644 --- a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/register.go +++ b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/register.go @@ -43,29 +43,11 @@ var ( // the code-generation can find it. 
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) // AddToScheme is exposed for API installation - AddToScheme = SchemeBuilder.AddToScheme - localSchemeBuilder = &SchemeBuilder + AddToScheme = SchemeBuilder.AddToScheme ) func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &OpenStackMachineClass{}, - &OpenStackMachineClassList{}, - - &AWSMachineClass{}, - &AWSMachineClassList{}, - - &AzureMachineClass{}, - &AzureMachineClassList{}, - - &GCPMachineClass{}, - &GCPMachineClassList{}, - - &AlicloudMachineClass{}, - &AlicloudMachineClassList{}, - - &PacketMachineClass{}, - &PacketMachineClassList{}, &MachineClass{}, &MachineClassList{}, diff --git a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/types.go b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/types.go index 46e1d241b..ab83b4226 100644 --- a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/types.go +++ b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/types.go @@ -87,11 +87,11 @@ type NodeTemplateSpec struct { // MachineTemplateSpec describes the data a machine should have when created from a template type MachineTemplateSpec struct { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta // Specification of the desired behavior of the machine. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Spec MachineSpec } @@ -121,11 +121,11 @@ type MachineTemplate struct { metav1.TypeMeta // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta // Template defines the machines that will be created from this machine template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Template MachineTemplateSpec } @@ -136,7 +136,7 @@ type MachineTemplateList struct { metav1.TypeMeta // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds metav1.ListMeta // List of machine templates @@ -187,6 +187,10 @@ type LastOperation struct { // Description of the current operation Description string + // ErrorCode of the current operation if any + // +optional + ErrorCode string + // Last update time of current operation LastUpdateTime metav1.Time @@ -211,7 +215,7 @@ const ( // MachineRunning means node is ready and running successfully MachineRunning MachinePhase = "Running" - // MachineRunning means node is terminating + // MachineTerminating means node is terminating MachineTerminating MachinePhase = "Terminating" // MachineUnknown indicates that the node is not ready at the movement @@ -220,7 +224,7 @@ const ( // MachineFailed means operation failed leading to machine status failure MachineFailed MachinePhase = "Failed" - // MachineCrashLoopBackOff means creation or deletion of the machine is failing. + // MachineCrashLoopBackOff means creation or deletion of the machine is failing. It means that machine object is present but there is no corresponding VM. MachineCrashLoopBackOff MachinePhase = "CrashLoopBackOff" ) @@ -619,576 +623,6 @@ type MachineDeploymentList struct { Items []MachineDeployment } -/********************** OpenStackMachineClass APIs ***************/ - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// OpenStackMachineClass TODO -type OpenStackMachineClass struct { - metav1.ObjectMeta - - metav1.TypeMeta - - Spec OpenStackMachineClassSpec -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// OpenStackMachineClassList is a collection of OpenStackMachineClasses. -type OpenStackMachineClassList struct { - metav1.TypeMeta - - metav1.ListMeta - - Items []OpenStackMachineClass -} - -// OpenStackMachineClassSpec is the specification of a OpenStackMachineClass. -type OpenStackMachineClassSpec struct { - ImageID string - ImageName string - Region string - AvailabilityZone string - FlavorName string - KeyName string - SecurityGroups []string - Tags map[string]string - NetworkID string - Networks []OpenStackNetwork - SubnetID *string - SecretRef *corev1.SecretReference - CredentialsSecretRef *corev1.SecretReference - PodNetworkCidr string - RootDiskSize int // in GB - UseConfigDrive *bool - ServerGroupID *string -} - -type OpenStackNetwork struct { - Id string - Name string - PodNetwork bool -} - -/********************** AWSMachineClass APIs ***************/ - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AWSMachineClass TODO -type AWSMachineClass struct { - metav1.ObjectMeta - - metav1.TypeMeta - - Spec AWSMachineClassSpec -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AWSMachineClassList is a collection of AWSMachineClasses. -type AWSMachineClassList struct { - metav1.TypeMeta - - metav1.ListMeta - - Items []AWSMachineClass -} - -// AWSMachineClassSpec is the specification of a AWSMachineClass. 
-type AWSMachineClassSpec struct { - AMI string - Region string - BlockDevices []AWSBlockDeviceMappingSpec - EbsOptimized bool - IAM AWSIAMProfileSpec - MachineType string - KeyName string - Monitoring bool - NetworkInterfaces []AWSNetworkInterfaceSpec - Tags map[string]string - SpotPrice *string - SecretRef *corev1.SecretReference - CredentialsSecretRef *corev1.SecretReference - - // TODO add more here -} - -type AWSBlockDeviceMappingSpec struct { - - // The device name exposed to the machine (for example, /dev/sdh or xvdh). - DeviceName string - - // Parameters used to automatically set up EBS volumes when the machine is - // launched. - Ebs AWSEbsBlockDeviceSpec - - // Suppresses the specified device included in the block device mapping of the - // AMI. - NoDevice string - - // The virtual device name (ephemeralN). Machine store volumes are numbered - // starting from 0. An machine type with 2 available machine store volumes - // can specify mappings for ephemeral0 and ephemeral1.The number of available - // machine store volumes depends on the machine type. After you connect to - // the machine, you must mount the volume. - // - // Constraints: For M3 machines, you must specify machine store volumes in - // the block device mapping for the machine. When you launch an M3 machine, - // we ignore any machine store volumes specified in the block device mapping - // for the AMI. - VirtualName string -} - -// Describes a block device for an EBS volume. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice -type AWSEbsBlockDeviceSpec struct { - - // Indicates whether the EBS volume is deleted on machine termination. - DeleteOnTermination *bool - - // Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes - // may only be attached to machines that support Amazon EBS encryption. - Encrypted bool - - // The number of I/O operations per second (IOPS) that the volume supports. - // For io1, this represents the number of IOPS that are provisioned for the - // volume. For gp2, this represents the baseline performance of the volume and - // the rate at which the volume accumulates I/O credits for bursting. For more - // information about General Purpose SSD baseline performance, I/O credits, - // and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) - // in the Amazon Elastic Compute Cloud User Guide. - // - // Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for - // gp2 volumes. - // - // Condition: This parameter is required for requests to create io1 volumes; - // it is not used in requests to create gp2, st1, sc1, or standard volumes. - Iops int64 - - // Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed - // CMK under which the EBS volume is encrypted. - // - // This parameter is only supported on BlockDeviceMapping objects called by - // RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html), - // RequestSpotFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotFleet.html), - // and RequestSpotInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotInstances.html). - KmsKeyID *string - - // The ID of the snapshot. - SnapshotID *string - - // The size of the volume, in GiB. 
- // - // Constraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned - // IOPS SSD (io1), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for - // Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify - // a snapshot, the volume size must be equal to or larger than the snapshot - // size. - // - // Default: If you're creating the volume from a snapshot and don't specify - // a volume size, the default is the snapshot size. - VolumeSize int64 - - // The volume type: gp2, io1, st1, sc1, or standard. - // - // Default: standard - VolumeType string -} - -// Describes an IAM machine profile. -type AWSIAMProfileSpec struct { - // The Amazon Resource Name (ARN) of the machine profile. - ARN string - - // The name of the machine profile. - Name string -} - -// Describes a network interface. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/MachineAWSNetworkInterfaceSpecification -type AWSNetworkInterfaceSpec struct { - - // Indicates whether to assign a public IPv4 address to an machine you launch - // in a VPC. The public IP address can only be assigned to a network interface - // for eth0, and can only be assigned to a new network interface, not an existing - // one. You cannot specify more than one network interface in the request. If - // launching into a default subnet, the default value is true. - AssociatePublicIPAddress *bool - - // If set to true, the interface is deleted when the machine is terminated. - // You can specify true only if creating a new network interface when launching - // an machine. - DeleteOnTermination *bool - - // The description of the network interface. Applies only if creating a network - // interface when launching an machine. - Description *string - - // The IDs of the security groups for the network interface. Applies only if - // creating a network interface when launching an machine. - SecurityGroupIDs []string - - // The ID of the subnet associated with the network string. Applies only if - // creating a network interface when launching an machine. - SubnetID string -} - -/********************** AzureMachineClass APIs ***************/ - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AzureMachineClass TODO -type AzureMachineClass struct { - metav1.ObjectMeta - - metav1.TypeMeta - - Spec AzureMachineClassSpec -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AzureMachineClassList is a collection of AzureMachineClasses. -type AzureMachineClassList struct { - metav1.TypeMeta - - metav1.ListMeta - - Items []AzureMachineClass -} - -// AzureMachineClassSpec is the specification of a AzureMachineClass. -type AzureMachineClassSpec struct { - Location string - Tags map[string]string - Properties AzureVirtualMachineProperties - ResourceGroup string - SubnetInfo AzureSubnetInfo - SecretRef *corev1.SecretReference - CredentialsSecretRef *corev1.SecretReference -} - -// AzureVirtualMachineProperties is describes the properties of a Virtual Machine. -type AzureVirtualMachineProperties struct { - HardwareProfile AzureHardwareProfile - StorageProfile AzureStorageProfile - OsProfile AzureOSProfile - NetworkProfile AzureNetworkProfile - AvailabilitySet *AzureSubResource - IdentityID *string - Zone *int - MachineSet *AzureMachineSetConfig -} - -// AzureHardwareProfile is specifies the hardware settings for the virtual machine. 
-// Refer github.com/Azure/azure-sdk-for-go/arm/compute/models.go for VMSizes -type AzureHardwareProfile struct { - VMSize string -} - -// AzureStorageProfile is specifies the storage settings for the virtual machine disks. -type AzureStorageProfile struct { - ImageReference AzureImageReference - OsDisk AzureOSDisk - DataDisks []AzureDataDisk -} - -// AzureImageReference is specifies information about the image to use. You can specify information about platform images, -// marketplace images, or virtual machine images. This element is required when you want to use a platform image, -// marketplace image, or virtual machine image, but is not used in other creation operations. -type AzureImageReference struct { - ID string - URN *string -} - -// AzureOSDisk is specifies information about the operating system disk used by the virtual machine.
For more -// information about disks, see [About disks and VHDs for Azure virtual -// machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). -type AzureOSDisk struct { - Name string - Caching string - ManagedDisk AzureManagedDiskParameters - DiskSizeGB int32 - CreateOption string -} - -type AzureDataDisk struct { - Name string - Lun *int32 - Caching string - StorageAccountType string - DiskSizeGB int32 -} - -// AzureManagedDiskParameters is the parameters of a managed disk. -type AzureManagedDiskParameters struct { - ID string - StorageAccountType string -} - -// AzureOSProfile is specifies the operating system settings for the virtual machine. -type AzureOSProfile struct { - ComputerName string - AdminUsername string - AdminPassword string - CustomData string - LinuxConfiguration AzureLinuxConfiguration -} - -// AzureLinuxConfiguration is specifies the Linux operating system settings on the virtual machine.
For a list of -// supported Linux distributions, see [Linux on Azure-Endorsed -// Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) -//
For running non-endorsed distributions, see [Information for Non-Endorsed -// Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). -type AzureLinuxConfiguration struct { - DisablePasswordAuthentication bool - SSH AzureSSHConfiguration -} - -// AzureSSHConfiguration is SSH configuration for Linux based VMs running on Azure -type AzureSSHConfiguration struct { - PublicKeys AzureSSHPublicKey -} - -// AzureSSHPublicKey is contains information about SSH certificate public key and the path on the Linux VM where the public -// key is placed. -type AzureSSHPublicKey struct { - Path string - KeyData string -} - -// AzureNetworkProfile is specifies the network interfaces of the virtual machine. -type AzureNetworkProfile struct { - NetworkInterfaces AzureNetworkInterfaceReference - AcceleratedNetworking *bool -} - -// AzureNetworkInterfaceReference is describes a network interface reference. -type AzureNetworkInterfaceReference struct { - ID string - *AzureNetworkInterfaceReferenceProperties -} - -// AzureNetworkInterfaceReferenceProperties is describes a network interface reference properties. -type AzureNetworkInterfaceReferenceProperties struct { - Primary bool -} - -// AzureSubResource is the Sub Resource definition. -type AzureSubResource struct { - ID string -} - -// AzureSubnetInfo is the information containing the subnet details -type AzureSubnetInfo struct { - VnetName string - VnetResourceGroup *string - SubnetName string -} - -// AzureMachineSetConfig contains the information about the machine set -type AzureMachineSetConfig struct { - ID string - Kind string -} - -const ( - // MachineSetKindAvailabilitySet is the machine set kind for AvailabilitySet - MachineSetKindAvailabilitySet string = "availabilityset" - // MachineSetKindVMO is the machine set kind for VirtualMachineScaleSet Orchestration Mode VM (VMO) - MachineSetKindVMO string = "vmo" -) - -/********************** GCPMachineClass APIs ***************/ - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// GCPMachineClass TODO -type GCPMachineClass struct { - metav1.ObjectMeta - - metav1.TypeMeta - - Spec GCPMachineClassSpec -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// GCPMachineClassList is a collection of GCPMachineClasses. -type GCPMachineClassList struct { - metav1.TypeMeta - - metav1.ListMeta - - Items []GCPMachineClass -} - -// GCPMachineClassSpec is the specification of a GCPMachineClass. -type GCPMachineClassSpec struct { - CanIpForward bool - DeletionProtection bool - Description *string - Disks []*GCPDisk - Labels map[string]string - MachineType string - Metadata []*GCPMetadata - NetworkInterfaces []*GCPNetworkInterface - Scheduling GCPScheduling - SecretRef *corev1.SecretReference - CredentialsSecretRef *corev1.SecretReference - ServiceAccounts []GCPServiceAccount - Tags []string - Region string - Zone string -} - -// GCPDisk describes disks for GCP. -type GCPDisk struct { - AutoDelete *bool - Boot bool - SizeGb int64 - Type string - Interface string - Image string - Labels map[string]string -} - -// GCPMetadata describes metadata for GCP. -type GCPMetadata struct { - Key string - Value *string -} - -// GCPNetworkInterface describes network interfaces for GCP -type GCPNetworkInterface struct { - DisableExternalIP bool - Network string - Subnetwork string -} - -// GCPScheduling describes scheduling configuration for GCP. 
-type GCPScheduling struct { - AutomaticRestart bool - OnHostMaintenance string - Preemptible bool -} - -// GCPServiceAccount describes service accounts for GCP. -type GCPServiceAccount struct { - Email string - Scopes []string -} - -/********************** AlicloudMachineClass APIs ***************/ - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AlicloudMachineClass -type AlicloudMachineClass struct { - metav1.ObjectMeta - - metav1.TypeMeta - - Spec AlicloudMachineClassSpec -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AlicloudMachineClassList is a collection of AlicloudMachineClasses. -type AlicloudMachineClassList struct { - metav1.TypeMeta - - metav1.ListMeta - - Items []AlicloudMachineClass -} - -// AlicloudMachineClassSpec is the specification of a AlicloudMachineClass. -type AlicloudMachineClassSpec struct { - ImageID string - InstanceType string - Region string - ZoneID string - SecurityGroupID string - VSwitchID string - PrivateIPAddress string - SystemDisk *AlicloudSystemDisk - DataDisks []AlicloudDataDisk - InstanceChargeType string - InternetChargeType string - InternetMaxBandwidthIn *int - InternetMaxBandwidthOut *int - SpotStrategy string - IoOptimized string - Tags map[string]string - KeyPairName string - SecretRef *corev1.SecretReference - CredentialsSecretRef *corev1.SecretReference -} - -// AlicloudSystemDisk describes SystemDisk for Alicloud. -type AlicloudSystemDisk struct { - Category string - Size int -} - -// AlicloudDataDisk describes DataDisk for Alicloud. -type AlicloudDataDisk struct { - Name string - Category string - Description string - Encrypted bool - Size int - DeleteWithInstance *bool -} - -/********************** PacketMachineClass APIs ***************/ - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PacketMachineClass TODO -type PacketMachineClass struct { - metav1.ObjectMeta - - metav1.TypeMeta - - Spec PacketMachineClassSpec -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PacketMachineClassList is a collection of PacketMachineClasses. -type PacketMachineClassList struct { - metav1.TypeMeta - - metav1.ListMeta - - Items []PacketMachineClass -} - -// PacketMachineClassSpec is the specification of a PacketMachineClass. -type PacketMachineClassSpec struct { - Facility []string // required - MachineType string // required - OS string // required - ProjectID string // required - BillingCycle string - Tags []string - SSHKeys []string - UserData string - - SecretRef *corev1.SecretReference - CredentialsSecretRef *corev1.SecretReference - - // TODO add more here -} - // +genclient // +genclient:noStatus // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/alicoud_machineclass_types.go b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/alicoud_machineclass_types.go deleted file mode 100644 index 3c53eff7b..000000000 --- a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/alicoud_machineclass_types.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// WARNING! -// IF YOU MODIFY ANY OF THE TYPES HERE COPY THEM TO ../types.go -// AND RUN `make generate` - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // AlicloudAccessKeyID is a constant for a key name that is part of the Alibaba cloud credentials. - AlicloudAccessKeyID string = "alicloudAccessKeyID" - // AlicloudAccessKeySecret is a constant for a key name that is part of the Alibaba cloud credentials. - AlicloudAccessKeySecret string = "alicloudAccessKeySecret" - - // AlicloudAlternativeAccessKeyID is a constant for a key name of a secret containing the Alibaba cloud - // credentials (access key id). - AlicloudAlternativeAccessKeyID = "accessKeyID" - // AlicloudAlternativeAccessKeySecret is a constant for a key name of a secret containing the Alibaba cloud - // credentials (access key secret). - AlicloudAlternativeAccessKeySecret = "accessKeySecret" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true -// +kubebuilder:printcolumn:name="Instance Type",type=string,JSONPath=`.spec.instanceType` -// +kubebuilder:printcolumn:name="Region",type=string,JSONPath=`.spec.region`,priority=1 -// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`,description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" - -// AlicloudMachineClass TODO -type AlicloudMachineClass struct { - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // +optional - metav1.TypeMeta `json:",inline"` - - // +optional - Spec AlicloudMachineClassSpec `json:"spec,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true - -// AlicloudMachineClassList is a collection of AlicloudMachineClasses. -type AlicloudMachineClassList struct { - // +optional - metav1.TypeMeta `json:",inline"` - - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - // +optional - Items []AlicloudMachineClass `json:"items"` -} - -// AlicloudMachineClassSpec is the specification of a AlicloudMachineClass. 
-type AlicloudMachineClassSpec struct { - ImageID string `json:"imageID"` - InstanceType string `json:"instanceType"` - Region string `json:"region"` - ZoneID string `json:"zoneID,omitempty"` - SecurityGroupID string `json:"securityGroupID,omitempty"` - VSwitchID string `json:"vSwitchID"` - PrivateIPAddress string `json:"privateIPAddress,omitempty"` - SystemDisk *AlicloudSystemDisk `json:"systemDisk,omitempty"` - DataDisks []AlicloudDataDisk `json:"dataDisks,omitempty"` - InstanceChargeType string `json:"instanceChargeType,omitempty"` - InternetChargeType string `json:"internetChargeType,omitempty"` - InternetMaxBandwidthIn *int `json:"internetMaxBandwidthIn,omitempty"` - InternetMaxBandwidthOut *int `json:"internetMaxBandwidthOut,omitempty"` - SpotStrategy string `json:"spotStrategy,omitempty"` - IoOptimized string `json:"IoOptimized,omitempty"` - Tags map[string]string `json:"tags,omitempty"` - KeyPairName string `json:"keyPairName"` - SecretRef *corev1.SecretReference `json:"secretRef,omitempty"` - CredentialsSecretRef *corev1.SecretReference `json:"credentialsSecretRef,omitempty"` -} - -type AlicloudDataDisk struct { - Name string `json:"name,omitempty"` - Category string `json:"category,omitempty"` - // +optional - Description string `json:"description,omitempty"` - Encrypted bool `json:"encrypted,omitempty"` - DeleteWithInstance *bool `json:"deleteWithInstance,omitempty"` - Size int `json:"size,omitempty"` -} - -// AlicloudSystemDisk describes SystemDisk for Alicloud. -type AlicloudSystemDisk struct { - Category string `json:"category"` - Size int `json:"size"` -} diff --git a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/aws_machineclass_types.go b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/aws_machineclass_types.go deleted file mode 100644 index 9e4e54b6a..000000000 --- a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/aws_machineclass_types.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// WARNING! -// IF YOU MODIFY ANY OF THE TYPES HERE COPY THEM TO ../types.go -// AND RUN `make generate` - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // AWSAccessKeyID is a constant for a key name that is part of the AWS cloud credentials. - AWSAccessKeyID string = "providerAccessKeyId" - // AWSSecretAccessKey is a constant for a key name that is part of the AWS cloud credentials. - AWSSecretAccessKey string = "providerSecretAccessKey" - - // AWSAlternativeAccessKeyID is a constant for a key name of a secret containing the AWS credentials (access key - // id). 
- AWSAlternativeAccessKeyID = "accessKeyID" - // AWSAlternativeAccessKeySecret is a constant for a key name of a secret containing the AWS credentials (access key - // secret). - AWSAlternativeSecretAccessKey = "secretAccessKey" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true -// +kubebuilder:printcolumn:name="Machine Type",type=string,JSONPath=`.spec.machineType` -// +kubebuilder:printcolumn:name="AMI",type=string,JSONPath=`.spec.ami` -// +kubebuilder:printcolumn:name="Region",type=string,JSONPath=`.spec.region`,priority=1 -// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`,description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" - -// AWSMachineClass TODO -type AWSMachineClass struct { - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // +optional - metav1.TypeMeta `json:",inline"` - - // +optional - Spec AWSMachineClassSpec `json:"spec,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true - -// AWSMachineClassList is a collection of AWSMachineClasses. -type AWSMachineClassList struct { - // +optional - metav1.TypeMeta `json:",inline"` - - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - // +optional - Items []AWSMachineClass `json:"items"` -} - -// AWSMachineClassSpec is the specification of a AWSMachineClass. -type AWSMachineClassSpec struct { - AMI string `json:"ami,omitempty"` - Region string `json:"region,omitempty"` - BlockDevices []AWSBlockDeviceMappingSpec `json:"blockDevices,omitempty"` - EbsOptimized bool `json:"ebsOptimized,omitempty"` - IAM AWSIAMProfileSpec `json:"iam,omitempty"` - MachineType string `json:"machineType,omitempty"` - KeyName string `json:"keyName,omitempty"` - Monitoring bool `json:"monitoring,omitempty"` - NetworkInterfaces []AWSNetworkInterfaceSpec `json:"networkInterfaces,omitempty"` - Tags map[string]string `json:"tags,omitempty"` - SpotPrice *string `json:"spotPrice,omitempty"` - SecretRef *corev1.SecretReference `json:"secretRef,omitempty"` - CredentialsSecretRef *corev1.SecretReference `json:"credentialsSecretRef,omitempty"` - - // TODO add more here -} - -type AWSBlockDeviceMappingSpec struct { - - // The device name exposed to the machine (for example, /dev/sdh or xvdh). - DeviceName string `json:"deviceName,omitempty"` - - // Parameters used to automatically set up EBS volumes when the machine is - // launched. - Ebs AWSEbsBlockDeviceSpec `json:"ebs,omitempty"` - - // Suppresses the specified device included in the block device mapping of the - // AMI. - NoDevice string `json:"noDevice,omitempty"` - - // The virtual device name (ephemeralN). Machine store volumes are numbered - // starting from 0. An machine type with 2 available machine store volumes - // can specify mappings for ephemeral0 and ephemeral1.The number of available - // machine store volumes depends on the machine type. After you connect to - // the machine, you must mount the volume. - // - // Constraints: For M3 machines, you must specify machine store volumes in - // the block device mapping for the machine. 
When you launch an M3 machine, - // we ignore any machine store volumes specified in the block device mapping - // for the AMI. - VirtualName string `json:"virtualName,omitempty"` -} - -// Describes a block device for an EBS volume. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice -type AWSEbsBlockDeviceSpec struct { - - // Indicates whether the EBS volume is deleted on machine termination. - DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"` - - // Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes - // may only be attached to machines that support Amazon EBS encryption. - Encrypted bool `json:"encrypted,omitempty"` - - // The number of I/O operations per second (IOPS) that the volume supports. - // For io1, this represents the number of IOPS that are provisioned for the - // volume. For gp2, this represents the baseline performance of the volume and - // the rate at which the volume accumulates I/O credits for bursting. For more - // information about General Purpose SSD baseline performance, I/O credits, - // and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) - // in the Amazon Elastic Compute Cloud User Guide. - // - // Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for - // gp2 volumes. - // - // Condition: This parameter is required for requests to create io1 volumes; - // it is not used in requests to create gp2, st1, sc1, or standard volumes. - Iops int64 `json:"iops,omitempty"` - - // Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed - // CMK under which the EBS volume is encrypted. - // - // This parameter is only supported on BlockDeviceMapping objects called by - // RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html), - // RequestSpotFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotFleet.html), - // and RequestSpotInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotInstances.html). - KmsKeyID *string `json:"kmsKeyID,omitempty"` - - // The ID of the snapshot. - SnapshotID *string `json:"snapshotID,omitempty"` - - // The size of the volume, in GiB. - // - // Constraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned - // IOPS SSD (io1), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for - // Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify - // a snapshot, the volume size must be equal to or larger than the snapshot - // size. - // - // Default: If you're creating the volume from a snapshot and don't specify - // a volume size, the default is the snapshot size. - VolumeSize int64 `json:"volumeSize,omitempty"` - - // The volume type: gp2, io1, st1, sc1, or standard. - // - // Default: standard - VolumeType string `json:"volumeType,omitempty"` -} - -// Describes an IAM machine profile. -type AWSIAMProfileSpec struct { - // The Amazon Resource Name (ARN) of the machine profile. - ARN string `json:"arn,omitempty"` - - // The name of the machine profile. - Name string `json:"name,omitempty"` -} - -// Describes a network interface. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/MachineAWSNetworkInterfaceSpecification -type AWSNetworkInterfaceSpec struct { - - // Indicates whether to assign a public IPv4 address to an machine you launch - // in a VPC. 
The public IP address can only be assigned to a network interface - // for eth0, and can only be assigned to a new network interface, not an existing - // one. You cannot specify more than one network interface in the request. If - // launching into a default subnet, the default value is true. - AssociatePublicIPAddress *bool `json:"associatePublicIPAddress,omitempty"` - - // If set to true, the interface is deleted when the machine is terminated. - // You can specify true only if creating a new network interface when launching - // an machine. - DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"` - - // The description of the network interface. Applies only if creating a network - // interface when launching an machine. - Description *string `json:"description,omitempty"` - - // The IDs of the security groups for the network interface. Applies only if - // creating a network interface when launching an machine. - SecurityGroupIDs []string `json:"securityGroupIDs,omitempty"` - - // The ID of the subnet associated with the network string. Applies only if - // creating a network interface when launching an machine. - SubnetID string `json:"subnetID,omitempty"` -} diff --git a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/azure_machineclass_types.go b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/azure_machineclass_types.go deleted file mode 100644 index 690b89dbd..000000000 --- a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/azure_machineclass_types.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// WARNING! -// IF YOU MODIFY ANY OF THE TYPES HERE COPY THEM TO ../types.go -// AND RUN `make generate` - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // AzureClientID is a constant for a key name that is part of the Azure cloud credentials. - AzureClientID string = "azureClientId" - // AzureClientSecret is a constant for a key name that is part of the Azure cloud credentials. - AzureClientSecret string = "azureClientSecret" - // AzureSubscriptionID is a constant for a key name that is part of the Azure cloud credentials. - AzureSubscriptionID string = "azureSubscriptionId" - // AzureTenantID is a constant for a key name that is part of the Azure cloud credentials. - AzureTenantID string = "azureTenantId" - - // AzureAlternativeClientID is a constant for a key name of a secret containing the Azure credentials (client id). - AzureAlternativeClientID = "clientID" - // AzureAlternativeClientSecret is a constant for a key name of a secret containing the Azure credentials (client - // secret). 
- AzureAlternativeClientSecret = "clientSecret" - // AzureAlternativeSubscriptionID is a constant for a key name of a secret containing the Azure credentials - // (subscription id). - AzureAlternativeSubscriptionID = "subscriptionID" - // AzureAlternativeTenantID is a constant for a key name of a secret containing the Azure credentials (tenant id). - AzureAlternativeTenantID = "tenantID" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true -// +kubebuilder:printcolumn:name="VM Size",type=string,JSONPath=`.spec.properties.hardwareProfile.vmSize` -// +kubebuilder:printcolumn:name="Location",type=string,JSONPath=`.spec.location`,priority=1 -// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`,description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" - -// AzureMachineClass TODO -type AzureMachineClass struct { - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // +optional - metav1.TypeMeta `json:",inline"` - - // +optional - Spec AzureMachineClassSpec `json:"spec,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true - -// AzureMachineClassList is a collection of AzureMachineClasses. -type AzureMachineClassList struct { - // +optional - metav1.TypeMeta `json:",inline"` - - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - // +optional - Items []AzureMachineClass `json:"items"` -} - -// AzureMachineClassSpec is the specification of a AzureMachineClass. -type AzureMachineClassSpec struct { - Location string `json:"location,omitempty"` - Tags map[string]string `json:"tags,omitempty"` - Properties AzureVirtualMachineProperties `json:"properties,omitempty"` - ResourceGroup string `json:"resourceGroup,omitempty"` - SubnetInfo AzureSubnetInfo `json:"subnetInfo,omitempty"` - SecretRef *corev1.SecretReference `json:"secretRef,omitempty"` - CredentialsSecretRef *corev1.SecretReference `json:"credentialsSecretRef,omitempty"` -} - -// AzureVirtualMachineProperties is describes the properties of a Virtual Machine. -type AzureVirtualMachineProperties struct { - HardwareProfile AzureHardwareProfile `json:"hardwareProfile,omitempty"` - StorageProfile AzureStorageProfile `json:"storageProfile,omitempty"` - OsProfile AzureOSProfile `json:"osProfile,omitempty"` - NetworkProfile AzureNetworkProfile `json:"networkProfile,omitempty"` - AvailabilitySet *AzureSubResource `json:"availabilitySet,omitempty"` - IdentityID *string `json:"identityID,omitempty"` - Zone *int `json:"zone,omitempty"` - MachineSet *AzureMachineSetConfig `json:"machineSet,omitempty"` -} - -// AzureHardwareProfile is specifies the hardware settings for the virtual machine. -// Refer github.com/Azure/azure-sdk-for-go/arm/compute/models.go for VMSizes -type AzureHardwareProfile struct { - VMSize string `json:"vmSize,omitempty"` -} - -// AzureStorageProfile is specifies the storage settings for the virtual machine disks. 
-type AzureStorageProfile struct { - ImageReference AzureImageReference `json:"imageReference,omitempty"` - OsDisk AzureOSDisk `json:"osDisk,omitempty"` - DataDisks []AzureDataDisk `json:"dataDisks,omitempty"` -} - -// AzureImageReference is specifies information about the image to use. You can specify information about platform images, -// marketplace images, or virtual machine images. This element is required when you want to use a platform image, -// marketplace image, or virtual machine image, but is not used in other creation operations. -type AzureImageReference struct { - ID string `json:"id,omitempty"` - // Uniform Resource Name of the OS image to be used , it has the format 'publisher:offer:sku:version' - URN *string `json:"urn,omitempty"` -} - -// AzureOSDisk is specifies information about the operating system disk used by the virtual machine.
For more -// information about disks, see [About disks and VHDs for Azure virtual -// machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). -type AzureOSDisk struct { - Name string `json:"name,omitempty"` - Caching string `json:"caching,omitempty"` - ManagedDisk AzureManagedDiskParameters `json:"managedDisk,omitempty"` - DiskSizeGB int32 `json:"diskSizeGB,omitempty"` - CreateOption string `json:"createOption,omitempty"` -} - -type AzureDataDisk struct { - Name string `json:"name,omitempty"` - Lun *int32 `json:"lun,omitempty"` - Caching string `json:"caching,omitempty"` - StorageAccountType string `json:"storageAccountType,omitempty"` - DiskSizeGB int32 `json:"diskSizeGB,omitempty"` -} - -// AzureManagedDiskParameters is the parameters of a managed disk. -type AzureManagedDiskParameters struct { - ID string `json:"id,omitempty"` - StorageAccountType string `json:"storageAccountType,omitempty"` -} - -// AzureOSProfile is specifies the operating system settings for the virtual machine. -type AzureOSProfile struct { - ComputerName string `json:"computerName,omitempty"` - AdminUsername string `json:"adminUsername,omitempty"` - AdminPassword string `json:"adminPassword,omitempty"` - CustomData string `json:"customData,omitempty"` - LinuxConfiguration AzureLinuxConfiguration `json:"linuxConfiguration,omitempty"` -} - -// AzureLinuxConfiguration is specifies the Linux operating system settings on the virtual machine.
For a list of -// supported Linux distributions, see [Linux on Azure-Endorsed -// Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) -//
For running non-endorsed distributions, see [Information for Non-Endorsed -// Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). -type AzureLinuxConfiguration struct { - DisablePasswordAuthentication bool `json:"disablePasswordAuthentication,omitempty"` - SSH AzureSSHConfiguration `json:"ssh,omitempty"` -} - -// AzureSSHConfiguration is SSH configuration for Linux based VMs running on Azure -type AzureSSHConfiguration struct { - PublicKeys AzureSSHPublicKey `json:"publicKeys,omitempty"` -} - -// AzureSSHPublicKey is contains information about SSH certificate public key and the path on the Linux VM where the public -// key is placed. -type AzureSSHPublicKey struct { - Path string `json:"path,omitempty"` - KeyData string `json:"keyData,omitempty"` -} - -// AzureNetworkProfile is specifies the network interfaces of the virtual machine. -type AzureNetworkProfile struct { - NetworkInterfaces AzureNetworkInterfaceReference `json:"networkInterfaces,omitempty"` - AcceleratedNetworking *bool `json:"acceleratedNetworking,omitempty"` -} - -// AzureNetworkInterfaceReference is describes a network interface reference. -type AzureNetworkInterfaceReference struct { - ID string `json:"id,omitempty"` - *AzureNetworkInterfaceReferenceProperties `json:"properties,omitempty"` -} - -// AzureNetworkInterfaceReferenceProperties is describes a network interface reference properties. -type AzureNetworkInterfaceReferenceProperties struct { - Primary bool `json:"primary,omitempty"` -} - -// AzureSubResource is the Sub Resource definition. -type AzureSubResource struct { - ID string `json:"id,omitempty"` -} - -// AzureSubnetInfo is the information containing the subnet details -type AzureSubnetInfo struct { - VnetName string `json:"vnetName,omitempty"` - VnetResourceGroup *string `json:"vnetResourceGroup,omitempty"` - SubnetName string `json:"subnetName,omitempty"` -} - -// AzureMachineSetConfig contains the information about the machine set -type AzureMachineSetConfig struct { - ID string `json:"id"` - Kind string `json:"kind"` -} - -const ( - // MachineSetKindAvailabilitySet is the machine set kind for AvailabilitySet - MachineSetKindAvailabilitySet string = "availabilityset" - // MachineSetKindVMO is the machine set kind for VirtualMachineScaleSet Orchestration Mode VM (VMO) - MachineSetKindVMO string = "vmo" -) diff --git a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/defaults.go b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/defaults.go deleted file mode 100644 index 7f5d7eca1..000000000 --- a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/defaults.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright (c) 2017 SAP SE or an SAP affiliate company. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return RegisterDefaults(scheme) -} diff --git a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/gcp_machineclass_types.go b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/gcp_machineclass_types.go deleted file mode 100644 index 780f61560..000000000 --- a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/gcp_machineclass_types.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// WARNING! -// IF YOU MODIFY ANY OF THE TYPES HERE COPY THEM TO ../types.go -// AND RUN `make generate` - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // GCPServiceAccountJSON is a constant for a key name that is part of the GCP cloud credentials. - GCPServiceAccountJSON string = "serviceAccountJSON" - - // GCPAlternativeServiceAccountJSON is a constant for a key name of a secret containing the GCP credentials (service - // account json). - GCPAlternativeServiceAccountJSON = "serviceaccount.json" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true -// +kubebuilder:printcolumn:name="Machine Type",type=string,JSONPath=`.spec.machineType` -// +kubebuilder:printcolumn:name="Region",type=string,JSONPath=`.spec.region`,priority=1 -// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`,description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" - -// GCPMachineClass TODO -type GCPMachineClass struct { - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // +optional - metav1.TypeMeta `json:",inline"` - - // +optional - Spec GCPMachineClassSpec `json:"spec,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true - -// GCPMachineClassList is a collection of GCPMachineClasses. -type GCPMachineClassList struct { - // +optional - metav1.TypeMeta `json:",inline"` - - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - // +optional - Items []GCPMachineClass `json:"items"` -} - -// GCPMachineClassSpec is the specification of a GCPMachineClass. 
-type GCPMachineClassSpec struct { - CanIpForward bool `json:"canIpForward"` - DeletionProtection bool `json:"deletionProtection"` - Description *string `json:"description,omitempty"` - Disks []*GCPDisk `json:"disks,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - MachineType string `json:"machineType"` - Metadata []*GCPMetadata `json:"metadata,omitempty"` - NetworkInterfaces []*GCPNetworkInterface `json:"networkInterfaces,omitempty"` - Scheduling GCPScheduling `json:"scheduling"` - SecretRef *corev1.SecretReference `json:"secretRef,omitempty"` - CredentialsSecretRef *corev1.SecretReference `json:"credentialsSecretRef,omitempty"` - ServiceAccounts []GCPServiceAccount `json:"serviceAccounts"` - Tags []string `json:"tags,omitempty"` - Region string `json:"region"` - Zone string `json:"zone"` -} - -// GCPDisk describes disks for GCP. -type GCPDisk struct { - AutoDelete *bool `json:"autoDelete"` - Boot bool `json:"boot"` - SizeGb int64 `json:"sizeGb"` - Type string `json:"type"` - Interface string `json:"interface"` - Image string `json:"image"` - Labels map[string]string `json:"labels"` -} - -// GCPMetadata describes metadata for GCP. -type GCPMetadata struct { - Key string `json:"key"` - Value *string `json:"value"` -} - -// GCPNetworkInterface describes network interfaces for GCP -type GCPNetworkInterface struct { - DisableExternalIP bool `json:"disableExternalIP,omitempty"` - Network string `json:"network,omitempty"` - Subnetwork string `json:"subnetwork,omitempty"` -} - -// GCPScheduling describes scheduling configuration for GCP. -type GCPScheduling struct { - AutomaticRestart bool `json:"automaticRestart"` - OnHostMaintenance string `json:"onHostMaintenance"` - Preemptible bool `json:"preemptible"` -} - -// GCPServiceAccount describes service accounts for GCP. -type GCPServiceAccount struct { - Email string `json:"email"` - Scopes []string `json:"scopes"` -} diff --git a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/machine_types.go b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/machine_types.go index e0eb04f0a..b5920e570 100644 --- a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/machine_types.go +++ b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/machine_types.go @@ -122,6 +122,10 @@ type LastOperation struct { // Description of the current operation Description string `json:"description,omitempty"` + // ErrorCode of the current operation if any + // +optional + ErrorCode string `json:"errorCode,omitempty"` + // Last update time of current operation LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` @@ -146,7 +150,7 @@ const ( // MachineRunning means node is ready and running successfully MachineRunning MachinePhase = "Running" - // MachineRunning means node is terminating + // MachineTerminating means node is terminating MachineTerminating MachinePhase = "Terminating" // MachineUnknown indicates that the node is not ready at the movement @@ -155,7 +159,7 @@ const ( // MachineFailed means operation failed leading to machine status failure MachineFailed MachinePhase = "Failed" - // MachineCrashLoopBackOff means creation or deletion of the machine is failing. + // MachineCrashLoopBackOff means creation or deletion of the machine is failing. It means that machine object is present but there is no corresponding VM. 
MachineCrashLoopBackOff MachinePhase = "CrashLoopBackOff" ) @@ -188,7 +192,7 @@ const ( // MachineOperationHealthCheck indicates that the operation was a create MachineOperationHealthCheck MachineOperationType = "HealthCheck" - // MachineOperationDelete indicates that the operation was a create + // MachineOperationDelete indicates that the operation was a delete MachineOperationDelete MachineOperationType = "Delete" ) diff --git a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/openstack_machineclass_types.go b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/openstack_machineclass_types.go deleted file mode 100644 index 97e729e5c..000000000 --- a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/openstack_machineclass_types.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// WARNING! -// IF YOU MODIFY ANY OF THE TYPES HERE COPY THEM TO ../types.go -// AND RUN `make generate` - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // OpenStackAuthURL is a constant for a key name that is part of the OpenStack cloud credentials. - OpenStackAuthURL string = "authURL" - // OpenStackCACert is a constant for a key name that is part of the OpenStack cloud credentials. - OpenStackCACert string = "caCert" - // OpenStackInsecure is a constant for a key name that is part of the OpenStack cloud credentials. - OpenStackInsecure string = "insecure" - // OpenStackDomainName is a constant for a key name that is part of the OpenStack cloud credentials. - OpenStackDomainName string = "domainName" - // OpenStackDomainID is a constant for a key name that is part of the OpenStack cloud credentials. - OpenStackDomainID string = "domainID" - // OpenStackTenantName is a constant for a key name that is part of the OpenStack cloud credentials. - OpenStackTenantName string = "tenantName" - // OpenStackTenantID is a constant for a key name that is part of the OpenStack cloud credentials. - OpenStackTenantID string = "tenantID" - // OpenStackUserDomainName is a constant for a key name that is part of the OpenStack cloud credentials. - OpenStackUserDomainName string = "userDomainName" - // OpenStackUserDomainID is a constant for a key name that is part of the OpenStack cloud credentials. - OpenStackUserDomainID string = "userDomainID" - // OpenStackUsername is a constant for a key name that is part of the OpenStack cloud credentials. - OpenStackUsername string = "username" - // OpenStackPassword is a constant for a key name that is part of the OpenStack cloud credentials. - OpenStackPassword string = "password" - // OpenStackClientCert is a constant for a key name that is part of the OpenStack cloud credentials. 
- OpenStackClientCert string = "clientCert" - // OpenStackClientKey is a constant for a key name that is part of the OpenStack cloud credentials. - OpenStackClientKey string = "clientKey" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true -// +kubebuilder:printcolumn:name="Flavor",type=string,JSONPath=`.spec.flavorName` -// +kubebuilder:printcolumn:name="Image",type=string,JSONPath=`.spec.imageName` -// +kubebuilder:printcolumn:name="Region",type=string,JSONPath=`.spec.region`,priority=1 -// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`,description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" -// OpenStackMachineClass TODO -type OpenStackMachineClass struct { - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // +optional - metav1.TypeMeta `json:",inline"` - - // +optional - Spec OpenStackMachineClassSpec `json:"spec,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true - -// OpenStackMachineClassList is a collection of OpenStackMachineClasses. -type OpenStackMachineClassList struct { - // +optional - metav1.TypeMeta `json:",inline"` - - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - // +optional - Items []OpenStackMachineClass `json:"items"` -} - -// OpenStackMachineClassSpec is the specification of a OpenStackMachineClass. -type OpenStackMachineClassSpec struct { - ImageID string `json:"imageID"` - ImageName string `json:"imageName"` - Region string `json:"region"` - AvailabilityZone string `json:"availabilityZone"` - FlavorName string `json:"flavorName"` - KeyName string `json:"keyName"` - SecurityGroups []string `json:"securityGroups"` - Tags map[string]string `json:"tags,omitempty"` - NetworkID string `json:"networkID"` - Networks []OpenStackNetwork `json:"networks,omitempty"` - SubnetID *string `json:"subnetID,omitempty"` - SecretRef *corev1.SecretReference `json:"secretRef,omitempty"` - CredentialsSecretRef *corev1.SecretReference `json:"credentialsSecretRef,omitempty"` - PodNetworkCidr string `json:"podNetworkCidr"` - RootDiskSize int `json:"rootDiskSize,omitempty"` // in GB - UseConfigDrive *bool `json:"useConfigDrive,omitempty"` - ServerGroupID *string `json:"serverGroupID,omitempty"` -} - -type OpenStackNetwork struct { - Id string `json:"id,omitempty"` // takes priority before name - Name string `json:"name,omitempty"` - PodNetwork bool `json:"podNetwork,omitempty"` -} diff --git a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/packet_machineclass_types.go b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/packet_machineclass_types.go deleted file mode 100644 index 82d1aad2f..000000000 --- a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/packet_machineclass_types.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// WARNING! -// IF YOU MODIFY ANY OF THE TYPES HERE COPY THEM TO ../types.go -// AND RUN `make generate` - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // PacketAPIKey is a constant for a key name that is part of the Packet cloud credentials - PacketAPIKey string = "apiToken" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true -// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`,description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" - -// PacketMachineClass TODO -type PacketMachineClass struct { - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // +optional - metav1.TypeMeta `json:",inline"` - - // +optional - Spec PacketMachineClassSpec `json:"spec,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true - -// PacketMachineClassList is a collection of PacketMachineClasses. -type PacketMachineClassList struct { - // +optional - metav1.TypeMeta `json:",inline"` - - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - // +optional - Items []PacketMachineClass `json:"items"` -} - -// PacketMachineClassSpec is the specification of a PacketMachineClass. -type PacketMachineClassSpec struct { - Facility []string `json:"facility"` - MachineType string `json:"machineType"` - BillingCycle string `json:"billingCycle"` - OS string `json:"OS"` - ProjectID string `json:"projectID"` - Tags []string `json:"tags,omitempty"` - SSHKeys []string `json:"sshKeys,omitempty"` - UserData string `json:"userdata,omitempty"` - - SecretRef *corev1.SecretReference `json:"secretRef,omitempty"` - CredentialsSecretRef *corev1.SecretReference `json:"credentialsSecretRef,omitempty"` -} diff --git a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/register.go b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/register.go index d6c85928d..46e47df2e 100644 --- a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/register.go +++ b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/register.go @@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -52,24 +52,6 @@ func Resource(resource string) schema.GroupResource { // Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &OpenStackMachineClass{}, - &OpenStackMachineClassList{}, - - &AWSMachineClass{}, - &AWSMachineClassList{}, - - &AzureMachineClass{}, - &AzureMachineClassList{}, - - &GCPMachineClass{}, - &GCPMachineClassList{}, - - &AlicloudMachineClass{}, - &AlicloudMachineClassList{}, - - &PacketMachineClass{}, - &PacketMachineClassList{}, - &MachineClass{}, &MachineClassList{}, diff --git a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/shared_types.go b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/shared_types.go index c5737c45b..b5473561d 100644 --- a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/shared_types.go +++ b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/shared_types.go @@ -26,12 +26,12 @@ import ( type MachineTemplateSpec struct { // +kubebuilder:validation:XPreserveUnknownFields // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of the desired behavior of the machine. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec MachineSpec `json:"spec,omitempty"` } diff --git a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/zz_generated.conversion.go b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/zz_generated.conversion.go index c1d4a9a8a..d912fa928 100644 --- a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/zz_generated.conversion.go +++ b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/zz_generated.conversion.go @@ -39,326 +39,6 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. 
func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*AWSBlockDeviceMappingSpec)(nil), (*machine.AWSBlockDeviceMappingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AWSBlockDeviceMappingSpec_To_machine_AWSBlockDeviceMappingSpec(a.(*AWSBlockDeviceMappingSpec), b.(*machine.AWSBlockDeviceMappingSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AWSBlockDeviceMappingSpec)(nil), (*AWSBlockDeviceMappingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AWSBlockDeviceMappingSpec_To_v1alpha1_AWSBlockDeviceMappingSpec(a.(*machine.AWSBlockDeviceMappingSpec), b.(*AWSBlockDeviceMappingSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AWSEbsBlockDeviceSpec)(nil), (*machine.AWSEbsBlockDeviceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AWSEbsBlockDeviceSpec_To_machine_AWSEbsBlockDeviceSpec(a.(*AWSEbsBlockDeviceSpec), b.(*machine.AWSEbsBlockDeviceSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AWSEbsBlockDeviceSpec)(nil), (*AWSEbsBlockDeviceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AWSEbsBlockDeviceSpec_To_v1alpha1_AWSEbsBlockDeviceSpec(a.(*machine.AWSEbsBlockDeviceSpec), b.(*AWSEbsBlockDeviceSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AWSIAMProfileSpec)(nil), (*machine.AWSIAMProfileSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AWSIAMProfileSpec_To_machine_AWSIAMProfileSpec(a.(*AWSIAMProfileSpec), b.(*machine.AWSIAMProfileSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AWSIAMProfileSpec)(nil), (*AWSIAMProfileSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AWSIAMProfileSpec_To_v1alpha1_AWSIAMProfileSpec(a.(*machine.AWSIAMProfileSpec), b.(*AWSIAMProfileSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AWSMachineClass)(nil), (*machine.AWSMachineClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AWSMachineClass_To_machine_AWSMachineClass(a.(*AWSMachineClass), b.(*machine.AWSMachineClass), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AWSMachineClass)(nil), (*AWSMachineClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AWSMachineClass_To_v1alpha1_AWSMachineClass(a.(*machine.AWSMachineClass), b.(*AWSMachineClass), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AWSMachineClassList)(nil), (*machine.AWSMachineClassList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AWSMachineClassList_To_machine_AWSMachineClassList(a.(*AWSMachineClassList), b.(*machine.AWSMachineClassList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AWSMachineClassList)(nil), (*AWSMachineClassList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AWSMachineClassList_To_v1alpha1_AWSMachineClassList(a.(*machine.AWSMachineClassList), b.(*AWSMachineClassList), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*AWSMachineClassSpec)(nil), (*machine.AWSMachineClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AWSMachineClassSpec_To_machine_AWSMachineClassSpec(a.(*AWSMachineClassSpec), b.(*machine.AWSMachineClassSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AWSMachineClassSpec)(nil), (*AWSMachineClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AWSMachineClassSpec_To_v1alpha1_AWSMachineClassSpec(a.(*machine.AWSMachineClassSpec), b.(*AWSMachineClassSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AWSNetworkInterfaceSpec)(nil), (*machine.AWSNetworkInterfaceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AWSNetworkInterfaceSpec_To_machine_AWSNetworkInterfaceSpec(a.(*AWSNetworkInterfaceSpec), b.(*machine.AWSNetworkInterfaceSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AWSNetworkInterfaceSpec)(nil), (*AWSNetworkInterfaceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AWSNetworkInterfaceSpec_To_v1alpha1_AWSNetworkInterfaceSpec(a.(*machine.AWSNetworkInterfaceSpec), b.(*AWSNetworkInterfaceSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AlicloudDataDisk)(nil), (*machine.AlicloudDataDisk)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AlicloudDataDisk_To_machine_AlicloudDataDisk(a.(*AlicloudDataDisk), b.(*machine.AlicloudDataDisk), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AlicloudDataDisk)(nil), (*AlicloudDataDisk)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AlicloudDataDisk_To_v1alpha1_AlicloudDataDisk(a.(*machine.AlicloudDataDisk), b.(*AlicloudDataDisk), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AlicloudMachineClass)(nil), (*machine.AlicloudMachineClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AlicloudMachineClass_To_machine_AlicloudMachineClass(a.(*AlicloudMachineClass), b.(*machine.AlicloudMachineClass), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AlicloudMachineClass)(nil), (*AlicloudMachineClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AlicloudMachineClass_To_v1alpha1_AlicloudMachineClass(a.(*machine.AlicloudMachineClass), b.(*AlicloudMachineClass), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AlicloudMachineClassList)(nil), (*machine.AlicloudMachineClassList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AlicloudMachineClassList_To_machine_AlicloudMachineClassList(a.(*AlicloudMachineClassList), b.(*machine.AlicloudMachineClassList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AlicloudMachineClassList)(nil), (*AlicloudMachineClassList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AlicloudMachineClassList_To_v1alpha1_AlicloudMachineClassList(a.(*machine.AlicloudMachineClassList), b.(*AlicloudMachineClassList), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*AlicloudMachineClassSpec)(nil), (*machine.AlicloudMachineClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AlicloudMachineClassSpec_To_machine_AlicloudMachineClassSpec(a.(*AlicloudMachineClassSpec), b.(*machine.AlicloudMachineClassSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AlicloudMachineClassSpec)(nil), (*AlicloudMachineClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AlicloudMachineClassSpec_To_v1alpha1_AlicloudMachineClassSpec(a.(*machine.AlicloudMachineClassSpec), b.(*AlicloudMachineClassSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AlicloudSystemDisk)(nil), (*machine.AlicloudSystemDisk)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AlicloudSystemDisk_To_machine_AlicloudSystemDisk(a.(*AlicloudSystemDisk), b.(*machine.AlicloudSystemDisk), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AlicloudSystemDisk)(nil), (*AlicloudSystemDisk)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AlicloudSystemDisk_To_v1alpha1_AlicloudSystemDisk(a.(*machine.AlicloudSystemDisk), b.(*AlicloudSystemDisk), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureDataDisk)(nil), (*machine.AzureDataDisk)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AzureDataDisk_To_machine_AzureDataDisk(a.(*AzureDataDisk), b.(*machine.AzureDataDisk), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureDataDisk)(nil), (*AzureDataDisk)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureDataDisk_To_v1alpha1_AzureDataDisk(a.(*machine.AzureDataDisk), b.(*AzureDataDisk), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureHardwareProfile)(nil), (*machine.AzureHardwareProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AzureHardwareProfile_To_machine_AzureHardwareProfile(a.(*AzureHardwareProfile), b.(*machine.AzureHardwareProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureHardwareProfile)(nil), (*AzureHardwareProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureHardwareProfile_To_v1alpha1_AzureHardwareProfile(a.(*machine.AzureHardwareProfile), b.(*AzureHardwareProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureImageReference)(nil), (*machine.AzureImageReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AzureImageReference_To_machine_AzureImageReference(a.(*AzureImageReference), b.(*machine.AzureImageReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureImageReference)(nil), (*AzureImageReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureImageReference_To_v1alpha1_AzureImageReference(a.(*machine.AzureImageReference), b.(*AzureImageReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureLinuxConfiguration)(nil), (*machine.AzureLinuxConfiguration)(nil), func(a, b interface{}, scope 
conversion.Scope) error { - return Convert_v1alpha1_AzureLinuxConfiguration_To_machine_AzureLinuxConfiguration(a.(*AzureLinuxConfiguration), b.(*machine.AzureLinuxConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureLinuxConfiguration)(nil), (*AzureLinuxConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureLinuxConfiguration_To_v1alpha1_AzureLinuxConfiguration(a.(*machine.AzureLinuxConfiguration), b.(*AzureLinuxConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureMachineClass)(nil), (*machine.AzureMachineClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AzureMachineClass_To_machine_AzureMachineClass(a.(*AzureMachineClass), b.(*machine.AzureMachineClass), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureMachineClass)(nil), (*AzureMachineClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureMachineClass_To_v1alpha1_AzureMachineClass(a.(*machine.AzureMachineClass), b.(*AzureMachineClass), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureMachineClassList)(nil), (*machine.AzureMachineClassList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AzureMachineClassList_To_machine_AzureMachineClassList(a.(*AzureMachineClassList), b.(*machine.AzureMachineClassList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureMachineClassList)(nil), (*AzureMachineClassList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureMachineClassList_To_v1alpha1_AzureMachineClassList(a.(*machine.AzureMachineClassList), b.(*AzureMachineClassList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureMachineClassSpec)(nil), (*machine.AzureMachineClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AzureMachineClassSpec_To_machine_AzureMachineClassSpec(a.(*AzureMachineClassSpec), b.(*machine.AzureMachineClassSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureMachineClassSpec)(nil), (*AzureMachineClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureMachineClassSpec_To_v1alpha1_AzureMachineClassSpec(a.(*machine.AzureMachineClassSpec), b.(*AzureMachineClassSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureMachineSetConfig)(nil), (*machine.AzureMachineSetConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AzureMachineSetConfig_To_machine_AzureMachineSetConfig(a.(*AzureMachineSetConfig), b.(*machine.AzureMachineSetConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureMachineSetConfig)(nil), (*AzureMachineSetConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureMachineSetConfig_To_v1alpha1_AzureMachineSetConfig(a.(*machine.AzureMachineSetConfig), b.(*AzureMachineSetConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureManagedDiskParameters)(nil), (*machine.AzureManagedDiskParameters)(nil), func(a, b interface{}, scope conversion.Scope) error { - 
return Convert_v1alpha1_AzureManagedDiskParameters_To_machine_AzureManagedDiskParameters(a.(*AzureManagedDiskParameters), b.(*machine.AzureManagedDiskParameters), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureManagedDiskParameters)(nil), (*AzureManagedDiskParameters)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureManagedDiskParameters_To_v1alpha1_AzureManagedDiskParameters(a.(*machine.AzureManagedDiskParameters), b.(*AzureManagedDiskParameters), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureNetworkInterfaceReference)(nil), (*machine.AzureNetworkInterfaceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AzureNetworkInterfaceReference_To_machine_AzureNetworkInterfaceReference(a.(*AzureNetworkInterfaceReference), b.(*machine.AzureNetworkInterfaceReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureNetworkInterfaceReference)(nil), (*AzureNetworkInterfaceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureNetworkInterfaceReference_To_v1alpha1_AzureNetworkInterfaceReference(a.(*machine.AzureNetworkInterfaceReference), b.(*AzureNetworkInterfaceReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureNetworkInterfaceReferenceProperties)(nil), (*machine.AzureNetworkInterfaceReferenceProperties)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AzureNetworkInterfaceReferenceProperties_To_machine_AzureNetworkInterfaceReferenceProperties(a.(*AzureNetworkInterfaceReferenceProperties), b.(*machine.AzureNetworkInterfaceReferenceProperties), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureNetworkInterfaceReferenceProperties)(nil), (*AzureNetworkInterfaceReferenceProperties)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureNetworkInterfaceReferenceProperties_To_v1alpha1_AzureNetworkInterfaceReferenceProperties(a.(*machine.AzureNetworkInterfaceReferenceProperties), b.(*AzureNetworkInterfaceReferenceProperties), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureNetworkProfile)(nil), (*machine.AzureNetworkProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AzureNetworkProfile_To_machine_AzureNetworkProfile(a.(*AzureNetworkProfile), b.(*machine.AzureNetworkProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureNetworkProfile)(nil), (*AzureNetworkProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureNetworkProfile_To_v1alpha1_AzureNetworkProfile(a.(*machine.AzureNetworkProfile), b.(*AzureNetworkProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureOSDisk)(nil), (*machine.AzureOSDisk)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AzureOSDisk_To_machine_AzureOSDisk(a.(*AzureOSDisk), b.(*machine.AzureOSDisk), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureOSDisk)(nil), (*AzureOSDisk)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureOSDisk_To_v1alpha1_AzureOSDisk(a.(*machine.AzureOSDisk), 
b.(*AzureOSDisk), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureOSProfile)(nil), (*machine.AzureOSProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AzureOSProfile_To_machine_AzureOSProfile(a.(*AzureOSProfile), b.(*machine.AzureOSProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureOSProfile)(nil), (*AzureOSProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureOSProfile_To_v1alpha1_AzureOSProfile(a.(*machine.AzureOSProfile), b.(*AzureOSProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureSSHConfiguration)(nil), (*machine.AzureSSHConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AzureSSHConfiguration_To_machine_AzureSSHConfiguration(a.(*AzureSSHConfiguration), b.(*machine.AzureSSHConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureSSHConfiguration)(nil), (*AzureSSHConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureSSHConfiguration_To_v1alpha1_AzureSSHConfiguration(a.(*machine.AzureSSHConfiguration), b.(*AzureSSHConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureSSHPublicKey)(nil), (*machine.AzureSSHPublicKey)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AzureSSHPublicKey_To_machine_AzureSSHPublicKey(a.(*AzureSSHPublicKey), b.(*machine.AzureSSHPublicKey), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureSSHPublicKey)(nil), (*AzureSSHPublicKey)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureSSHPublicKey_To_v1alpha1_AzureSSHPublicKey(a.(*machine.AzureSSHPublicKey), b.(*AzureSSHPublicKey), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureStorageProfile)(nil), (*machine.AzureStorageProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AzureStorageProfile_To_machine_AzureStorageProfile(a.(*AzureStorageProfile), b.(*machine.AzureStorageProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureStorageProfile)(nil), (*AzureStorageProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureStorageProfile_To_v1alpha1_AzureStorageProfile(a.(*machine.AzureStorageProfile), b.(*AzureStorageProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureSubResource)(nil), (*machine.AzureSubResource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AzureSubResource_To_machine_AzureSubResource(a.(*AzureSubResource), b.(*machine.AzureSubResource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureSubResource)(nil), (*AzureSubResource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureSubResource_To_v1alpha1_AzureSubResource(a.(*machine.AzureSubResource), b.(*AzureSubResource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureSubnetInfo)(nil), (*machine.AzureSubnetInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha1_AzureSubnetInfo_To_machine_AzureSubnetInfo(a.(*AzureSubnetInfo), b.(*machine.AzureSubnetInfo), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureSubnetInfo)(nil), (*AzureSubnetInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureSubnetInfo_To_v1alpha1_AzureSubnetInfo(a.(*machine.AzureSubnetInfo), b.(*AzureSubnetInfo), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AzureVirtualMachineProperties)(nil), (*machine.AzureVirtualMachineProperties)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AzureVirtualMachineProperties_To_machine_AzureVirtualMachineProperties(a.(*AzureVirtualMachineProperties), b.(*machine.AzureVirtualMachineProperties), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.AzureVirtualMachineProperties)(nil), (*AzureVirtualMachineProperties)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_AzureVirtualMachineProperties_To_v1alpha1_AzureVirtualMachineProperties(a.(*machine.AzureVirtualMachineProperties), b.(*AzureVirtualMachineProperties), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*ClassSpec)(nil), (*machine.ClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_ClassSpec_To_machine_ClassSpec(a.(*ClassSpec), b.(*machine.ClassSpec), scope) }); err != nil { @@ -379,86 +59,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*GCPDisk)(nil), (*machine.GCPDisk)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_GCPDisk_To_machine_GCPDisk(a.(*GCPDisk), b.(*machine.GCPDisk), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.GCPDisk)(nil), (*GCPDisk)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_GCPDisk_To_v1alpha1_GCPDisk(a.(*machine.GCPDisk), b.(*GCPDisk), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*GCPMachineClass)(nil), (*machine.GCPMachineClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_GCPMachineClass_To_machine_GCPMachineClass(a.(*GCPMachineClass), b.(*machine.GCPMachineClass), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.GCPMachineClass)(nil), (*GCPMachineClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_GCPMachineClass_To_v1alpha1_GCPMachineClass(a.(*machine.GCPMachineClass), b.(*GCPMachineClass), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*GCPMachineClassList)(nil), (*machine.GCPMachineClassList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_GCPMachineClassList_To_machine_GCPMachineClassList(a.(*GCPMachineClassList), b.(*machine.GCPMachineClassList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.GCPMachineClassList)(nil), (*GCPMachineClassList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_GCPMachineClassList_To_v1alpha1_GCPMachineClassList(a.(*machine.GCPMachineClassList), b.(*GCPMachineClassList), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*GCPMachineClassSpec)(nil), (*machine.GCPMachineClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_GCPMachineClassSpec_To_machine_GCPMachineClassSpec(a.(*GCPMachineClassSpec), b.(*machine.GCPMachineClassSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.GCPMachineClassSpec)(nil), (*GCPMachineClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_GCPMachineClassSpec_To_v1alpha1_GCPMachineClassSpec(a.(*machine.GCPMachineClassSpec), b.(*GCPMachineClassSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*GCPMetadata)(nil), (*machine.GCPMetadata)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_GCPMetadata_To_machine_GCPMetadata(a.(*GCPMetadata), b.(*machine.GCPMetadata), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.GCPMetadata)(nil), (*GCPMetadata)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_GCPMetadata_To_v1alpha1_GCPMetadata(a.(*machine.GCPMetadata), b.(*GCPMetadata), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*GCPNetworkInterface)(nil), (*machine.GCPNetworkInterface)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_GCPNetworkInterface_To_machine_GCPNetworkInterface(a.(*GCPNetworkInterface), b.(*machine.GCPNetworkInterface), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.GCPNetworkInterface)(nil), (*GCPNetworkInterface)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_GCPNetworkInterface_To_v1alpha1_GCPNetworkInterface(a.(*machine.GCPNetworkInterface), b.(*GCPNetworkInterface), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*GCPScheduling)(nil), (*machine.GCPScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_GCPScheduling_To_machine_GCPScheduling(a.(*GCPScheduling), b.(*machine.GCPScheduling), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.GCPScheduling)(nil), (*GCPScheduling)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_GCPScheduling_To_v1alpha1_GCPScheduling(a.(*machine.GCPScheduling), b.(*GCPScheduling), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*GCPServiceAccount)(nil), (*machine.GCPServiceAccount)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_GCPServiceAccount_To_machine_GCPServiceAccount(a.(*GCPServiceAccount), b.(*machine.GCPServiceAccount), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.GCPServiceAccount)(nil), (*GCPServiceAccount)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_GCPServiceAccount_To_v1alpha1_GCPServiceAccount(a.(*machine.GCPServiceAccount), b.(*GCPServiceAccount), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*LastOperation)(nil), (*machine.LastOperation)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_LastOperation_To_machine_LastOperation(a.(*LastOperation), b.(*machine.LastOperation), scope) }); err != nil { @@ -665,1059 +265,53 @@ func RegisterConversions(s 
*runtime.Scheme) error { return err } if err := s.AddGeneratedConversionFunc((*machine.MachineTemplateSpec)(nil), (*MachineTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_MachineTemplateSpec_To_v1alpha1_MachineTemplateSpec(a.(*machine.MachineTemplateSpec), b.(*MachineTemplateSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NodeTemplate)(nil), (*machine.NodeTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NodeTemplate_To_machine_NodeTemplate(a.(*NodeTemplate), b.(*machine.NodeTemplate), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.NodeTemplate)(nil), (*NodeTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_NodeTemplate_To_v1alpha1_NodeTemplate(a.(*machine.NodeTemplate), b.(*NodeTemplate), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NodeTemplateSpec)(nil), (*machine.NodeTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NodeTemplateSpec_To_machine_NodeTemplateSpec(a.(*NodeTemplateSpec), b.(*machine.NodeTemplateSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.NodeTemplateSpec)(nil), (*NodeTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_NodeTemplateSpec_To_v1alpha1_NodeTemplateSpec(a.(*machine.NodeTemplateSpec), b.(*NodeTemplateSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*OpenStackMachineClass)(nil), (*machine.OpenStackMachineClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_OpenStackMachineClass_To_machine_OpenStackMachineClass(a.(*OpenStackMachineClass), b.(*machine.OpenStackMachineClass), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.OpenStackMachineClass)(nil), (*OpenStackMachineClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_OpenStackMachineClass_To_v1alpha1_OpenStackMachineClass(a.(*machine.OpenStackMachineClass), b.(*OpenStackMachineClass), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*OpenStackMachineClassList)(nil), (*machine.OpenStackMachineClassList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_OpenStackMachineClassList_To_machine_OpenStackMachineClassList(a.(*OpenStackMachineClassList), b.(*machine.OpenStackMachineClassList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.OpenStackMachineClassList)(nil), (*OpenStackMachineClassList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_OpenStackMachineClassList_To_v1alpha1_OpenStackMachineClassList(a.(*machine.OpenStackMachineClassList), b.(*OpenStackMachineClassList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*OpenStackMachineClassSpec)(nil), (*machine.OpenStackMachineClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_OpenStackMachineClassSpec_To_machine_OpenStackMachineClassSpec(a.(*OpenStackMachineClassSpec), b.(*machine.OpenStackMachineClassSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.OpenStackMachineClassSpec)(nil), 
(*OpenStackMachineClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_OpenStackMachineClassSpec_To_v1alpha1_OpenStackMachineClassSpec(a.(*machine.OpenStackMachineClassSpec), b.(*OpenStackMachineClassSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*OpenStackNetwork)(nil), (*machine.OpenStackNetwork)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_OpenStackNetwork_To_machine_OpenStackNetwork(a.(*OpenStackNetwork), b.(*machine.OpenStackNetwork), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.OpenStackNetwork)(nil), (*OpenStackNetwork)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_OpenStackNetwork_To_v1alpha1_OpenStackNetwork(a.(*machine.OpenStackNetwork), b.(*OpenStackNetwork), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*PacketMachineClass)(nil), (*machine.PacketMachineClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_PacketMachineClass_To_machine_PacketMachineClass(a.(*PacketMachineClass), b.(*machine.PacketMachineClass), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.PacketMachineClass)(nil), (*PacketMachineClass)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_PacketMachineClass_To_v1alpha1_PacketMachineClass(a.(*machine.PacketMachineClass), b.(*PacketMachineClass), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*PacketMachineClassList)(nil), (*machine.PacketMachineClassList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_PacketMachineClassList_To_machine_PacketMachineClassList(a.(*PacketMachineClassList), b.(*machine.PacketMachineClassList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.PacketMachineClassList)(nil), (*PacketMachineClassList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_PacketMachineClassList_To_v1alpha1_PacketMachineClassList(a.(*machine.PacketMachineClassList), b.(*PacketMachineClassList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*PacketMachineClassSpec)(nil), (*machine.PacketMachineClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_PacketMachineClassSpec_To_machine_PacketMachineClassSpec(a.(*PacketMachineClassSpec), b.(*machine.PacketMachineClassSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.PacketMachineClassSpec)(nil), (*PacketMachineClassSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_PacketMachineClassSpec_To_v1alpha1_PacketMachineClassSpec(a.(*machine.PacketMachineClassSpec), b.(*PacketMachineClassSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*RollbackConfig)(nil), (*machine.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_RollbackConfig_To_machine_RollbackConfig(a.(*RollbackConfig), b.(*machine.RollbackConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.RollbackConfig)(nil), (*RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_machine_RollbackConfig_To_v1alpha1_RollbackConfig(a.(*machine.RollbackConfig), b.(*RollbackConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*RollingUpdateMachineDeployment)(nil), (*machine.RollingUpdateMachineDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_RollingUpdateMachineDeployment_To_machine_RollingUpdateMachineDeployment(a.(*RollingUpdateMachineDeployment), b.(*machine.RollingUpdateMachineDeployment), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*machine.RollingUpdateMachineDeployment)(nil), (*RollingUpdateMachineDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_machine_RollingUpdateMachineDeployment_To_v1alpha1_RollingUpdateMachineDeployment(a.(*machine.RollingUpdateMachineDeployment), b.(*RollingUpdateMachineDeployment), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1alpha1_AWSBlockDeviceMappingSpec_To_machine_AWSBlockDeviceMappingSpec(in *AWSBlockDeviceMappingSpec, out *machine.AWSBlockDeviceMappingSpec, s conversion.Scope) error { - out.DeviceName = in.DeviceName - if err := Convert_v1alpha1_AWSEbsBlockDeviceSpec_To_machine_AWSEbsBlockDeviceSpec(&in.Ebs, &out.Ebs, s); err != nil { - return err - } - out.NoDevice = in.NoDevice - out.VirtualName = in.VirtualName - return nil -} - -// Convert_v1alpha1_AWSBlockDeviceMappingSpec_To_machine_AWSBlockDeviceMappingSpec is an autogenerated conversion function. -func Convert_v1alpha1_AWSBlockDeviceMappingSpec_To_machine_AWSBlockDeviceMappingSpec(in *AWSBlockDeviceMappingSpec, out *machine.AWSBlockDeviceMappingSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_AWSBlockDeviceMappingSpec_To_machine_AWSBlockDeviceMappingSpec(in, out, s) -} - -func autoConvert_machine_AWSBlockDeviceMappingSpec_To_v1alpha1_AWSBlockDeviceMappingSpec(in *machine.AWSBlockDeviceMappingSpec, out *AWSBlockDeviceMappingSpec, s conversion.Scope) error { - out.DeviceName = in.DeviceName - if err := Convert_machine_AWSEbsBlockDeviceSpec_To_v1alpha1_AWSEbsBlockDeviceSpec(&in.Ebs, &out.Ebs, s); err != nil { - return err - } - out.NoDevice = in.NoDevice - out.VirtualName = in.VirtualName - return nil -} - -// Convert_machine_AWSBlockDeviceMappingSpec_To_v1alpha1_AWSBlockDeviceMappingSpec is an autogenerated conversion function. -func Convert_machine_AWSBlockDeviceMappingSpec_To_v1alpha1_AWSBlockDeviceMappingSpec(in *machine.AWSBlockDeviceMappingSpec, out *AWSBlockDeviceMappingSpec, s conversion.Scope) error { - return autoConvert_machine_AWSBlockDeviceMappingSpec_To_v1alpha1_AWSBlockDeviceMappingSpec(in, out, s) -} - -func autoConvert_v1alpha1_AWSEbsBlockDeviceSpec_To_machine_AWSEbsBlockDeviceSpec(in *AWSEbsBlockDeviceSpec, out *machine.AWSEbsBlockDeviceSpec, s conversion.Scope) error { - out.DeleteOnTermination = (*bool)(unsafe.Pointer(in.DeleteOnTermination)) - out.Encrypted = in.Encrypted - out.Iops = in.Iops - out.KmsKeyID = (*string)(unsafe.Pointer(in.KmsKeyID)) - out.SnapshotID = (*string)(unsafe.Pointer(in.SnapshotID)) - out.VolumeSize = in.VolumeSize - out.VolumeType = in.VolumeType - return nil -} - -// Convert_v1alpha1_AWSEbsBlockDeviceSpec_To_machine_AWSEbsBlockDeviceSpec is an autogenerated conversion function. 
-func Convert_v1alpha1_AWSEbsBlockDeviceSpec_To_machine_AWSEbsBlockDeviceSpec(in *AWSEbsBlockDeviceSpec, out *machine.AWSEbsBlockDeviceSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_AWSEbsBlockDeviceSpec_To_machine_AWSEbsBlockDeviceSpec(in, out, s) -} - -func autoConvert_machine_AWSEbsBlockDeviceSpec_To_v1alpha1_AWSEbsBlockDeviceSpec(in *machine.AWSEbsBlockDeviceSpec, out *AWSEbsBlockDeviceSpec, s conversion.Scope) error { - out.DeleteOnTermination = (*bool)(unsafe.Pointer(in.DeleteOnTermination)) - out.Encrypted = in.Encrypted - out.Iops = in.Iops - out.KmsKeyID = (*string)(unsafe.Pointer(in.KmsKeyID)) - out.SnapshotID = (*string)(unsafe.Pointer(in.SnapshotID)) - out.VolumeSize = in.VolumeSize - out.VolumeType = in.VolumeType - return nil -} - -// Convert_machine_AWSEbsBlockDeviceSpec_To_v1alpha1_AWSEbsBlockDeviceSpec is an autogenerated conversion function. -func Convert_machine_AWSEbsBlockDeviceSpec_To_v1alpha1_AWSEbsBlockDeviceSpec(in *machine.AWSEbsBlockDeviceSpec, out *AWSEbsBlockDeviceSpec, s conversion.Scope) error { - return autoConvert_machine_AWSEbsBlockDeviceSpec_To_v1alpha1_AWSEbsBlockDeviceSpec(in, out, s) -} - -func autoConvert_v1alpha1_AWSIAMProfileSpec_To_machine_AWSIAMProfileSpec(in *AWSIAMProfileSpec, out *machine.AWSIAMProfileSpec, s conversion.Scope) error { - out.ARN = in.ARN - out.Name = in.Name - return nil -} - -// Convert_v1alpha1_AWSIAMProfileSpec_To_machine_AWSIAMProfileSpec is an autogenerated conversion function. -func Convert_v1alpha1_AWSIAMProfileSpec_To_machine_AWSIAMProfileSpec(in *AWSIAMProfileSpec, out *machine.AWSIAMProfileSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_AWSIAMProfileSpec_To_machine_AWSIAMProfileSpec(in, out, s) -} - -func autoConvert_machine_AWSIAMProfileSpec_To_v1alpha1_AWSIAMProfileSpec(in *machine.AWSIAMProfileSpec, out *AWSIAMProfileSpec, s conversion.Scope) error { - out.ARN = in.ARN - out.Name = in.Name - return nil -} - -// Convert_machine_AWSIAMProfileSpec_To_v1alpha1_AWSIAMProfileSpec is an autogenerated conversion function. -func Convert_machine_AWSIAMProfileSpec_To_v1alpha1_AWSIAMProfileSpec(in *machine.AWSIAMProfileSpec, out *AWSIAMProfileSpec, s conversion.Scope) error { - return autoConvert_machine_AWSIAMProfileSpec_To_v1alpha1_AWSIAMProfileSpec(in, out, s) -} - -func autoConvert_v1alpha1_AWSMachineClass_To_machine_AWSMachineClass(in *AWSMachineClass, out *machine.AWSMachineClass, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_AWSMachineClassSpec_To_machine_AWSMachineClassSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_AWSMachineClass_To_machine_AWSMachineClass is an autogenerated conversion function. -func Convert_v1alpha1_AWSMachineClass_To_machine_AWSMachineClass(in *AWSMachineClass, out *machine.AWSMachineClass, s conversion.Scope) error { - return autoConvert_v1alpha1_AWSMachineClass_To_machine_AWSMachineClass(in, out, s) -} - -func autoConvert_machine_AWSMachineClass_To_v1alpha1_AWSMachineClass(in *machine.AWSMachineClass, out *AWSMachineClass, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_machine_AWSMachineClassSpec_To_v1alpha1_AWSMachineClassSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_machine_AWSMachineClass_To_v1alpha1_AWSMachineClass is an autogenerated conversion function. 
-func Convert_machine_AWSMachineClass_To_v1alpha1_AWSMachineClass(in *machine.AWSMachineClass, out *AWSMachineClass, s conversion.Scope) error { - return autoConvert_machine_AWSMachineClass_To_v1alpha1_AWSMachineClass(in, out, s) -} - -func autoConvert_v1alpha1_AWSMachineClassList_To_machine_AWSMachineClassList(in *AWSMachineClassList, out *machine.AWSMachineClassList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]machine.AWSMachineClass)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_AWSMachineClassList_To_machine_AWSMachineClassList is an autogenerated conversion function. -func Convert_v1alpha1_AWSMachineClassList_To_machine_AWSMachineClassList(in *AWSMachineClassList, out *machine.AWSMachineClassList, s conversion.Scope) error { - return autoConvert_v1alpha1_AWSMachineClassList_To_machine_AWSMachineClassList(in, out, s) -} - -func autoConvert_machine_AWSMachineClassList_To_v1alpha1_AWSMachineClassList(in *machine.AWSMachineClassList, out *AWSMachineClassList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]AWSMachineClass)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_machine_AWSMachineClassList_To_v1alpha1_AWSMachineClassList is an autogenerated conversion function. -func Convert_machine_AWSMachineClassList_To_v1alpha1_AWSMachineClassList(in *machine.AWSMachineClassList, out *AWSMachineClassList, s conversion.Scope) error { - return autoConvert_machine_AWSMachineClassList_To_v1alpha1_AWSMachineClassList(in, out, s) -} - -func autoConvert_v1alpha1_AWSMachineClassSpec_To_machine_AWSMachineClassSpec(in *AWSMachineClassSpec, out *machine.AWSMachineClassSpec, s conversion.Scope) error { - out.AMI = in.AMI - out.Region = in.Region - out.BlockDevices = *(*[]machine.AWSBlockDeviceMappingSpec)(unsafe.Pointer(&in.BlockDevices)) - out.EbsOptimized = in.EbsOptimized - if err := Convert_v1alpha1_AWSIAMProfileSpec_To_machine_AWSIAMProfileSpec(&in.IAM, &out.IAM, s); err != nil { - return err - } - out.MachineType = in.MachineType - out.KeyName = in.KeyName - out.Monitoring = in.Monitoring - out.NetworkInterfaces = *(*[]machine.AWSNetworkInterfaceSpec)(unsafe.Pointer(&in.NetworkInterfaces)) - out.Tags = *(*map[string]string)(unsafe.Pointer(&in.Tags)) - out.SpotPrice = (*string)(unsafe.Pointer(in.SpotPrice)) - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.CredentialsSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.CredentialsSecretRef)) - return nil -} - -// Convert_v1alpha1_AWSMachineClassSpec_To_machine_AWSMachineClassSpec is an autogenerated conversion function. 
-func Convert_v1alpha1_AWSMachineClassSpec_To_machine_AWSMachineClassSpec(in *AWSMachineClassSpec, out *machine.AWSMachineClassSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_AWSMachineClassSpec_To_machine_AWSMachineClassSpec(in, out, s) -} - -func autoConvert_machine_AWSMachineClassSpec_To_v1alpha1_AWSMachineClassSpec(in *machine.AWSMachineClassSpec, out *AWSMachineClassSpec, s conversion.Scope) error { - out.AMI = in.AMI - out.Region = in.Region - out.BlockDevices = *(*[]AWSBlockDeviceMappingSpec)(unsafe.Pointer(&in.BlockDevices)) - out.EbsOptimized = in.EbsOptimized - if err := Convert_machine_AWSIAMProfileSpec_To_v1alpha1_AWSIAMProfileSpec(&in.IAM, &out.IAM, s); err != nil { - return err - } - out.MachineType = in.MachineType - out.KeyName = in.KeyName - out.Monitoring = in.Monitoring - out.NetworkInterfaces = *(*[]AWSNetworkInterfaceSpec)(unsafe.Pointer(&in.NetworkInterfaces)) - out.Tags = *(*map[string]string)(unsafe.Pointer(&in.Tags)) - out.SpotPrice = (*string)(unsafe.Pointer(in.SpotPrice)) - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.CredentialsSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.CredentialsSecretRef)) - return nil -} - -// Convert_machine_AWSMachineClassSpec_To_v1alpha1_AWSMachineClassSpec is an autogenerated conversion function. -func Convert_machine_AWSMachineClassSpec_To_v1alpha1_AWSMachineClassSpec(in *machine.AWSMachineClassSpec, out *AWSMachineClassSpec, s conversion.Scope) error { - return autoConvert_machine_AWSMachineClassSpec_To_v1alpha1_AWSMachineClassSpec(in, out, s) -} - -func autoConvert_v1alpha1_AWSNetworkInterfaceSpec_To_machine_AWSNetworkInterfaceSpec(in *AWSNetworkInterfaceSpec, out *machine.AWSNetworkInterfaceSpec, s conversion.Scope) error { - out.AssociatePublicIPAddress = (*bool)(unsafe.Pointer(in.AssociatePublicIPAddress)) - out.DeleteOnTermination = (*bool)(unsafe.Pointer(in.DeleteOnTermination)) - out.Description = (*string)(unsafe.Pointer(in.Description)) - out.SecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SecurityGroupIDs)) - out.SubnetID = in.SubnetID - return nil -} - -// Convert_v1alpha1_AWSNetworkInterfaceSpec_To_machine_AWSNetworkInterfaceSpec is an autogenerated conversion function. -func Convert_v1alpha1_AWSNetworkInterfaceSpec_To_machine_AWSNetworkInterfaceSpec(in *AWSNetworkInterfaceSpec, out *machine.AWSNetworkInterfaceSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_AWSNetworkInterfaceSpec_To_machine_AWSNetworkInterfaceSpec(in, out, s) -} - -func autoConvert_machine_AWSNetworkInterfaceSpec_To_v1alpha1_AWSNetworkInterfaceSpec(in *machine.AWSNetworkInterfaceSpec, out *AWSNetworkInterfaceSpec, s conversion.Scope) error { - out.AssociatePublicIPAddress = (*bool)(unsafe.Pointer(in.AssociatePublicIPAddress)) - out.DeleteOnTermination = (*bool)(unsafe.Pointer(in.DeleteOnTermination)) - out.Description = (*string)(unsafe.Pointer(in.Description)) - out.SecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SecurityGroupIDs)) - out.SubnetID = in.SubnetID - return nil -} - -// Convert_machine_AWSNetworkInterfaceSpec_To_v1alpha1_AWSNetworkInterfaceSpec is an autogenerated conversion function. 
-func Convert_machine_AWSNetworkInterfaceSpec_To_v1alpha1_AWSNetworkInterfaceSpec(in *machine.AWSNetworkInterfaceSpec, out *AWSNetworkInterfaceSpec, s conversion.Scope) error { - return autoConvert_machine_AWSNetworkInterfaceSpec_To_v1alpha1_AWSNetworkInterfaceSpec(in, out, s) -} - -func autoConvert_v1alpha1_AlicloudDataDisk_To_machine_AlicloudDataDisk(in *AlicloudDataDisk, out *machine.AlicloudDataDisk, s conversion.Scope) error { - out.Name = in.Name - out.Category = in.Category - out.Description = in.Description - out.Encrypted = in.Encrypted - out.DeleteWithInstance = (*bool)(unsafe.Pointer(in.DeleteWithInstance)) - out.Size = in.Size - return nil -} - -// Convert_v1alpha1_AlicloudDataDisk_To_machine_AlicloudDataDisk is an autogenerated conversion function. -func Convert_v1alpha1_AlicloudDataDisk_To_machine_AlicloudDataDisk(in *AlicloudDataDisk, out *machine.AlicloudDataDisk, s conversion.Scope) error { - return autoConvert_v1alpha1_AlicloudDataDisk_To_machine_AlicloudDataDisk(in, out, s) -} - -func autoConvert_machine_AlicloudDataDisk_To_v1alpha1_AlicloudDataDisk(in *machine.AlicloudDataDisk, out *AlicloudDataDisk, s conversion.Scope) error { - out.Name = in.Name - out.Category = in.Category - out.Description = in.Description - out.Encrypted = in.Encrypted - out.Size = in.Size - out.DeleteWithInstance = (*bool)(unsafe.Pointer(in.DeleteWithInstance)) - return nil -} - -// Convert_machine_AlicloudDataDisk_To_v1alpha1_AlicloudDataDisk is an autogenerated conversion function. -func Convert_machine_AlicloudDataDisk_To_v1alpha1_AlicloudDataDisk(in *machine.AlicloudDataDisk, out *AlicloudDataDisk, s conversion.Scope) error { - return autoConvert_machine_AlicloudDataDisk_To_v1alpha1_AlicloudDataDisk(in, out, s) -} - -func autoConvert_v1alpha1_AlicloudMachineClass_To_machine_AlicloudMachineClass(in *AlicloudMachineClass, out *machine.AlicloudMachineClass, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_AlicloudMachineClassSpec_To_machine_AlicloudMachineClassSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_AlicloudMachineClass_To_machine_AlicloudMachineClass is an autogenerated conversion function. -func Convert_v1alpha1_AlicloudMachineClass_To_machine_AlicloudMachineClass(in *AlicloudMachineClass, out *machine.AlicloudMachineClass, s conversion.Scope) error { - return autoConvert_v1alpha1_AlicloudMachineClass_To_machine_AlicloudMachineClass(in, out, s) -} - -func autoConvert_machine_AlicloudMachineClass_To_v1alpha1_AlicloudMachineClass(in *machine.AlicloudMachineClass, out *AlicloudMachineClass, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_machine_AlicloudMachineClassSpec_To_v1alpha1_AlicloudMachineClassSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_machine_AlicloudMachineClass_To_v1alpha1_AlicloudMachineClass is an autogenerated conversion function. 
-func Convert_machine_AlicloudMachineClass_To_v1alpha1_AlicloudMachineClass(in *machine.AlicloudMachineClass, out *AlicloudMachineClass, s conversion.Scope) error { - return autoConvert_machine_AlicloudMachineClass_To_v1alpha1_AlicloudMachineClass(in, out, s) -} - -func autoConvert_v1alpha1_AlicloudMachineClassList_To_machine_AlicloudMachineClassList(in *AlicloudMachineClassList, out *machine.AlicloudMachineClassList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]machine.AlicloudMachineClass, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_AlicloudMachineClass_To_machine_AlicloudMachineClass(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1alpha1_AlicloudMachineClassList_To_machine_AlicloudMachineClassList is an autogenerated conversion function. -func Convert_v1alpha1_AlicloudMachineClassList_To_machine_AlicloudMachineClassList(in *AlicloudMachineClassList, out *machine.AlicloudMachineClassList, s conversion.Scope) error { - return autoConvert_v1alpha1_AlicloudMachineClassList_To_machine_AlicloudMachineClassList(in, out, s) -} - -func autoConvert_machine_AlicloudMachineClassList_To_v1alpha1_AlicloudMachineClassList(in *machine.AlicloudMachineClassList, out *AlicloudMachineClassList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]AlicloudMachineClass, len(*in)) - for i := range *in { - if err := Convert_machine_AlicloudMachineClass_To_v1alpha1_AlicloudMachineClass(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_machine_AlicloudMachineClassList_To_v1alpha1_AlicloudMachineClassList is an autogenerated conversion function. 
-func Convert_machine_AlicloudMachineClassList_To_v1alpha1_AlicloudMachineClassList(in *machine.AlicloudMachineClassList, out *AlicloudMachineClassList, s conversion.Scope) error { - return autoConvert_machine_AlicloudMachineClassList_To_v1alpha1_AlicloudMachineClassList(in, out, s) -} - -func autoConvert_v1alpha1_AlicloudMachineClassSpec_To_machine_AlicloudMachineClassSpec(in *AlicloudMachineClassSpec, out *machine.AlicloudMachineClassSpec, s conversion.Scope) error { - out.ImageID = in.ImageID - out.InstanceType = in.InstanceType - out.Region = in.Region - out.ZoneID = in.ZoneID - out.SecurityGroupID = in.SecurityGroupID - out.VSwitchID = in.VSwitchID - out.PrivateIPAddress = in.PrivateIPAddress - out.SystemDisk = (*machine.AlicloudSystemDisk)(unsafe.Pointer(in.SystemDisk)) - if in.DataDisks != nil { - in, out := &in.DataDisks, &out.DataDisks - *out = make([]machine.AlicloudDataDisk, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_AlicloudDataDisk_To_machine_AlicloudDataDisk(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.DataDisks = nil - } - out.InstanceChargeType = in.InstanceChargeType - out.InternetChargeType = in.InternetChargeType - out.InternetMaxBandwidthIn = (*int)(unsafe.Pointer(in.InternetMaxBandwidthIn)) - out.InternetMaxBandwidthOut = (*int)(unsafe.Pointer(in.InternetMaxBandwidthOut)) - out.SpotStrategy = in.SpotStrategy - out.IoOptimized = in.IoOptimized - out.Tags = *(*map[string]string)(unsafe.Pointer(&in.Tags)) - out.KeyPairName = in.KeyPairName - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.CredentialsSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.CredentialsSecretRef)) - return nil -} - -// Convert_v1alpha1_AlicloudMachineClassSpec_To_machine_AlicloudMachineClassSpec is an autogenerated conversion function. 
-func Convert_v1alpha1_AlicloudMachineClassSpec_To_machine_AlicloudMachineClassSpec(in *AlicloudMachineClassSpec, out *machine.AlicloudMachineClassSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_AlicloudMachineClassSpec_To_machine_AlicloudMachineClassSpec(in, out, s) -} - -func autoConvert_machine_AlicloudMachineClassSpec_To_v1alpha1_AlicloudMachineClassSpec(in *machine.AlicloudMachineClassSpec, out *AlicloudMachineClassSpec, s conversion.Scope) error { - out.ImageID = in.ImageID - out.InstanceType = in.InstanceType - out.Region = in.Region - out.ZoneID = in.ZoneID - out.SecurityGroupID = in.SecurityGroupID - out.VSwitchID = in.VSwitchID - out.PrivateIPAddress = in.PrivateIPAddress - out.SystemDisk = (*AlicloudSystemDisk)(unsafe.Pointer(in.SystemDisk)) - if in.DataDisks != nil { - in, out := &in.DataDisks, &out.DataDisks - *out = make([]AlicloudDataDisk, len(*in)) - for i := range *in { - if err := Convert_machine_AlicloudDataDisk_To_v1alpha1_AlicloudDataDisk(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.DataDisks = nil - } - out.InstanceChargeType = in.InstanceChargeType - out.InternetChargeType = in.InternetChargeType - out.InternetMaxBandwidthIn = (*int)(unsafe.Pointer(in.InternetMaxBandwidthIn)) - out.InternetMaxBandwidthOut = (*int)(unsafe.Pointer(in.InternetMaxBandwidthOut)) - out.SpotStrategy = in.SpotStrategy - out.IoOptimized = in.IoOptimized - out.Tags = *(*map[string]string)(unsafe.Pointer(&in.Tags)) - out.KeyPairName = in.KeyPairName - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.CredentialsSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.CredentialsSecretRef)) - return nil -} - -// Convert_machine_AlicloudMachineClassSpec_To_v1alpha1_AlicloudMachineClassSpec is an autogenerated conversion function. -func Convert_machine_AlicloudMachineClassSpec_To_v1alpha1_AlicloudMachineClassSpec(in *machine.AlicloudMachineClassSpec, out *AlicloudMachineClassSpec, s conversion.Scope) error { - return autoConvert_machine_AlicloudMachineClassSpec_To_v1alpha1_AlicloudMachineClassSpec(in, out, s) -} - -func autoConvert_v1alpha1_AlicloudSystemDisk_To_machine_AlicloudSystemDisk(in *AlicloudSystemDisk, out *machine.AlicloudSystemDisk, s conversion.Scope) error { - out.Category = in.Category - out.Size = in.Size - return nil -} - -// Convert_v1alpha1_AlicloudSystemDisk_To_machine_AlicloudSystemDisk is an autogenerated conversion function. -func Convert_v1alpha1_AlicloudSystemDisk_To_machine_AlicloudSystemDisk(in *AlicloudSystemDisk, out *machine.AlicloudSystemDisk, s conversion.Scope) error { - return autoConvert_v1alpha1_AlicloudSystemDisk_To_machine_AlicloudSystemDisk(in, out, s) -} - -func autoConvert_machine_AlicloudSystemDisk_To_v1alpha1_AlicloudSystemDisk(in *machine.AlicloudSystemDisk, out *AlicloudSystemDisk, s conversion.Scope) error { - out.Category = in.Category - out.Size = in.Size - return nil -} - -// Convert_machine_AlicloudSystemDisk_To_v1alpha1_AlicloudSystemDisk is an autogenerated conversion function. 
-func Convert_machine_AlicloudSystemDisk_To_v1alpha1_AlicloudSystemDisk(in *machine.AlicloudSystemDisk, out *AlicloudSystemDisk, s conversion.Scope) error { - return autoConvert_machine_AlicloudSystemDisk_To_v1alpha1_AlicloudSystemDisk(in, out, s) -} - -func autoConvert_v1alpha1_AzureDataDisk_To_machine_AzureDataDisk(in *AzureDataDisk, out *machine.AzureDataDisk, s conversion.Scope) error { - out.Name = in.Name - out.Lun = (*int32)(unsafe.Pointer(in.Lun)) - out.Caching = in.Caching - out.StorageAccountType = in.StorageAccountType - out.DiskSizeGB = in.DiskSizeGB - return nil -} - -// Convert_v1alpha1_AzureDataDisk_To_machine_AzureDataDisk is an autogenerated conversion function. -func Convert_v1alpha1_AzureDataDisk_To_machine_AzureDataDisk(in *AzureDataDisk, out *machine.AzureDataDisk, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureDataDisk_To_machine_AzureDataDisk(in, out, s) -} - -func autoConvert_machine_AzureDataDisk_To_v1alpha1_AzureDataDisk(in *machine.AzureDataDisk, out *AzureDataDisk, s conversion.Scope) error { - out.Name = in.Name - out.Lun = (*int32)(unsafe.Pointer(in.Lun)) - out.Caching = in.Caching - out.StorageAccountType = in.StorageAccountType - out.DiskSizeGB = in.DiskSizeGB - return nil -} - -// Convert_machine_AzureDataDisk_To_v1alpha1_AzureDataDisk is an autogenerated conversion function. -func Convert_machine_AzureDataDisk_To_v1alpha1_AzureDataDisk(in *machine.AzureDataDisk, out *AzureDataDisk, s conversion.Scope) error { - return autoConvert_machine_AzureDataDisk_To_v1alpha1_AzureDataDisk(in, out, s) -} - -func autoConvert_v1alpha1_AzureHardwareProfile_To_machine_AzureHardwareProfile(in *AzureHardwareProfile, out *machine.AzureHardwareProfile, s conversion.Scope) error { - out.VMSize = in.VMSize - return nil -} - -// Convert_v1alpha1_AzureHardwareProfile_To_machine_AzureHardwareProfile is an autogenerated conversion function. -func Convert_v1alpha1_AzureHardwareProfile_To_machine_AzureHardwareProfile(in *AzureHardwareProfile, out *machine.AzureHardwareProfile, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureHardwareProfile_To_machine_AzureHardwareProfile(in, out, s) -} - -func autoConvert_machine_AzureHardwareProfile_To_v1alpha1_AzureHardwareProfile(in *machine.AzureHardwareProfile, out *AzureHardwareProfile, s conversion.Scope) error { - out.VMSize = in.VMSize - return nil -} - -// Convert_machine_AzureHardwareProfile_To_v1alpha1_AzureHardwareProfile is an autogenerated conversion function. -func Convert_machine_AzureHardwareProfile_To_v1alpha1_AzureHardwareProfile(in *machine.AzureHardwareProfile, out *AzureHardwareProfile, s conversion.Scope) error { - return autoConvert_machine_AzureHardwareProfile_To_v1alpha1_AzureHardwareProfile(in, out, s) -} - -func autoConvert_v1alpha1_AzureImageReference_To_machine_AzureImageReference(in *AzureImageReference, out *machine.AzureImageReference, s conversion.Scope) error { - out.ID = in.ID - out.URN = (*string)(unsafe.Pointer(in.URN)) - return nil -} - -// Convert_v1alpha1_AzureImageReference_To_machine_AzureImageReference is an autogenerated conversion function. 
-func Convert_v1alpha1_AzureImageReference_To_machine_AzureImageReference(in *AzureImageReference, out *machine.AzureImageReference, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureImageReference_To_machine_AzureImageReference(in, out, s) -} - -func autoConvert_machine_AzureImageReference_To_v1alpha1_AzureImageReference(in *machine.AzureImageReference, out *AzureImageReference, s conversion.Scope) error { - out.ID = in.ID - out.URN = (*string)(unsafe.Pointer(in.URN)) - return nil -} - -// Convert_machine_AzureImageReference_To_v1alpha1_AzureImageReference is an autogenerated conversion function. -func Convert_machine_AzureImageReference_To_v1alpha1_AzureImageReference(in *machine.AzureImageReference, out *AzureImageReference, s conversion.Scope) error { - return autoConvert_machine_AzureImageReference_To_v1alpha1_AzureImageReference(in, out, s) -} - -func autoConvert_v1alpha1_AzureLinuxConfiguration_To_machine_AzureLinuxConfiguration(in *AzureLinuxConfiguration, out *machine.AzureLinuxConfiguration, s conversion.Scope) error { - out.DisablePasswordAuthentication = in.DisablePasswordAuthentication - if err := Convert_v1alpha1_AzureSSHConfiguration_To_machine_AzureSSHConfiguration(&in.SSH, &out.SSH, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_AzureLinuxConfiguration_To_machine_AzureLinuxConfiguration is an autogenerated conversion function. -func Convert_v1alpha1_AzureLinuxConfiguration_To_machine_AzureLinuxConfiguration(in *AzureLinuxConfiguration, out *machine.AzureLinuxConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureLinuxConfiguration_To_machine_AzureLinuxConfiguration(in, out, s) -} - -func autoConvert_machine_AzureLinuxConfiguration_To_v1alpha1_AzureLinuxConfiguration(in *machine.AzureLinuxConfiguration, out *AzureLinuxConfiguration, s conversion.Scope) error { - out.DisablePasswordAuthentication = in.DisablePasswordAuthentication - if err := Convert_machine_AzureSSHConfiguration_To_v1alpha1_AzureSSHConfiguration(&in.SSH, &out.SSH, s); err != nil { - return err - } - return nil -} - -// Convert_machine_AzureLinuxConfiguration_To_v1alpha1_AzureLinuxConfiguration is an autogenerated conversion function. -func Convert_machine_AzureLinuxConfiguration_To_v1alpha1_AzureLinuxConfiguration(in *machine.AzureLinuxConfiguration, out *AzureLinuxConfiguration, s conversion.Scope) error { - return autoConvert_machine_AzureLinuxConfiguration_To_v1alpha1_AzureLinuxConfiguration(in, out, s) -} - -func autoConvert_v1alpha1_AzureMachineClass_To_machine_AzureMachineClass(in *AzureMachineClass, out *machine.AzureMachineClass, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_AzureMachineClassSpec_To_machine_AzureMachineClassSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_AzureMachineClass_To_machine_AzureMachineClass is an autogenerated conversion function. 
-func Convert_v1alpha1_AzureMachineClass_To_machine_AzureMachineClass(in *AzureMachineClass, out *machine.AzureMachineClass, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureMachineClass_To_machine_AzureMachineClass(in, out, s) -} - -func autoConvert_machine_AzureMachineClass_To_v1alpha1_AzureMachineClass(in *machine.AzureMachineClass, out *AzureMachineClass, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_machine_AzureMachineClassSpec_To_v1alpha1_AzureMachineClassSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_machine_AzureMachineClass_To_v1alpha1_AzureMachineClass is an autogenerated conversion function. -func Convert_machine_AzureMachineClass_To_v1alpha1_AzureMachineClass(in *machine.AzureMachineClass, out *AzureMachineClass, s conversion.Scope) error { - return autoConvert_machine_AzureMachineClass_To_v1alpha1_AzureMachineClass(in, out, s) -} - -func autoConvert_v1alpha1_AzureMachineClassList_To_machine_AzureMachineClassList(in *AzureMachineClassList, out *machine.AzureMachineClassList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]machine.AzureMachineClass)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_AzureMachineClassList_To_machine_AzureMachineClassList is an autogenerated conversion function. -func Convert_v1alpha1_AzureMachineClassList_To_machine_AzureMachineClassList(in *AzureMachineClassList, out *machine.AzureMachineClassList, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureMachineClassList_To_machine_AzureMachineClassList(in, out, s) -} - -func autoConvert_machine_AzureMachineClassList_To_v1alpha1_AzureMachineClassList(in *machine.AzureMachineClassList, out *AzureMachineClassList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]AzureMachineClass)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_machine_AzureMachineClassList_To_v1alpha1_AzureMachineClassList is an autogenerated conversion function. -func Convert_machine_AzureMachineClassList_To_v1alpha1_AzureMachineClassList(in *machine.AzureMachineClassList, out *AzureMachineClassList, s conversion.Scope) error { - return autoConvert_machine_AzureMachineClassList_To_v1alpha1_AzureMachineClassList(in, out, s) -} - -func autoConvert_v1alpha1_AzureMachineClassSpec_To_machine_AzureMachineClassSpec(in *AzureMachineClassSpec, out *machine.AzureMachineClassSpec, s conversion.Scope) error { - out.Location = in.Location - out.Tags = *(*map[string]string)(unsafe.Pointer(&in.Tags)) - if err := Convert_v1alpha1_AzureVirtualMachineProperties_To_machine_AzureVirtualMachineProperties(&in.Properties, &out.Properties, s); err != nil { - return err - } - out.ResourceGroup = in.ResourceGroup - if err := Convert_v1alpha1_AzureSubnetInfo_To_machine_AzureSubnetInfo(&in.SubnetInfo, &out.SubnetInfo, s); err != nil { - return err - } - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.CredentialsSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.CredentialsSecretRef)) - return nil -} - -// Convert_v1alpha1_AzureMachineClassSpec_To_machine_AzureMachineClassSpec is an autogenerated conversion function. 
-func Convert_v1alpha1_AzureMachineClassSpec_To_machine_AzureMachineClassSpec(in *AzureMachineClassSpec, out *machine.AzureMachineClassSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureMachineClassSpec_To_machine_AzureMachineClassSpec(in, out, s) -} - -func autoConvert_machine_AzureMachineClassSpec_To_v1alpha1_AzureMachineClassSpec(in *machine.AzureMachineClassSpec, out *AzureMachineClassSpec, s conversion.Scope) error { - out.Location = in.Location - out.Tags = *(*map[string]string)(unsafe.Pointer(&in.Tags)) - if err := Convert_machine_AzureVirtualMachineProperties_To_v1alpha1_AzureVirtualMachineProperties(&in.Properties, &out.Properties, s); err != nil { - return err - } - out.ResourceGroup = in.ResourceGroup - if err := Convert_machine_AzureSubnetInfo_To_v1alpha1_AzureSubnetInfo(&in.SubnetInfo, &out.SubnetInfo, s); err != nil { - return err - } - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.CredentialsSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.CredentialsSecretRef)) - return nil -} - -// Convert_machine_AzureMachineClassSpec_To_v1alpha1_AzureMachineClassSpec is an autogenerated conversion function. -func Convert_machine_AzureMachineClassSpec_To_v1alpha1_AzureMachineClassSpec(in *machine.AzureMachineClassSpec, out *AzureMachineClassSpec, s conversion.Scope) error { - return autoConvert_machine_AzureMachineClassSpec_To_v1alpha1_AzureMachineClassSpec(in, out, s) -} - -func autoConvert_v1alpha1_AzureMachineSetConfig_To_machine_AzureMachineSetConfig(in *AzureMachineSetConfig, out *machine.AzureMachineSetConfig, s conversion.Scope) error { - out.ID = in.ID - out.Kind = in.Kind - return nil -} - -// Convert_v1alpha1_AzureMachineSetConfig_To_machine_AzureMachineSetConfig is an autogenerated conversion function. -func Convert_v1alpha1_AzureMachineSetConfig_To_machine_AzureMachineSetConfig(in *AzureMachineSetConfig, out *machine.AzureMachineSetConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureMachineSetConfig_To_machine_AzureMachineSetConfig(in, out, s) -} - -func autoConvert_machine_AzureMachineSetConfig_To_v1alpha1_AzureMachineSetConfig(in *machine.AzureMachineSetConfig, out *AzureMachineSetConfig, s conversion.Scope) error { - out.ID = in.ID - out.Kind = in.Kind - return nil -} - -// Convert_machine_AzureMachineSetConfig_To_v1alpha1_AzureMachineSetConfig is an autogenerated conversion function. -func Convert_machine_AzureMachineSetConfig_To_v1alpha1_AzureMachineSetConfig(in *machine.AzureMachineSetConfig, out *AzureMachineSetConfig, s conversion.Scope) error { - return autoConvert_machine_AzureMachineSetConfig_To_v1alpha1_AzureMachineSetConfig(in, out, s) -} - -func autoConvert_v1alpha1_AzureManagedDiskParameters_To_machine_AzureManagedDiskParameters(in *AzureManagedDiskParameters, out *machine.AzureManagedDiskParameters, s conversion.Scope) error { - out.ID = in.ID - out.StorageAccountType = in.StorageAccountType - return nil -} - -// Convert_v1alpha1_AzureManagedDiskParameters_To_machine_AzureManagedDiskParameters is an autogenerated conversion function. 
-func Convert_v1alpha1_AzureManagedDiskParameters_To_machine_AzureManagedDiskParameters(in *AzureManagedDiskParameters, out *machine.AzureManagedDiskParameters, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureManagedDiskParameters_To_machine_AzureManagedDiskParameters(in, out, s) -} - -func autoConvert_machine_AzureManagedDiskParameters_To_v1alpha1_AzureManagedDiskParameters(in *machine.AzureManagedDiskParameters, out *AzureManagedDiskParameters, s conversion.Scope) error { - out.ID = in.ID - out.StorageAccountType = in.StorageAccountType - return nil -} - -// Convert_machine_AzureManagedDiskParameters_To_v1alpha1_AzureManagedDiskParameters is an autogenerated conversion function. -func Convert_machine_AzureManagedDiskParameters_To_v1alpha1_AzureManagedDiskParameters(in *machine.AzureManagedDiskParameters, out *AzureManagedDiskParameters, s conversion.Scope) error { - return autoConvert_machine_AzureManagedDiskParameters_To_v1alpha1_AzureManagedDiskParameters(in, out, s) -} - -func autoConvert_v1alpha1_AzureNetworkInterfaceReference_To_machine_AzureNetworkInterfaceReference(in *AzureNetworkInterfaceReference, out *machine.AzureNetworkInterfaceReference, s conversion.Scope) error { - out.ID = in.ID - out.AzureNetworkInterfaceReferenceProperties = (*machine.AzureNetworkInterfaceReferenceProperties)(unsafe.Pointer(in.AzureNetworkInterfaceReferenceProperties)) - return nil -} - -// Convert_v1alpha1_AzureNetworkInterfaceReference_To_machine_AzureNetworkInterfaceReference is an autogenerated conversion function. -func Convert_v1alpha1_AzureNetworkInterfaceReference_To_machine_AzureNetworkInterfaceReference(in *AzureNetworkInterfaceReference, out *machine.AzureNetworkInterfaceReference, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureNetworkInterfaceReference_To_machine_AzureNetworkInterfaceReference(in, out, s) -} - -func autoConvert_machine_AzureNetworkInterfaceReference_To_v1alpha1_AzureNetworkInterfaceReference(in *machine.AzureNetworkInterfaceReference, out *AzureNetworkInterfaceReference, s conversion.Scope) error { - out.ID = in.ID - out.AzureNetworkInterfaceReferenceProperties = (*AzureNetworkInterfaceReferenceProperties)(unsafe.Pointer(in.AzureNetworkInterfaceReferenceProperties)) - return nil -} - -// Convert_machine_AzureNetworkInterfaceReference_To_v1alpha1_AzureNetworkInterfaceReference is an autogenerated conversion function. -func Convert_machine_AzureNetworkInterfaceReference_To_v1alpha1_AzureNetworkInterfaceReference(in *machine.AzureNetworkInterfaceReference, out *AzureNetworkInterfaceReference, s conversion.Scope) error { - return autoConvert_machine_AzureNetworkInterfaceReference_To_v1alpha1_AzureNetworkInterfaceReference(in, out, s) -} - -func autoConvert_v1alpha1_AzureNetworkInterfaceReferenceProperties_To_machine_AzureNetworkInterfaceReferenceProperties(in *AzureNetworkInterfaceReferenceProperties, out *machine.AzureNetworkInterfaceReferenceProperties, s conversion.Scope) error { - out.Primary = in.Primary - return nil -} - -// Convert_v1alpha1_AzureNetworkInterfaceReferenceProperties_To_machine_AzureNetworkInterfaceReferenceProperties is an autogenerated conversion function. 
-func Convert_v1alpha1_AzureNetworkInterfaceReferenceProperties_To_machine_AzureNetworkInterfaceReferenceProperties(in *AzureNetworkInterfaceReferenceProperties, out *machine.AzureNetworkInterfaceReferenceProperties, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureNetworkInterfaceReferenceProperties_To_machine_AzureNetworkInterfaceReferenceProperties(in, out, s) -} - -func autoConvert_machine_AzureNetworkInterfaceReferenceProperties_To_v1alpha1_AzureNetworkInterfaceReferenceProperties(in *machine.AzureNetworkInterfaceReferenceProperties, out *AzureNetworkInterfaceReferenceProperties, s conversion.Scope) error { - out.Primary = in.Primary - return nil -} - -// Convert_machine_AzureNetworkInterfaceReferenceProperties_To_v1alpha1_AzureNetworkInterfaceReferenceProperties is an autogenerated conversion function. -func Convert_machine_AzureNetworkInterfaceReferenceProperties_To_v1alpha1_AzureNetworkInterfaceReferenceProperties(in *machine.AzureNetworkInterfaceReferenceProperties, out *AzureNetworkInterfaceReferenceProperties, s conversion.Scope) error { - return autoConvert_machine_AzureNetworkInterfaceReferenceProperties_To_v1alpha1_AzureNetworkInterfaceReferenceProperties(in, out, s) -} - -func autoConvert_v1alpha1_AzureNetworkProfile_To_machine_AzureNetworkProfile(in *AzureNetworkProfile, out *machine.AzureNetworkProfile, s conversion.Scope) error { - if err := Convert_v1alpha1_AzureNetworkInterfaceReference_To_machine_AzureNetworkInterfaceReference(&in.NetworkInterfaces, &out.NetworkInterfaces, s); err != nil { - return err - } - out.AcceleratedNetworking = (*bool)(unsafe.Pointer(in.AcceleratedNetworking)) - return nil -} - -// Convert_v1alpha1_AzureNetworkProfile_To_machine_AzureNetworkProfile is an autogenerated conversion function. -func Convert_v1alpha1_AzureNetworkProfile_To_machine_AzureNetworkProfile(in *AzureNetworkProfile, out *machine.AzureNetworkProfile, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureNetworkProfile_To_machine_AzureNetworkProfile(in, out, s) -} - -func autoConvert_machine_AzureNetworkProfile_To_v1alpha1_AzureNetworkProfile(in *machine.AzureNetworkProfile, out *AzureNetworkProfile, s conversion.Scope) error { - if err := Convert_machine_AzureNetworkInterfaceReference_To_v1alpha1_AzureNetworkInterfaceReference(&in.NetworkInterfaces, &out.NetworkInterfaces, s); err != nil { - return err - } - out.AcceleratedNetworking = (*bool)(unsafe.Pointer(in.AcceleratedNetworking)) - return nil -} - -// Convert_machine_AzureNetworkProfile_To_v1alpha1_AzureNetworkProfile is an autogenerated conversion function. -func Convert_machine_AzureNetworkProfile_To_v1alpha1_AzureNetworkProfile(in *machine.AzureNetworkProfile, out *AzureNetworkProfile, s conversion.Scope) error { - return autoConvert_machine_AzureNetworkProfile_To_v1alpha1_AzureNetworkProfile(in, out, s) -} - -func autoConvert_v1alpha1_AzureOSDisk_To_machine_AzureOSDisk(in *AzureOSDisk, out *machine.AzureOSDisk, s conversion.Scope) error { - out.Name = in.Name - out.Caching = in.Caching - if err := Convert_v1alpha1_AzureManagedDiskParameters_To_machine_AzureManagedDiskParameters(&in.ManagedDisk, &out.ManagedDisk, s); err != nil { - return err - } - out.DiskSizeGB = in.DiskSizeGB - out.CreateOption = in.CreateOption - return nil -} - -// Convert_v1alpha1_AzureOSDisk_To_machine_AzureOSDisk is an autogenerated conversion function. 
-func Convert_v1alpha1_AzureOSDisk_To_machine_AzureOSDisk(in *AzureOSDisk, out *machine.AzureOSDisk, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureOSDisk_To_machine_AzureOSDisk(in, out, s) -} - -func autoConvert_machine_AzureOSDisk_To_v1alpha1_AzureOSDisk(in *machine.AzureOSDisk, out *AzureOSDisk, s conversion.Scope) error { - out.Name = in.Name - out.Caching = in.Caching - if err := Convert_machine_AzureManagedDiskParameters_To_v1alpha1_AzureManagedDiskParameters(&in.ManagedDisk, &out.ManagedDisk, s); err != nil { - return err - } - out.DiskSizeGB = in.DiskSizeGB - out.CreateOption = in.CreateOption - return nil -} - -// Convert_machine_AzureOSDisk_To_v1alpha1_AzureOSDisk is an autogenerated conversion function. -func Convert_machine_AzureOSDisk_To_v1alpha1_AzureOSDisk(in *machine.AzureOSDisk, out *AzureOSDisk, s conversion.Scope) error { - return autoConvert_machine_AzureOSDisk_To_v1alpha1_AzureOSDisk(in, out, s) -} - -func autoConvert_v1alpha1_AzureOSProfile_To_machine_AzureOSProfile(in *AzureOSProfile, out *machine.AzureOSProfile, s conversion.Scope) error { - out.ComputerName = in.ComputerName - out.AdminUsername = in.AdminUsername - out.AdminPassword = in.AdminPassword - out.CustomData = in.CustomData - if err := Convert_v1alpha1_AzureLinuxConfiguration_To_machine_AzureLinuxConfiguration(&in.LinuxConfiguration, &out.LinuxConfiguration, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_AzureOSProfile_To_machine_AzureOSProfile is an autogenerated conversion function. -func Convert_v1alpha1_AzureOSProfile_To_machine_AzureOSProfile(in *AzureOSProfile, out *machine.AzureOSProfile, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureOSProfile_To_machine_AzureOSProfile(in, out, s) -} - -func autoConvert_machine_AzureOSProfile_To_v1alpha1_AzureOSProfile(in *machine.AzureOSProfile, out *AzureOSProfile, s conversion.Scope) error { - out.ComputerName = in.ComputerName - out.AdminUsername = in.AdminUsername - out.AdminPassword = in.AdminPassword - out.CustomData = in.CustomData - if err := Convert_machine_AzureLinuxConfiguration_To_v1alpha1_AzureLinuxConfiguration(&in.LinuxConfiguration, &out.LinuxConfiguration, s); err != nil { - return err - } - return nil -} - -// Convert_machine_AzureOSProfile_To_v1alpha1_AzureOSProfile is an autogenerated conversion function. -func Convert_machine_AzureOSProfile_To_v1alpha1_AzureOSProfile(in *machine.AzureOSProfile, out *AzureOSProfile, s conversion.Scope) error { - return autoConvert_machine_AzureOSProfile_To_v1alpha1_AzureOSProfile(in, out, s) -} - -func autoConvert_v1alpha1_AzureSSHConfiguration_To_machine_AzureSSHConfiguration(in *AzureSSHConfiguration, out *machine.AzureSSHConfiguration, s conversion.Scope) error { - if err := Convert_v1alpha1_AzureSSHPublicKey_To_machine_AzureSSHPublicKey(&in.PublicKeys, &out.PublicKeys, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_AzureSSHConfiguration_To_machine_AzureSSHConfiguration is an autogenerated conversion function. 
-func Convert_v1alpha1_AzureSSHConfiguration_To_machine_AzureSSHConfiguration(in *AzureSSHConfiguration, out *machine.AzureSSHConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureSSHConfiguration_To_machine_AzureSSHConfiguration(in, out, s) -} - -func autoConvert_machine_AzureSSHConfiguration_To_v1alpha1_AzureSSHConfiguration(in *machine.AzureSSHConfiguration, out *AzureSSHConfiguration, s conversion.Scope) error { - if err := Convert_machine_AzureSSHPublicKey_To_v1alpha1_AzureSSHPublicKey(&in.PublicKeys, &out.PublicKeys, s); err != nil { - return err - } - return nil -} - -// Convert_machine_AzureSSHConfiguration_To_v1alpha1_AzureSSHConfiguration is an autogenerated conversion function. -func Convert_machine_AzureSSHConfiguration_To_v1alpha1_AzureSSHConfiguration(in *machine.AzureSSHConfiguration, out *AzureSSHConfiguration, s conversion.Scope) error { - return autoConvert_machine_AzureSSHConfiguration_To_v1alpha1_AzureSSHConfiguration(in, out, s) -} - -func autoConvert_v1alpha1_AzureSSHPublicKey_To_machine_AzureSSHPublicKey(in *AzureSSHPublicKey, out *machine.AzureSSHPublicKey, s conversion.Scope) error { - out.Path = in.Path - out.KeyData = in.KeyData - return nil -} - -// Convert_v1alpha1_AzureSSHPublicKey_To_machine_AzureSSHPublicKey is an autogenerated conversion function. -func Convert_v1alpha1_AzureSSHPublicKey_To_machine_AzureSSHPublicKey(in *AzureSSHPublicKey, out *machine.AzureSSHPublicKey, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureSSHPublicKey_To_machine_AzureSSHPublicKey(in, out, s) -} - -func autoConvert_machine_AzureSSHPublicKey_To_v1alpha1_AzureSSHPublicKey(in *machine.AzureSSHPublicKey, out *AzureSSHPublicKey, s conversion.Scope) error { - out.Path = in.Path - out.KeyData = in.KeyData - return nil -} - -// Convert_machine_AzureSSHPublicKey_To_v1alpha1_AzureSSHPublicKey is an autogenerated conversion function. -func Convert_machine_AzureSSHPublicKey_To_v1alpha1_AzureSSHPublicKey(in *machine.AzureSSHPublicKey, out *AzureSSHPublicKey, s conversion.Scope) error { - return autoConvert_machine_AzureSSHPublicKey_To_v1alpha1_AzureSSHPublicKey(in, out, s) -} - -func autoConvert_v1alpha1_AzureStorageProfile_To_machine_AzureStorageProfile(in *AzureStorageProfile, out *machine.AzureStorageProfile, s conversion.Scope) error { - if err := Convert_v1alpha1_AzureImageReference_To_machine_AzureImageReference(&in.ImageReference, &out.ImageReference, s); err != nil { - return err - } - if err := Convert_v1alpha1_AzureOSDisk_To_machine_AzureOSDisk(&in.OsDisk, &out.OsDisk, s); err != nil { - return err - } - out.DataDisks = *(*[]machine.AzureDataDisk)(unsafe.Pointer(&in.DataDisks)) - return nil -} - -// Convert_v1alpha1_AzureStorageProfile_To_machine_AzureStorageProfile is an autogenerated conversion function. 
-func Convert_v1alpha1_AzureStorageProfile_To_machine_AzureStorageProfile(in *AzureStorageProfile, out *machine.AzureStorageProfile, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureStorageProfile_To_machine_AzureStorageProfile(in, out, s) -} - -func autoConvert_machine_AzureStorageProfile_To_v1alpha1_AzureStorageProfile(in *machine.AzureStorageProfile, out *AzureStorageProfile, s conversion.Scope) error { - if err := Convert_machine_AzureImageReference_To_v1alpha1_AzureImageReference(&in.ImageReference, &out.ImageReference, s); err != nil { - return err - } - if err := Convert_machine_AzureOSDisk_To_v1alpha1_AzureOSDisk(&in.OsDisk, &out.OsDisk, s); err != nil { + return Convert_machine_MachineTemplateSpec_To_v1alpha1_MachineTemplateSpec(a.(*machine.MachineTemplateSpec), b.(*MachineTemplateSpec), scope) + }); err != nil { return err } - out.DataDisks = *(*[]AzureDataDisk)(unsafe.Pointer(&in.DataDisks)) - return nil -} - -// Convert_machine_AzureStorageProfile_To_v1alpha1_AzureStorageProfile is an autogenerated conversion function. -func Convert_machine_AzureStorageProfile_To_v1alpha1_AzureStorageProfile(in *machine.AzureStorageProfile, out *AzureStorageProfile, s conversion.Scope) error { - return autoConvert_machine_AzureStorageProfile_To_v1alpha1_AzureStorageProfile(in, out, s) -} - -func autoConvert_v1alpha1_AzureSubResource_To_machine_AzureSubResource(in *AzureSubResource, out *machine.AzureSubResource, s conversion.Scope) error { - out.ID = in.ID - return nil -} - -// Convert_v1alpha1_AzureSubResource_To_machine_AzureSubResource is an autogenerated conversion function. -func Convert_v1alpha1_AzureSubResource_To_machine_AzureSubResource(in *AzureSubResource, out *machine.AzureSubResource, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureSubResource_To_machine_AzureSubResource(in, out, s) -} - -func autoConvert_machine_AzureSubResource_To_v1alpha1_AzureSubResource(in *machine.AzureSubResource, out *AzureSubResource, s conversion.Scope) error { - out.ID = in.ID - return nil -} - -// Convert_machine_AzureSubResource_To_v1alpha1_AzureSubResource is an autogenerated conversion function. -func Convert_machine_AzureSubResource_To_v1alpha1_AzureSubResource(in *machine.AzureSubResource, out *AzureSubResource, s conversion.Scope) error { - return autoConvert_machine_AzureSubResource_To_v1alpha1_AzureSubResource(in, out, s) -} - -func autoConvert_v1alpha1_AzureSubnetInfo_To_machine_AzureSubnetInfo(in *AzureSubnetInfo, out *machine.AzureSubnetInfo, s conversion.Scope) error { - out.VnetName = in.VnetName - out.VnetResourceGroup = (*string)(unsafe.Pointer(in.VnetResourceGroup)) - out.SubnetName = in.SubnetName - return nil -} - -// Convert_v1alpha1_AzureSubnetInfo_To_machine_AzureSubnetInfo is an autogenerated conversion function. -func Convert_v1alpha1_AzureSubnetInfo_To_machine_AzureSubnetInfo(in *AzureSubnetInfo, out *machine.AzureSubnetInfo, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureSubnetInfo_To_machine_AzureSubnetInfo(in, out, s) -} - -func autoConvert_machine_AzureSubnetInfo_To_v1alpha1_AzureSubnetInfo(in *machine.AzureSubnetInfo, out *AzureSubnetInfo, s conversion.Scope) error { - out.VnetName = in.VnetName - out.VnetResourceGroup = (*string)(unsafe.Pointer(in.VnetResourceGroup)) - out.SubnetName = in.SubnetName - return nil -} - -// Convert_machine_AzureSubnetInfo_To_v1alpha1_AzureSubnetInfo is an autogenerated conversion function. 
-func Convert_machine_AzureSubnetInfo_To_v1alpha1_AzureSubnetInfo(in *machine.AzureSubnetInfo, out *AzureSubnetInfo, s conversion.Scope) error { - return autoConvert_machine_AzureSubnetInfo_To_v1alpha1_AzureSubnetInfo(in, out, s) -} - -func autoConvert_v1alpha1_AzureVirtualMachineProperties_To_machine_AzureVirtualMachineProperties(in *AzureVirtualMachineProperties, out *machine.AzureVirtualMachineProperties, s conversion.Scope) error { - if err := Convert_v1alpha1_AzureHardwareProfile_To_machine_AzureHardwareProfile(&in.HardwareProfile, &out.HardwareProfile, s); err != nil { + if err := s.AddGeneratedConversionFunc((*NodeTemplate)(nil), (*machine.NodeTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NodeTemplate_To_machine_NodeTemplate(a.(*NodeTemplate), b.(*machine.NodeTemplate), scope) + }); err != nil { return err } - if err := Convert_v1alpha1_AzureStorageProfile_To_machine_AzureStorageProfile(&in.StorageProfile, &out.StorageProfile, s); err != nil { + if err := s.AddGeneratedConversionFunc((*machine.NodeTemplate)(nil), (*NodeTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_machine_NodeTemplate_To_v1alpha1_NodeTemplate(a.(*machine.NodeTemplate), b.(*NodeTemplate), scope) + }); err != nil { return err } - if err := Convert_v1alpha1_AzureOSProfile_To_machine_AzureOSProfile(&in.OsProfile, &out.OsProfile, s); err != nil { + if err := s.AddGeneratedConversionFunc((*NodeTemplateSpec)(nil), (*machine.NodeTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NodeTemplateSpec_To_machine_NodeTemplateSpec(a.(*NodeTemplateSpec), b.(*machine.NodeTemplateSpec), scope) + }); err != nil { return err } - if err := Convert_v1alpha1_AzureNetworkProfile_To_machine_AzureNetworkProfile(&in.NetworkProfile, &out.NetworkProfile, s); err != nil { + if err := s.AddGeneratedConversionFunc((*machine.NodeTemplateSpec)(nil), (*NodeTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_machine_NodeTemplateSpec_To_v1alpha1_NodeTemplateSpec(a.(*machine.NodeTemplateSpec), b.(*NodeTemplateSpec), scope) + }); err != nil { return err } - out.AvailabilitySet = (*machine.AzureSubResource)(unsafe.Pointer(in.AvailabilitySet)) - out.IdentityID = (*string)(unsafe.Pointer(in.IdentityID)) - out.Zone = (*int)(unsafe.Pointer(in.Zone)) - out.MachineSet = (*machine.AzureMachineSetConfig)(unsafe.Pointer(in.MachineSet)) - return nil -} - -// Convert_v1alpha1_AzureVirtualMachineProperties_To_machine_AzureVirtualMachineProperties is an autogenerated conversion function. 
-func Convert_v1alpha1_AzureVirtualMachineProperties_To_machine_AzureVirtualMachineProperties(in *AzureVirtualMachineProperties, out *machine.AzureVirtualMachineProperties, s conversion.Scope) error { - return autoConvert_v1alpha1_AzureVirtualMachineProperties_To_machine_AzureVirtualMachineProperties(in, out, s) -} - -func autoConvert_machine_AzureVirtualMachineProperties_To_v1alpha1_AzureVirtualMachineProperties(in *machine.AzureVirtualMachineProperties, out *AzureVirtualMachineProperties, s conversion.Scope) error { - if err := Convert_machine_AzureHardwareProfile_To_v1alpha1_AzureHardwareProfile(&in.HardwareProfile, &out.HardwareProfile, s); err != nil { + if err := s.AddGeneratedConversionFunc((*RollbackConfig)(nil), (*machine.RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_RollbackConfig_To_machine_RollbackConfig(a.(*RollbackConfig), b.(*machine.RollbackConfig), scope) + }); err != nil { return err } - if err := Convert_machine_AzureStorageProfile_To_v1alpha1_AzureStorageProfile(&in.StorageProfile, &out.StorageProfile, s); err != nil { + if err := s.AddGeneratedConversionFunc((*machine.RollbackConfig)(nil), (*RollbackConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_machine_RollbackConfig_To_v1alpha1_RollbackConfig(a.(*machine.RollbackConfig), b.(*RollbackConfig), scope) + }); err != nil { return err } - if err := Convert_machine_AzureOSProfile_To_v1alpha1_AzureOSProfile(&in.OsProfile, &out.OsProfile, s); err != nil { + if err := s.AddGeneratedConversionFunc((*RollingUpdateMachineDeployment)(nil), (*machine.RollingUpdateMachineDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_RollingUpdateMachineDeployment_To_machine_RollingUpdateMachineDeployment(a.(*RollingUpdateMachineDeployment), b.(*machine.RollingUpdateMachineDeployment), scope) + }); err != nil { return err } - if err := Convert_machine_AzureNetworkProfile_To_v1alpha1_AzureNetworkProfile(&in.NetworkProfile, &out.NetworkProfile, s); err != nil { + if err := s.AddGeneratedConversionFunc((*machine.RollingUpdateMachineDeployment)(nil), (*RollingUpdateMachineDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_machine_RollingUpdateMachineDeployment_To_v1alpha1_RollingUpdateMachineDeployment(a.(*machine.RollingUpdateMachineDeployment), b.(*RollingUpdateMachineDeployment), scope) + }); err != nil { return err } - out.AvailabilitySet = (*AzureSubResource)(unsafe.Pointer(in.AvailabilitySet)) - out.IdentityID = (*string)(unsafe.Pointer(in.IdentityID)) - out.Zone = (*int)(unsafe.Pointer(in.Zone)) - out.MachineSet = (*AzureMachineSetConfig)(unsafe.Pointer(in.MachineSet)) return nil } -// Convert_machine_AzureVirtualMachineProperties_To_v1alpha1_AzureVirtualMachineProperties is an autogenerated conversion function. 
-func Convert_machine_AzureVirtualMachineProperties_To_v1alpha1_AzureVirtualMachineProperties(in *machine.AzureVirtualMachineProperties, out *AzureVirtualMachineProperties, s conversion.Scope) error { - return autoConvert_machine_AzureVirtualMachineProperties_To_v1alpha1_AzureVirtualMachineProperties(in, out, s) -} - func autoConvert_v1alpha1_ClassSpec_To_machine_ClassSpec(in *ClassSpec, out *machine.ClassSpec, s conversion.Scope) error { out.APIGroup = in.APIGroup out.Kind = in.Kind @@ -1766,232 +360,9 @@ func Convert_machine_CurrentStatus_To_v1alpha1_CurrentStatus(in *machine.Current return autoConvert_machine_CurrentStatus_To_v1alpha1_CurrentStatus(in, out, s) } -func autoConvert_v1alpha1_GCPDisk_To_machine_GCPDisk(in *GCPDisk, out *machine.GCPDisk, s conversion.Scope) error { - out.AutoDelete = (*bool)(unsafe.Pointer(in.AutoDelete)) - out.Boot = in.Boot - out.SizeGb = in.SizeGb - out.Type = in.Type - out.Interface = in.Interface - out.Image = in.Image - out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) - return nil -} - -// Convert_v1alpha1_GCPDisk_To_machine_GCPDisk is an autogenerated conversion function. -func Convert_v1alpha1_GCPDisk_To_machine_GCPDisk(in *GCPDisk, out *machine.GCPDisk, s conversion.Scope) error { - return autoConvert_v1alpha1_GCPDisk_To_machine_GCPDisk(in, out, s) -} - -func autoConvert_machine_GCPDisk_To_v1alpha1_GCPDisk(in *machine.GCPDisk, out *GCPDisk, s conversion.Scope) error { - out.AutoDelete = (*bool)(unsafe.Pointer(in.AutoDelete)) - out.Boot = in.Boot - out.SizeGb = in.SizeGb - out.Type = in.Type - out.Interface = in.Interface - out.Image = in.Image - out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) - return nil -} - -// Convert_machine_GCPDisk_To_v1alpha1_GCPDisk is an autogenerated conversion function. -func Convert_machine_GCPDisk_To_v1alpha1_GCPDisk(in *machine.GCPDisk, out *GCPDisk, s conversion.Scope) error { - return autoConvert_machine_GCPDisk_To_v1alpha1_GCPDisk(in, out, s) -} - -func autoConvert_v1alpha1_GCPMachineClass_To_machine_GCPMachineClass(in *GCPMachineClass, out *machine.GCPMachineClass, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_GCPMachineClassSpec_To_machine_GCPMachineClassSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_GCPMachineClass_To_machine_GCPMachineClass is an autogenerated conversion function. -func Convert_v1alpha1_GCPMachineClass_To_machine_GCPMachineClass(in *GCPMachineClass, out *machine.GCPMachineClass, s conversion.Scope) error { - return autoConvert_v1alpha1_GCPMachineClass_To_machine_GCPMachineClass(in, out, s) -} - -func autoConvert_machine_GCPMachineClass_To_v1alpha1_GCPMachineClass(in *machine.GCPMachineClass, out *GCPMachineClass, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_machine_GCPMachineClassSpec_To_v1alpha1_GCPMachineClassSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_machine_GCPMachineClass_To_v1alpha1_GCPMachineClass is an autogenerated conversion function. 
-func Convert_machine_GCPMachineClass_To_v1alpha1_GCPMachineClass(in *machine.GCPMachineClass, out *GCPMachineClass, s conversion.Scope) error { - return autoConvert_machine_GCPMachineClass_To_v1alpha1_GCPMachineClass(in, out, s) -} - -func autoConvert_v1alpha1_GCPMachineClassList_To_machine_GCPMachineClassList(in *GCPMachineClassList, out *machine.GCPMachineClassList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]machine.GCPMachineClass)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_GCPMachineClassList_To_machine_GCPMachineClassList is an autogenerated conversion function. -func Convert_v1alpha1_GCPMachineClassList_To_machine_GCPMachineClassList(in *GCPMachineClassList, out *machine.GCPMachineClassList, s conversion.Scope) error { - return autoConvert_v1alpha1_GCPMachineClassList_To_machine_GCPMachineClassList(in, out, s) -} - -func autoConvert_machine_GCPMachineClassList_To_v1alpha1_GCPMachineClassList(in *machine.GCPMachineClassList, out *GCPMachineClassList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]GCPMachineClass)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_machine_GCPMachineClassList_To_v1alpha1_GCPMachineClassList is an autogenerated conversion function. -func Convert_machine_GCPMachineClassList_To_v1alpha1_GCPMachineClassList(in *machine.GCPMachineClassList, out *GCPMachineClassList, s conversion.Scope) error { - return autoConvert_machine_GCPMachineClassList_To_v1alpha1_GCPMachineClassList(in, out, s) -} - -func autoConvert_v1alpha1_GCPMachineClassSpec_To_machine_GCPMachineClassSpec(in *GCPMachineClassSpec, out *machine.GCPMachineClassSpec, s conversion.Scope) error { - out.CanIpForward = in.CanIpForward - out.DeletionProtection = in.DeletionProtection - out.Description = (*string)(unsafe.Pointer(in.Description)) - out.Disks = *(*[]*machine.GCPDisk)(unsafe.Pointer(&in.Disks)) - out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) - out.MachineType = in.MachineType - out.Metadata = *(*[]*machine.GCPMetadata)(unsafe.Pointer(&in.Metadata)) - out.NetworkInterfaces = *(*[]*machine.GCPNetworkInterface)(unsafe.Pointer(&in.NetworkInterfaces)) - if err := Convert_v1alpha1_GCPScheduling_To_machine_GCPScheduling(&in.Scheduling, &out.Scheduling, s); err != nil { - return err - } - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.CredentialsSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.CredentialsSecretRef)) - out.ServiceAccounts = *(*[]machine.GCPServiceAccount)(unsafe.Pointer(&in.ServiceAccounts)) - out.Tags = *(*[]string)(unsafe.Pointer(&in.Tags)) - out.Region = in.Region - out.Zone = in.Zone - return nil -} - -// Convert_v1alpha1_GCPMachineClassSpec_To_machine_GCPMachineClassSpec is an autogenerated conversion function. 
-func Convert_v1alpha1_GCPMachineClassSpec_To_machine_GCPMachineClassSpec(in *GCPMachineClassSpec, out *machine.GCPMachineClassSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_GCPMachineClassSpec_To_machine_GCPMachineClassSpec(in, out, s) -} - -func autoConvert_machine_GCPMachineClassSpec_To_v1alpha1_GCPMachineClassSpec(in *machine.GCPMachineClassSpec, out *GCPMachineClassSpec, s conversion.Scope) error { - out.CanIpForward = in.CanIpForward - out.DeletionProtection = in.DeletionProtection - out.Description = (*string)(unsafe.Pointer(in.Description)) - out.Disks = *(*[]*GCPDisk)(unsafe.Pointer(&in.Disks)) - out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) - out.MachineType = in.MachineType - out.Metadata = *(*[]*GCPMetadata)(unsafe.Pointer(&in.Metadata)) - out.NetworkInterfaces = *(*[]*GCPNetworkInterface)(unsafe.Pointer(&in.NetworkInterfaces)) - if err := Convert_machine_GCPScheduling_To_v1alpha1_GCPScheduling(&in.Scheduling, &out.Scheduling, s); err != nil { - return err - } - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.CredentialsSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.CredentialsSecretRef)) - out.ServiceAccounts = *(*[]GCPServiceAccount)(unsafe.Pointer(&in.ServiceAccounts)) - out.Tags = *(*[]string)(unsafe.Pointer(&in.Tags)) - out.Region = in.Region - out.Zone = in.Zone - return nil -} - -// Convert_machine_GCPMachineClassSpec_To_v1alpha1_GCPMachineClassSpec is an autogenerated conversion function. -func Convert_machine_GCPMachineClassSpec_To_v1alpha1_GCPMachineClassSpec(in *machine.GCPMachineClassSpec, out *GCPMachineClassSpec, s conversion.Scope) error { - return autoConvert_machine_GCPMachineClassSpec_To_v1alpha1_GCPMachineClassSpec(in, out, s) -} - -func autoConvert_v1alpha1_GCPMetadata_To_machine_GCPMetadata(in *GCPMetadata, out *machine.GCPMetadata, s conversion.Scope) error { - out.Key = in.Key - out.Value = (*string)(unsafe.Pointer(in.Value)) - return nil -} - -// Convert_v1alpha1_GCPMetadata_To_machine_GCPMetadata is an autogenerated conversion function. -func Convert_v1alpha1_GCPMetadata_To_machine_GCPMetadata(in *GCPMetadata, out *machine.GCPMetadata, s conversion.Scope) error { - return autoConvert_v1alpha1_GCPMetadata_To_machine_GCPMetadata(in, out, s) -} - -func autoConvert_machine_GCPMetadata_To_v1alpha1_GCPMetadata(in *machine.GCPMetadata, out *GCPMetadata, s conversion.Scope) error { - out.Key = in.Key - out.Value = (*string)(unsafe.Pointer(in.Value)) - return nil -} - -// Convert_machine_GCPMetadata_To_v1alpha1_GCPMetadata is an autogenerated conversion function. -func Convert_machine_GCPMetadata_To_v1alpha1_GCPMetadata(in *machine.GCPMetadata, out *GCPMetadata, s conversion.Scope) error { - return autoConvert_machine_GCPMetadata_To_v1alpha1_GCPMetadata(in, out, s) -} - -func autoConvert_v1alpha1_GCPNetworkInterface_To_machine_GCPNetworkInterface(in *GCPNetworkInterface, out *machine.GCPNetworkInterface, s conversion.Scope) error { - out.DisableExternalIP = in.DisableExternalIP - out.Network = in.Network - out.Subnetwork = in.Subnetwork - return nil -} - -// Convert_v1alpha1_GCPNetworkInterface_To_machine_GCPNetworkInterface is an autogenerated conversion function. 
-func Convert_v1alpha1_GCPNetworkInterface_To_machine_GCPNetworkInterface(in *GCPNetworkInterface, out *machine.GCPNetworkInterface, s conversion.Scope) error { - return autoConvert_v1alpha1_GCPNetworkInterface_To_machine_GCPNetworkInterface(in, out, s) -} - -func autoConvert_machine_GCPNetworkInterface_To_v1alpha1_GCPNetworkInterface(in *machine.GCPNetworkInterface, out *GCPNetworkInterface, s conversion.Scope) error { - out.DisableExternalIP = in.DisableExternalIP - out.Network = in.Network - out.Subnetwork = in.Subnetwork - return nil -} - -// Convert_machine_GCPNetworkInterface_To_v1alpha1_GCPNetworkInterface is an autogenerated conversion function. -func Convert_machine_GCPNetworkInterface_To_v1alpha1_GCPNetworkInterface(in *machine.GCPNetworkInterface, out *GCPNetworkInterface, s conversion.Scope) error { - return autoConvert_machine_GCPNetworkInterface_To_v1alpha1_GCPNetworkInterface(in, out, s) -} - -func autoConvert_v1alpha1_GCPScheduling_To_machine_GCPScheduling(in *GCPScheduling, out *machine.GCPScheduling, s conversion.Scope) error { - out.AutomaticRestart = in.AutomaticRestart - out.OnHostMaintenance = in.OnHostMaintenance - out.Preemptible = in.Preemptible - return nil -} - -// Convert_v1alpha1_GCPScheduling_To_machine_GCPScheduling is an autogenerated conversion function. -func Convert_v1alpha1_GCPScheduling_To_machine_GCPScheduling(in *GCPScheduling, out *machine.GCPScheduling, s conversion.Scope) error { - return autoConvert_v1alpha1_GCPScheduling_To_machine_GCPScheduling(in, out, s) -} - -func autoConvert_machine_GCPScheduling_To_v1alpha1_GCPScheduling(in *machine.GCPScheduling, out *GCPScheduling, s conversion.Scope) error { - out.AutomaticRestart = in.AutomaticRestart - out.OnHostMaintenance = in.OnHostMaintenance - out.Preemptible = in.Preemptible - return nil -} - -// Convert_machine_GCPScheduling_To_v1alpha1_GCPScheduling is an autogenerated conversion function. -func Convert_machine_GCPScheduling_To_v1alpha1_GCPScheduling(in *machine.GCPScheduling, out *GCPScheduling, s conversion.Scope) error { - return autoConvert_machine_GCPScheduling_To_v1alpha1_GCPScheduling(in, out, s) -} - -func autoConvert_v1alpha1_GCPServiceAccount_To_machine_GCPServiceAccount(in *GCPServiceAccount, out *machine.GCPServiceAccount, s conversion.Scope) error { - out.Email = in.Email - out.Scopes = *(*[]string)(unsafe.Pointer(&in.Scopes)) - return nil -} - -// Convert_v1alpha1_GCPServiceAccount_To_machine_GCPServiceAccount is an autogenerated conversion function. -func Convert_v1alpha1_GCPServiceAccount_To_machine_GCPServiceAccount(in *GCPServiceAccount, out *machine.GCPServiceAccount, s conversion.Scope) error { - return autoConvert_v1alpha1_GCPServiceAccount_To_machine_GCPServiceAccount(in, out, s) -} - -func autoConvert_machine_GCPServiceAccount_To_v1alpha1_GCPServiceAccount(in *machine.GCPServiceAccount, out *GCPServiceAccount, s conversion.Scope) error { - out.Email = in.Email - out.Scopes = *(*[]string)(unsafe.Pointer(&in.Scopes)) - return nil -} - -// Convert_machine_GCPServiceAccount_To_v1alpha1_GCPServiceAccount is an autogenerated conversion function. 
-func Convert_machine_GCPServiceAccount_To_v1alpha1_GCPServiceAccount(in *machine.GCPServiceAccount, out *GCPServiceAccount, s conversion.Scope) error { - return autoConvert_machine_GCPServiceAccount_To_v1alpha1_GCPServiceAccount(in, out, s) -} - func autoConvert_v1alpha1_LastOperation_To_machine_LastOperation(in *LastOperation, out *machine.LastOperation, s conversion.Scope) error { out.Description = in.Description + out.ErrorCode = in.ErrorCode out.LastUpdateTime = in.LastUpdateTime out.State = machine.MachineState(in.State) out.Type = machine.MachineOperationType(in.Type) @@ -2005,6 +376,7 @@ func Convert_v1alpha1_LastOperation_To_machine_LastOperation(in *LastOperation, func autoConvert_machine_LastOperation_To_v1alpha1_LastOperation(in *machine.LastOperation, out *LastOperation, s conversion.Scope) error { out.Description = in.Description + out.ErrorCode = in.ErrorCode out.LastUpdateTime = in.LastUpdateTime out.State = MachineState(in.State) out.Type = MachineOperationType(in.Type) @@ -2684,216 +1056,6 @@ func Convert_machine_NodeTemplateSpec_To_v1alpha1_NodeTemplateSpec(in *machine.N return autoConvert_machine_NodeTemplateSpec_To_v1alpha1_NodeTemplateSpec(in, out, s) } -func autoConvert_v1alpha1_OpenStackMachineClass_To_machine_OpenStackMachineClass(in *OpenStackMachineClass, out *machine.OpenStackMachineClass, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_OpenStackMachineClassSpec_To_machine_OpenStackMachineClassSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_OpenStackMachineClass_To_machine_OpenStackMachineClass is an autogenerated conversion function. -func Convert_v1alpha1_OpenStackMachineClass_To_machine_OpenStackMachineClass(in *OpenStackMachineClass, out *machine.OpenStackMachineClass, s conversion.Scope) error { - return autoConvert_v1alpha1_OpenStackMachineClass_To_machine_OpenStackMachineClass(in, out, s) -} - -func autoConvert_machine_OpenStackMachineClass_To_v1alpha1_OpenStackMachineClass(in *machine.OpenStackMachineClass, out *OpenStackMachineClass, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_machine_OpenStackMachineClassSpec_To_v1alpha1_OpenStackMachineClassSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_machine_OpenStackMachineClass_To_v1alpha1_OpenStackMachineClass is an autogenerated conversion function. -func Convert_machine_OpenStackMachineClass_To_v1alpha1_OpenStackMachineClass(in *machine.OpenStackMachineClass, out *OpenStackMachineClass, s conversion.Scope) error { - return autoConvert_machine_OpenStackMachineClass_To_v1alpha1_OpenStackMachineClass(in, out, s) -} - -func autoConvert_v1alpha1_OpenStackMachineClassList_To_machine_OpenStackMachineClassList(in *OpenStackMachineClassList, out *machine.OpenStackMachineClassList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]machine.OpenStackMachineClass)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_OpenStackMachineClassList_To_machine_OpenStackMachineClassList is an autogenerated conversion function. 
-func Convert_v1alpha1_OpenStackMachineClassList_To_machine_OpenStackMachineClassList(in *OpenStackMachineClassList, out *machine.OpenStackMachineClassList, s conversion.Scope) error { - return autoConvert_v1alpha1_OpenStackMachineClassList_To_machine_OpenStackMachineClassList(in, out, s) -} - -func autoConvert_machine_OpenStackMachineClassList_To_v1alpha1_OpenStackMachineClassList(in *machine.OpenStackMachineClassList, out *OpenStackMachineClassList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]OpenStackMachineClass)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_machine_OpenStackMachineClassList_To_v1alpha1_OpenStackMachineClassList is an autogenerated conversion function. -func Convert_machine_OpenStackMachineClassList_To_v1alpha1_OpenStackMachineClassList(in *machine.OpenStackMachineClassList, out *OpenStackMachineClassList, s conversion.Scope) error { - return autoConvert_machine_OpenStackMachineClassList_To_v1alpha1_OpenStackMachineClassList(in, out, s) -} - -func autoConvert_v1alpha1_OpenStackMachineClassSpec_To_machine_OpenStackMachineClassSpec(in *OpenStackMachineClassSpec, out *machine.OpenStackMachineClassSpec, s conversion.Scope) error { - out.ImageID = in.ImageID - out.ImageName = in.ImageName - out.Region = in.Region - out.AvailabilityZone = in.AvailabilityZone - out.FlavorName = in.FlavorName - out.KeyName = in.KeyName - out.SecurityGroups = *(*[]string)(unsafe.Pointer(&in.SecurityGroups)) - out.Tags = *(*map[string]string)(unsafe.Pointer(&in.Tags)) - out.NetworkID = in.NetworkID - out.Networks = *(*[]machine.OpenStackNetwork)(unsafe.Pointer(&in.Networks)) - out.SubnetID = (*string)(unsafe.Pointer(in.SubnetID)) - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.CredentialsSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.CredentialsSecretRef)) - out.PodNetworkCidr = in.PodNetworkCidr - out.RootDiskSize = in.RootDiskSize - out.UseConfigDrive = (*bool)(unsafe.Pointer(in.UseConfigDrive)) - out.ServerGroupID = (*string)(unsafe.Pointer(in.ServerGroupID)) - return nil -} - -// Convert_v1alpha1_OpenStackMachineClassSpec_To_machine_OpenStackMachineClassSpec is an autogenerated conversion function. 
-func Convert_v1alpha1_OpenStackMachineClassSpec_To_machine_OpenStackMachineClassSpec(in *OpenStackMachineClassSpec, out *machine.OpenStackMachineClassSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_OpenStackMachineClassSpec_To_machine_OpenStackMachineClassSpec(in, out, s) -} - -func autoConvert_machine_OpenStackMachineClassSpec_To_v1alpha1_OpenStackMachineClassSpec(in *machine.OpenStackMachineClassSpec, out *OpenStackMachineClassSpec, s conversion.Scope) error { - out.ImageID = in.ImageID - out.ImageName = in.ImageName - out.Region = in.Region - out.AvailabilityZone = in.AvailabilityZone - out.FlavorName = in.FlavorName - out.KeyName = in.KeyName - out.SecurityGroups = *(*[]string)(unsafe.Pointer(&in.SecurityGroups)) - out.Tags = *(*map[string]string)(unsafe.Pointer(&in.Tags)) - out.NetworkID = in.NetworkID - out.Networks = *(*[]OpenStackNetwork)(unsafe.Pointer(&in.Networks)) - out.SubnetID = (*string)(unsafe.Pointer(in.SubnetID)) - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.CredentialsSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.CredentialsSecretRef)) - out.PodNetworkCidr = in.PodNetworkCidr - out.RootDiskSize = in.RootDiskSize - out.UseConfigDrive = (*bool)(unsafe.Pointer(in.UseConfigDrive)) - out.ServerGroupID = (*string)(unsafe.Pointer(in.ServerGroupID)) - return nil -} - -// Convert_machine_OpenStackMachineClassSpec_To_v1alpha1_OpenStackMachineClassSpec is an autogenerated conversion function. -func Convert_machine_OpenStackMachineClassSpec_To_v1alpha1_OpenStackMachineClassSpec(in *machine.OpenStackMachineClassSpec, out *OpenStackMachineClassSpec, s conversion.Scope) error { - return autoConvert_machine_OpenStackMachineClassSpec_To_v1alpha1_OpenStackMachineClassSpec(in, out, s) -} - -func autoConvert_v1alpha1_OpenStackNetwork_To_machine_OpenStackNetwork(in *OpenStackNetwork, out *machine.OpenStackNetwork, s conversion.Scope) error { - out.Id = in.Id - out.Name = in.Name - out.PodNetwork = in.PodNetwork - return nil -} - -// Convert_v1alpha1_OpenStackNetwork_To_machine_OpenStackNetwork is an autogenerated conversion function. -func Convert_v1alpha1_OpenStackNetwork_To_machine_OpenStackNetwork(in *OpenStackNetwork, out *machine.OpenStackNetwork, s conversion.Scope) error { - return autoConvert_v1alpha1_OpenStackNetwork_To_machine_OpenStackNetwork(in, out, s) -} - -func autoConvert_machine_OpenStackNetwork_To_v1alpha1_OpenStackNetwork(in *machine.OpenStackNetwork, out *OpenStackNetwork, s conversion.Scope) error { - out.Id = in.Id - out.Name = in.Name - out.PodNetwork = in.PodNetwork - return nil -} - -// Convert_machine_OpenStackNetwork_To_v1alpha1_OpenStackNetwork is an autogenerated conversion function. -func Convert_machine_OpenStackNetwork_To_v1alpha1_OpenStackNetwork(in *machine.OpenStackNetwork, out *OpenStackNetwork, s conversion.Scope) error { - return autoConvert_machine_OpenStackNetwork_To_v1alpha1_OpenStackNetwork(in, out, s) -} - -func autoConvert_v1alpha1_PacketMachineClass_To_machine_PacketMachineClass(in *PacketMachineClass, out *machine.PacketMachineClass, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_PacketMachineClassSpec_To_machine_PacketMachineClassSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_PacketMachineClass_To_machine_PacketMachineClass is an autogenerated conversion function. 
-func Convert_v1alpha1_PacketMachineClass_To_machine_PacketMachineClass(in *PacketMachineClass, out *machine.PacketMachineClass, s conversion.Scope) error { - return autoConvert_v1alpha1_PacketMachineClass_To_machine_PacketMachineClass(in, out, s) -} - -func autoConvert_machine_PacketMachineClass_To_v1alpha1_PacketMachineClass(in *machine.PacketMachineClass, out *PacketMachineClass, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_machine_PacketMachineClassSpec_To_v1alpha1_PacketMachineClassSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_machine_PacketMachineClass_To_v1alpha1_PacketMachineClass is an autogenerated conversion function. -func Convert_machine_PacketMachineClass_To_v1alpha1_PacketMachineClass(in *machine.PacketMachineClass, out *PacketMachineClass, s conversion.Scope) error { - return autoConvert_machine_PacketMachineClass_To_v1alpha1_PacketMachineClass(in, out, s) -} - -func autoConvert_v1alpha1_PacketMachineClassList_To_machine_PacketMachineClassList(in *PacketMachineClassList, out *machine.PacketMachineClassList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]machine.PacketMachineClass)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_PacketMachineClassList_To_machine_PacketMachineClassList is an autogenerated conversion function. -func Convert_v1alpha1_PacketMachineClassList_To_machine_PacketMachineClassList(in *PacketMachineClassList, out *machine.PacketMachineClassList, s conversion.Scope) error { - return autoConvert_v1alpha1_PacketMachineClassList_To_machine_PacketMachineClassList(in, out, s) -} - -func autoConvert_machine_PacketMachineClassList_To_v1alpha1_PacketMachineClassList(in *machine.PacketMachineClassList, out *PacketMachineClassList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]PacketMachineClass)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_machine_PacketMachineClassList_To_v1alpha1_PacketMachineClassList is an autogenerated conversion function. -func Convert_machine_PacketMachineClassList_To_v1alpha1_PacketMachineClassList(in *machine.PacketMachineClassList, out *PacketMachineClassList, s conversion.Scope) error { - return autoConvert_machine_PacketMachineClassList_To_v1alpha1_PacketMachineClassList(in, out, s) -} - -func autoConvert_v1alpha1_PacketMachineClassSpec_To_machine_PacketMachineClassSpec(in *PacketMachineClassSpec, out *machine.PacketMachineClassSpec, s conversion.Scope) error { - out.Facility = *(*[]string)(unsafe.Pointer(&in.Facility)) - out.MachineType = in.MachineType - out.BillingCycle = in.BillingCycle - out.OS = in.OS - out.ProjectID = in.ProjectID - out.Tags = *(*[]string)(unsafe.Pointer(&in.Tags)) - out.SSHKeys = *(*[]string)(unsafe.Pointer(&in.SSHKeys)) - out.UserData = in.UserData - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.CredentialsSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.CredentialsSecretRef)) - return nil -} - -// Convert_v1alpha1_PacketMachineClassSpec_To_machine_PacketMachineClassSpec is an autogenerated conversion function. 
-func Convert_v1alpha1_PacketMachineClassSpec_To_machine_PacketMachineClassSpec(in *PacketMachineClassSpec, out *machine.PacketMachineClassSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_PacketMachineClassSpec_To_machine_PacketMachineClassSpec(in, out, s) -} - -func autoConvert_machine_PacketMachineClassSpec_To_v1alpha1_PacketMachineClassSpec(in *machine.PacketMachineClassSpec, out *PacketMachineClassSpec, s conversion.Scope) error { - out.Facility = *(*[]string)(unsafe.Pointer(&in.Facility)) - out.MachineType = in.MachineType - out.OS = in.OS - out.ProjectID = in.ProjectID - out.BillingCycle = in.BillingCycle - out.Tags = *(*[]string)(unsafe.Pointer(&in.Tags)) - out.SSHKeys = *(*[]string)(unsafe.Pointer(&in.SSHKeys)) - out.UserData = in.UserData - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.CredentialsSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.CredentialsSecretRef)) - return nil -} - -// Convert_machine_PacketMachineClassSpec_To_v1alpha1_PacketMachineClassSpec is an autogenerated conversion function. -func Convert_machine_PacketMachineClassSpec_To_v1alpha1_PacketMachineClassSpec(in *machine.PacketMachineClassSpec, out *PacketMachineClassSpec, s conversion.Scope) error { - return autoConvert_machine_PacketMachineClassSpec_To_v1alpha1_PacketMachineClassSpec(in, out, s) -} - func autoConvert_v1alpha1_RollbackConfig_To_machine_RollbackConfig(in *RollbackConfig, out *machine.RollbackConfig, s conversion.Scope) error { out.Revision = in.Revision return nil diff --git a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/zz_generated.deepcopy.go index b55e3928a..35b368e1a 100644 --- a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1/zz_generated.deepcopy.go @@ -29,1075 +29,34 @@ import ( ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSBlockDeviceMappingSpec) DeepCopyInto(out *AWSBlockDeviceMappingSpec) { - *out = *in - in.Ebs.DeepCopyInto(&out.Ebs) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSBlockDeviceMappingSpec. -func (in *AWSBlockDeviceMappingSpec) DeepCopy() *AWSBlockDeviceMappingSpec { - if in == nil { - return nil - } - out := new(AWSBlockDeviceMappingSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSEbsBlockDeviceSpec) DeepCopyInto(out *AWSEbsBlockDeviceSpec) { - *out = *in - if in.DeleteOnTermination != nil { - in, out := &in.DeleteOnTermination, &out.DeleteOnTermination - *out = new(bool) - **out = **in - } - if in.KmsKeyID != nil { - in, out := &in.KmsKeyID, &out.KmsKeyID - *out = new(string) - **out = **in - } - if in.SnapshotID != nil { - in, out := &in.SnapshotID, &out.SnapshotID - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEbsBlockDeviceSpec. 
-func (in *AWSEbsBlockDeviceSpec) DeepCopy() *AWSEbsBlockDeviceSpec { - if in == nil { - return nil - } - out := new(AWSEbsBlockDeviceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSIAMProfileSpec) DeepCopyInto(out *AWSIAMProfileSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSIAMProfileSpec. -func (in *AWSIAMProfileSpec) DeepCopy() *AWSIAMProfileSpec { - if in == nil { - return nil - } - out := new(AWSIAMProfileSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSMachineClass) DeepCopyInto(out *AWSMachineClass) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.TypeMeta = in.TypeMeta - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineClass. -func (in *AWSMachineClass) DeepCopy() *AWSMachineClass { - if in == nil { - return nil - } - out := new(AWSMachineClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AWSMachineClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSMachineClassList) DeepCopyInto(out *AWSMachineClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]AWSMachineClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineClassList. -func (in *AWSMachineClassList) DeepCopy() *AWSMachineClassList { - if in == nil { - return nil - } - out := new(AWSMachineClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AWSMachineClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AWSMachineClassSpec) DeepCopyInto(out *AWSMachineClassSpec) { - *out = *in - if in.BlockDevices != nil { - in, out := &in.BlockDevices, &out.BlockDevices - *out = make([]AWSBlockDeviceMappingSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - out.IAM = in.IAM - if in.NetworkInterfaces != nil { - in, out := &in.NetworkInterfaces, &out.NetworkInterfaces - *out = make([]AWSNetworkInterfaceSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.SpotPrice != nil { - in, out := &in.SpotPrice, &out.SpotPrice - *out = new(string) - **out = **in - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(v1.SecretReference) - **out = **in - } - if in.CredentialsSecretRef != nil { - in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef - *out = new(v1.SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineClassSpec. -func (in *AWSMachineClassSpec) DeepCopy() *AWSMachineClassSpec { - if in == nil { - return nil - } - out := new(AWSMachineClassSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSNetworkInterfaceSpec) DeepCopyInto(out *AWSNetworkInterfaceSpec) { - *out = *in - if in.AssociatePublicIPAddress != nil { - in, out := &in.AssociatePublicIPAddress, &out.AssociatePublicIPAddress - *out = new(bool) - **out = **in - } - if in.DeleteOnTermination != nil { - in, out := &in.DeleteOnTermination, &out.DeleteOnTermination - *out = new(bool) - **out = **in - } - if in.Description != nil { - in, out := &in.Description, &out.Description - *out = new(string) - **out = **in - } - if in.SecurityGroupIDs != nil { - in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSNetworkInterfaceSpec. -func (in *AWSNetworkInterfaceSpec) DeepCopy() *AWSNetworkInterfaceSpec { - if in == nil { - return nil - } - out := new(AWSNetworkInterfaceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AlicloudDataDisk) DeepCopyInto(out *AlicloudDataDisk) { - *out = *in - if in.DeleteWithInstance != nil { - in, out := &in.DeleteWithInstance, &out.DeleteWithInstance - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlicloudDataDisk. -func (in *AlicloudDataDisk) DeepCopy() *AlicloudDataDisk { - if in == nil { - return nil - } - out := new(AlicloudDataDisk) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AlicloudMachineClass) DeepCopyInto(out *AlicloudMachineClass) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.TypeMeta = in.TypeMeta - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlicloudMachineClass. 
-func (in *AlicloudMachineClass) DeepCopy() *AlicloudMachineClass { - if in == nil { - return nil - } - out := new(AlicloudMachineClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AlicloudMachineClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AlicloudMachineClassList) DeepCopyInto(out *AlicloudMachineClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]AlicloudMachineClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlicloudMachineClassList. -func (in *AlicloudMachineClassList) DeepCopy() *AlicloudMachineClassList { - if in == nil { - return nil - } - out := new(AlicloudMachineClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AlicloudMachineClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AlicloudMachineClassSpec) DeepCopyInto(out *AlicloudMachineClassSpec) { - *out = *in - if in.SystemDisk != nil { - in, out := &in.SystemDisk, &out.SystemDisk - *out = new(AlicloudSystemDisk) - **out = **in - } - if in.DataDisks != nil { - in, out := &in.DataDisks, &out.DataDisks - *out = make([]AlicloudDataDisk, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.InternetMaxBandwidthIn != nil { - in, out := &in.InternetMaxBandwidthIn, &out.InternetMaxBandwidthIn - *out = new(int) - **out = **in - } - if in.InternetMaxBandwidthOut != nil { - in, out := &in.InternetMaxBandwidthOut, &out.InternetMaxBandwidthOut - *out = new(int) - **out = **in - } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(v1.SecretReference) - **out = **in - } - if in.CredentialsSecretRef != nil { - in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef - *out = new(v1.SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlicloudMachineClassSpec. -func (in *AlicloudMachineClassSpec) DeepCopy() *AlicloudMachineClassSpec { - if in == nil { - return nil - } - out := new(AlicloudMachineClassSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AlicloudSystemDisk) DeepCopyInto(out *AlicloudSystemDisk) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlicloudSystemDisk. 
-func (in *AlicloudSystemDisk) DeepCopy() *AlicloudSystemDisk { - if in == nil { - return nil - } - out := new(AlicloudSystemDisk) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureDataDisk) DeepCopyInto(out *AzureDataDisk) { - *out = *in - if in.Lun != nil { - in, out := &in.Lun, &out.Lun - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDataDisk. -func (in *AzureDataDisk) DeepCopy() *AzureDataDisk { - if in == nil { - return nil - } - out := new(AzureDataDisk) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureHardwareProfile) DeepCopyInto(out *AzureHardwareProfile) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureHardwareProfile. -func (in *AzureHardwareProfile) DeepCopy() *AzureHardwareProfile { - if in == nil { - return nil - } - out := new(AzureHardwareProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureImageReference) DeepCopyInto(out *AzureImageReference) { - *out = *in - if in.URN != nil { - in, out := &in.URN, &out.URN - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureImageReference. -func (in *AzureImageReference) DeepCopy() *AzureImageReference { - if in == nil { - return nil - } - out := new(AzureImageReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureLinuxConfiguration) DeepCopyInto(out *AzureLinuxConfiguration) { - *out = *in - out.SSH = in.SSH - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureLinuxConfiguration. -func (in *AzureLinuxConfiguration) DeepCopy() *AzureLinuxConfiguration { - if in == nil { - return nil - } - out := new(AzureLinuxConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureMachineClass) DeepCopyInto(out *AzureMachineClass) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.TypeMeta = in.TypeMeta - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineClass. -func (in *AzureMachineClass) DeepCopy() *AzureMachineClass { - if in == nil { - return nil - } - out := new(AzureMachineClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AzureMachineClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AzureMachineClassList) DeepCopyInto(out *AzureMachineClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]AzureMachineClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineClassList. -func (in *AzureMachineClassList) DeepCopy() *AzureMachineClassList { - if in == nil { - return nil - } - out := new(AzureMachineClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AzureMachineClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureMachineClassSpec) DeepCopyInto(out *AzureMachineClassSpec) { - *out = *in - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - in.Properties.DeepCopyInto(&out.Properties) - in.SubnetInfo.DeepCopyInto(&out.SubnetInfo) - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(v1.SecretReference) - **out = **in - } - if in.CredentialsSecretRef != nil { - in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef - *out = new(v1.SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineClassSpec. -func (in *AzureMachineClassSpec) DeepCopy() *AzureMachineClassSpec { - if in == nil { - return nil - } - out := new(AzureMachineClassSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureMachineSetConfig) DeepCopyInto(out *AzureMachineSetConfig) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineSetConfig. -func (in *AzureMachineSetConfig) DeepCopy() *AzureMachineSetConfig { - if in == nil { - return nil - } - out := new(AzureMachineSetConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureManagedDiskParameters) DeepCopyInto(out *AzureManagedDiskParameters) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedDiskParameters. -func (in *AzureManagedDiskParameters) DeepCopy() *AzureManagedDiskParameters { - if in == nil { - return nil - } - out := new(AzureManagedDiskParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AzureNetworkInterfaceReference) DeepCopyInto(out *AzureNetworkInterfaceReference) { - *out = *in - if in.AzureNetworkInterfaceReferenceProperties != nil { - in, out := &in.AzureNetworkInterfaceReferenceProperties, &out.AzureNetworkInterfaceReferenceProperties - *out = new(AzureNetworkInterfaceReferenceProperties) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureNetworkInterfaceReference. -func (in *AzureNetworkInterfaceReference) DeepCopy() *AzureNetworkInterfaceReference { - if in == nil { - return nil - } - out := new(AzureNetworkInterfaceReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureNetworkInterfaceReferenceProperties) DeepCopyInto(out *AzureNetworkInterfaceReferenceProperties) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureNetworkInterfaceReferenceProperties. -func (in *AzureNetworkInterfaceReferenceProperties) DeepCopy() *AzureNetworkInterfaceReferenceProperties { - if in == nil { - return nil - } - out := new(AzureNetworkInterfaceReferenceProperties) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureNetworkProfile) DeepCopyInto(out *AzureNetworkProfile) { - *out = *in - in.NetworkInterfaces.DeepCopyInto(&out.NetworkInterfaces) - if in.AcceleratedNetworking != nil { - in, out := &in.AcceleratedNetworking, &out.AcceleratedNetworking - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureNetworkProfile. -func (in *AzureNetworkProfile) DeepCopy() *AzureNetworkProfile { - if in == nil { - return nil - } - out := new(AzureNetworkProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureOSDisk) DeepCopyInto(out *AzureOSDisk) { - *out = *in - out.ManagedDisk = in.ManagedDisk - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureOSDisk. -func (in *AzureOSDisk) DeepCopy() *AzureOSDisk { - if in == nil { - return nil - } - out := new(AzureOSDisk) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureOSProfile) DeepCopyInto(out *AzureOSProfile) { - *out = *in - out.LinuxConfiguration = in.LinuxConfiguration - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureOSProfile. -func (in *AzureOSProfile) DeepCopy() *AzureOSProfile { - if in == nil { - return nil - } - out := new(AzureOSProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureSSHConfiguration) DeepCopyInto(out *AzureSSHConfiguration) { - *out = *in - out.PublicKeys = in.PublicKeys - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureSSHConfiguration. 
-func (in *AzureSSHConfiguration) DeepCopy() *AzureSSHConfiguration { - if in == nil { - return nil - } - out := new(AzureSSHConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureSSHPublicKey) DeepCopyInto(out *AzureSSHPublicKey) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureSSHPublicKey. -func (in *AzureSSHPublicKey) DeepCopy() *AzureSSHPublicKey { - if in == nil { - return nil - } - out := new(AzureSSHPublicKey) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureStorageProfile) DeepCopyInto(out *AzureStorageProfile) { - *out = *in - in.ImageReference.DeepCopyInto(&out.ImageReference) - out.OsDisk = in.OsDisk - if in.DataDisks != nil { - in, out := &in.DataDisks, &out.DataDisks - *out = make([]AzureDataDisk, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStorageProfile. -func (in *AzureStorageProfile) DeepCopy() *AzureStorageProfile { - if in == nil { - return nil - } - out := new(AzureStorageProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureSubResource) DeepCopyInto(out *AzureSubResource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureSubResource. -func (in *AzureSubResource) DeepCopy() *AzureSubResource { - if in == nil { - return nil - } - out := new(AzureSubResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureSubnetInfo) DeepCopyInto(out *AzureSubnetInfo) { - *out = *in - if in.VnetResourceGroup != nil { - in, out := &in.VnetResourceGroup, &out.VnetResourceGroup - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureSubnetInfo. -func (in *AzureSubnetInfo) DeepCopy() *AzureSubnetInfo { - if in == nil { - return nil - } - out := new(AzureSubnetInfo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureVirtualMachineProperties) DeepCopyInto(out *AzureVirtualMachineProperties) { - *out = *in - out.HardwareProfile = in.HardwareProfile - in.StorageProfile.DeepCopyInto(&out.StorageProfile) - out.OsProfile = in.OsProfile - in.NetworkProfile.DeepCopyInto(&out.NetworkProfile) - if in.AvailabilitySet != nil { - in, out := &in.AvailabilitySet, &out.AvailabilitySet - *out = new(AzureSubResource) - **out = **in - } - if in.IdentityID != nil { - in, out := &in.IdentityID, &out.IdentityID - *out = new(string) - **out = **in - } - if in.Zone != nil { - in, out := &in.Zone, &out.Zone - *out = new(int) - **out = **in - } - if in.MachineSet != nil { - in, out := &in.MachineSet, &out.MachineSet - *out = new(AzureMachineSetConfig) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureVirtualMachineProperties. 
-func (in *AzureVirtualMachineProperties) DeepCopy() *AzureVirtualMachineProperties { - if in == nil { - return nil - } - out := new(AzureVirtualMachineProperties) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClassSpec) DeepCopyInto(out *ClassSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassSpec. -func (in *ClassSpec) DeepCopy() *ClassSpec { - if in == nil { - return nil - } - out := new(ClassSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CurrentStatus) DeepCopyInto(out *CurrentStatus) { - *out = *in - in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CurrentStatus. -func (in *CurrentStatus) DeepCopy() *CurrentStatus { - if in == nil { - return nil - } - out := new(CurrentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPDisk) DeepCopyInto(out *GCPDisk) { - *out = *in - if in.AutoDelete != nil { - in, out := &in.AutoDelete, &out.AutoDelete - *out = new(bool) - **out = **in - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPDisk. -func (in *GCPDisk) DeepCopy() *GCPDisk { - if in == nil { - return nil - } - out := new(GCPDisk) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPMachineClass) DeepCopyInto(out *GCPMachineClass) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.TypeMeta = in.TypeMeta - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineClass. -func (in *GCPMachineClass) DeepCopy() *GCPMachineClass { - if in == nil { - return nil - } - out := new(GCPMachineClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *GCPMachineClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPMachineClassList) DeepCopyInto(out *GCPMachineClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]GCPMachineClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineClassList. -func (in *GCPMachineClassList) DeepCopy() *GCPMachineClassList { - if in == nil { - return nil - } - out := new(GCPMachineClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *GCPMachineClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPMachineClassSpec) DeepCopyInto(out *GCPMachineClassSpec) { - *out = *in - if in.Description != nil { - in, out := &in.Description, &out.Description - *out = new(string) - **out = **in - } - if in.Disks != nil { - in, out := &in.Disks, &out.Disks - *out = make([]*GCPDisk, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(GCPDisk) - (*in).DeepCopyInto(*out) - } - } - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Metadata != nil { - in, out := &in.Metadata, &out.Metadata - *out = make([]*GCPMetadata, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(GCPMetadata) - (*in).DeepCopyInto(*out) - } - } - } - if in.NetworkInterfaces != nil { - in, out := &in.NetworkInterfaces, &out.NetworkInterfaces - *out = make([]*GCPNetworkInterface, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(GCPNetworkInterface) - **out = **in - } - } - } - out.Scheduling = in.Scheduling - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(v1.SecretReference) - **out = **in - } - if in.CredentialsSecretRef != nil { - in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef - *out = new(v1.SecretReference) - **out = **in - } - if in.ServiceAccounts != nil { - in, out := &in.ServiceAccounts, &out.ServiceAccounts - *out = make([]GCPServiceAccount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineClassSpec. -func (in *GCPMachineClassSpec) DeepCopy() *GCPMachineClassSpec { - if in == nil { - return nil - } - out := new(GCPMachineClassSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPMetadata) DeepCopyInto(out *GCPMetadata) { - *out = *in - if in.Value != nil { - in, out := &in.Value, &out.Value - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMetadata. -func (in *GCPMetadata) DeepCopy() *GCPMetadata { - if in == nil { - return nil - } - out := new(GCPMetadata) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPNetworkInterface) DeepCopyInto(out *GCPNetworkInterface) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPNetworkInterface. -func (in *GCPNetworkInterface) DeepCopy() *GCPNetworkInterface { - if in == nil { - return nil - } - out := new(GCPNetworkInterface) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GCPScheduling) DeepCopyInto(out *GCPScheduling) { +func (in *ClassSpec) DeepCopyInto(out *ClassSpec) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPScheduling. -func (in *GCPScheduling) DeepCopy() *GCPScheduling { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassSpec. +func (in *ClassSpec) DeepCopy() *ClassSpec { if in == nil { return nil } - out := new(GCPScheduling) + out := new(ClassSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPServiceAccount) DeepCopyInto(out *GCPServiceAccount) { +func (in *CurrentStatus) DeepCopyInto(out *CurrentStatus) { *out = *in - if in.Scopes != nil { - in, out := &in.Scopes, &out.Scopes - *out = make([]string, len(*in)) - copy(*out, *in) - } + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPServiceAccount. -func (in *GCPServiceAccount) DeepCopy() *GCPServiceAccount { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CurrentStatus. +func (in *CurrentStatus) DeepCopy() *CurrentStatus { if in == nil { return nil } - out := new(GCPServiceAccount) + out := new(CurrentStatus) in.DeepCopyInto(out) return out } @@ -1733,241 +692,6 @@ func (in *NodeTemplateSpec) DeepCopy() *NodeTemplateSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenStackMachineClass) DeepCopyInto(out *OpenStackMachineClass) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.TypeMeta = in.TypeMeta - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackMachineClass. -func (in *OpenStackMachineClass) DeepCopy() *OpenStackMachineClass { - if in == nil { - return nil - } - out := new(OpenStackMachineClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *OpenStackMachineClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenStackMachineClassList) DeepCopyInto(out *OpenStackMachineClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]OpenStackMachineClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackMachineClassList. -func (in *OpenStackMachineClassList) DeepCopy() *OpenStackMachineClassList { - if in == nil { - return nil - } - out := new(OpenStackMachineClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *OpenStackMachineClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OpenStackMachineClassSpec) DeepCopyInto(out *OpenStackMachineClassSpec) { - *out = *in - if in.SecurityGroups != nil { - in, out := &in.SecurityGroups, &out.SecurityGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Networks != nil { - in, out := &in.Networks, &out.Networks - *out = make([]OpenStackNetwork, len(*in)) - copy(*out, *in) - } - if in.SubnetID != nil { - in, out := &in.SubnetID, &out.SubnetID - *out = new(string) - **out = **in - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(v1.SecretReference) - **out = **in - } - if in.CredentialsSecretRef != nil { - in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef - *out = new(v1.SecretReference) - **out = **in - } - if in.UseConfigDrive != nil { - in, out := &in.UseConfigDrive, &out.UseConfigDrive - *out = new(bool) - **out = **in - } - if in.ServerGroupID != nil { - in, out := &in.ServerGroupID, &out.ServerGroupID - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackMachineClassSpec. -func (in *OpenStackMachineClassSpec) DeepCopy() *OpenStackMachineClassSpec { - if in == nil { - return nil - } - out := new(OpenStackMachineClassSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenStackNetwork) DeepCopyInto(out *OpenStackNetwork) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackNetwork. -func (in *OpenStackNetwork) DeepCopy() *OpenStackNetwork { - if in == nil { - return nil - } - out := new(OpenStackNetwork) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PacketMachineClass) DeepCopyInto(out *PacketMachineClass) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.TypeMeta = in.TypeMeta - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketMachineClass. -func (in *PacketMachineClass) DeepCopy() *PacketMachineClass { - if in == nil { - return nil - } - out := new(PacketMachineClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PacketMachineClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PacketMachineClassList) DeepCopyInto(out *PacketMachineClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PacketMachineClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketMachineClassList. 
-func (in *PacketMachineClassList) DeepCopy() *PacketMachineClassList { - if in == nil { - return nil - } - out := new(PacketMachineClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PacketMachineClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PacketMachineClassSpec) DeepCopyInto(out *PacketMachineClassSpec) { - *out = *in - if in.Facility != nil { - in, out := &in.Facility, &out.Facility - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SSHKeys != nil { - in, out := &in.SSHKeys, &out.SSHKeys - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(v1.SecretReference) - **out = **in - } - if in.CredentialsSecretRef != nil { - in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef - *out = new(v1.SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketMachineClassSpec. -func (in *PacketMachineClassSpec) DeepCopy() *PacketMachineClassSpec { - if in == nil { - return nil - } - out := new(PacketMachineClassSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) { *out = *in diff --git a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/zz_generated.deepcopy.go b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/zz_generated.deepcopy.go index 72a43a822..e9bb94643 100644 --- a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/zz_generated.deepcopy.go +++ b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/zz_generated.deepcopy.go @@ -29,1075 +29,34 @@ import ( ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSBlockDeviceMappingSpec) DeepCopyInto(out *AWSBlockDeviceMappingSpec) { - *out = *in - in.Ebs.DeepCopyInto(&out.Ebs) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSBlockDeviceMappingSpec. -func (in *AWSBlockDeviceMappingSpec) DeepCopy() *AWSBlockDeviceMappingSpec { - if in == nil { - return nil - } - out := new(AWSBlockDeviceMappingSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSEbsBlockDeviceSpec) DeepCopyInto(out *AWSEbsBlockDeviceSpec) { - *out = *in - if in.DeleteOnTermination != nil { - in, out := &in.DeleteOnTermination, &out.DeleteOnTermination - *out = new(bool) - **out = **in - } - if in.KmsKeyID != nil { - in, out := &in.KmsKeyID, &out.KmsKeyID - *out = new(string) - **out = **in - } - if in.SnapshotID != nil { - in, out := &in.SnapshotID, &out.SnapshotID - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEbsBlockDeviceSpec. 
-func (in *AWSEbsBlockDeviceSpec) DeepCopy() *AWSEbsBlockDeviceSpec { - if in == nil { - return nil - } - out := new(AWSEbsBlockDeviceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSIAMProfileSpec) DeepCopyInto(out *AWSIAMProfileSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSIAMProfileSpec. -func (in *AWSIAMProfileSpec) DeepCopy() *AWSIAMProfileSpec { - if in == nil { - return nil - } - out := new(AWSIAMProfileSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSMachineClass) DeepCopyInto(out *AWSMachineClass) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.TypeMeta = in.TypeMeta - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineClass. -func (in *AWSMachineClass) DeepCopy() *AWSMachineClass { - if in == nil { - return nil - } - out := new(AWSMachineClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AWSMachineClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSMachineClassList) DeepCopyInto(out *AWSMachineClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]AWSMachineClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineClassList. -func (in *AWSMachineClassList) DeepCopy() *AWSMachineClassList { - if in == nil { - return nil - } - out := new(AWSMachineClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AWSMachineClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AWSMachineClassSpec) DeepCopyInto(out *AWSMachineClassSpec) { - *out = *in - if in.BlockDevices != nil { - in, out := &in.BlockDevices, &out.BlockDevices - *out = make([]AWSBlockDeviceMappingSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - out.IAM = in.IAM - if in.NetworkInterfaces != nil { - in, out := &in.NetworkInterfaces, &out.NetworkInterfaces - *out = make([]AWSNetworkInterfaceSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.SpotPrice != nil { - in, out := &in.SpotPrice, &out.SpotPrice - *out = new(string) - **out = **in - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(v1.SecretReference) - **out = **in - } - if in.CredentialsSecretRef != nil { - in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef - *out = new(v1.SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineClassSpec. -func (in *AWSMachineClassSpec) DeepCopy() *AWSMachineClassSpec { - if in == nil { - return nil - } - out := new(AWSMachineClassSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSNetworkInterfaceSpec) DeepCopyInto(out *AWSNetworkInterfaceSpec) { - *out = *in - if in.AssociatePublicIPAddress != nil { - in, out := &in.AssociatePublicIPAddress, &out.AssociatePublicIPAddress - *out = new(bool) - **out = **in - } - if in.DeleteOnTermination != nil { - in, out := &in.DeleteOnTermination, &out.DeleteOnTermination - *out = new(bool) - **out = **in - } - if in.Description != nil { - in, out := &in.Description, &out.Description - *out = new(string) - **out = **in - } - if in.SecurityGroupIDs != nil { - in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSNetworkInterfaceSpec. -func (in *AWSNetworkInterfaceSpec) DeepCopy() *AWSNetworkInterfaceSpec { - if in == nil { - return nil - } - out := new(AWSNetworkInterfaceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AlicloudDataDisk) DeepCopyInto(out *AlicloudDataDisk) { - *out = *in - if in.DeleteWithInstance != nil { - in, out := &in.DeleteWithInstance, &out.DeleteWithInstance - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlicloudDataDisk. -func (in *AlicloudDataDisk) DeepCopy() *AlicloudDataDisk { - if in == nil { - return nil - } - out := new(AlicloudDataDisk) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AlicloudMachineClass) DeepCopyInto(out *AlicloudMachineClass) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.TypeMeta = in.TypeMeta - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlicloudMachineClass. 
-func (in *AlicloudMachineClass) DeepCopy() *AlicloudMachineClass { - if in == nil { - return nil - } - out := new(AlicloudMachineClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AlicloudMachineClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AlicloudMachineClassList) DeepCopyInto(out *AlicloudMachineClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]AlicloudMachineClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlicloudMachineClassList. -func (in *AlicloudMachineClassList) DeepCopy() *AlicloudMachineClassList { - if in == nil { - return nil - } - out := new(AlicloudMachineClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AlicloudMachineClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AlicloudMachineClassSpec) DeepCopyInto(out *AlicloudMachineClassSpec) { - *out = *in - if in.SystemDisk != nil { - in, out := &in.SystemDisk, &out.SystemDisk - *out = new(AlicloudSystemDisk) - **out = **in - } - if in.DataDisks != nil { - in, out := &in.DataDisks, &out.DataDisks - *out = make([]AlicloudDataDisk, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.InternetMaxBandwidthIn != nil { - in, out := &in.InternetMaxBandwidthIn, &out.InternetMaxBandwidthIn - *out = new(int) - **out = **in - } - if in.InternetMaxBandwidthOut != nil { - in, out := &in.InternetMaxBandwidthOut, &out.InternetMaxBandwidthOut - *out = new(int) - **out = **in - } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(v1.SecretReference) - **out = **in - } - if in.CredentialsSecretRef != nil { - in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef - *out = new(v1.SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlicloudMachineClassSpec. -func (in *AlicloudMachineClassSpec) DeepCopy() *AlicloudMachineClassSpec { - if in == nil { - return nil - } - out := new(AlicloudMachineClassSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AlicloudSystemDisk) DeepCopyInto(out *AlicloudSystemDisk) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlicloudSystemDisk. 
-func (in *AlicloudSystemDisk) DeepCopy() *AlicloudSystemDisk { - if in == nil { - return nil - } - out := new(AlicloudSystemDisk) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureDataDisk) DeepCopyInto(out *AzureDataDisk) { - *out = *in - if in.Lun != nil { - in, out := &in.Lun, &out.Lun - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDataDisk. -func (in *AzureDataDisk) DeepCopy() *AzureDataDisk { - if in == nil { - return nil - } - out := new(AzureDataDisk) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureHardwareProfile) DeepCopyInto(out *AzureHardwareProfile) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureHardwareProfile. -func (in *AzureHardwareProfile) DeepCopy() *AzureHardwareProfile { - if in == nil { - return nil - } - out := new(AzureHardwareProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureImageReference) DeepCopyInto(out *AzureImageReference) { - *out = *in - if in.URN != nil { - in, out := &in.URN, &out.URN - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureImageReference. -func (in *AzureImageReference) DeepCopy() *AzureImageReference { - if in == nil { - return nil - } - out := new(AzureImageReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureLinuxConfiguration) DeepCopyInto(out *AzureLinuxConfiguration) { - *out = *in - out.SSH = in.SSH - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureLinuxConfiguration. -func (in *AzureLinuxConfiguration) DeepCopy() *AzureLinuxConfiguration { - if in == nil { - return nil - } - out := new(AzureLinuxConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureMachineClass) DeepCopyInto(out *AzureMachineClass) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.TypeMeta = in.TypeMeta - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineClass. -func (in *AzureMachineClass) DeepCopy() *AzureMachineClass { - if in == nil { - return nil - } - out := new(AzureMachineClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AzureMachineClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AzureMachineClassList) DeepCopyInto(out *AzureMachineClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]AzureMachineClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineClassList. -func (in *AzureMachineClassList) DeepCopy() *AzureMachineClassList { - if in == nil { - return nil - } - out := new(AzureMachineClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AzureMachineClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureMachineClassSpec) DeepCopyInto(out *AzureMachineClassSpec) { - *out = *in - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - in.Properties.DeepCopyInto(&out.Properties) - in.SubnetInfo.DeepCopyInto(&out.SubnetInfo) - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(v1.SecretReference) - **out = **in - } - if in.CredentialsSecretRef != nil { - in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef - *out = new(v1.SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineClassSpec. -func (in *AzureMachineClassSpec) DeepCopy() *AzureMachineClassSpec { - if in == nil { - return nil - } - out := new(AzureMachineClassSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureMachineSetConfig) DeepCopyInto(out *AzureMachineSetConfig) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineSetConfig. -func (in *AzureMachineSetConfig) DeepCopy() *AzureMachineSetConfig { - if in == nil { - return nil - } - out := new(AzureMachineSetConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureManagedDiskParameters) DeepCopyInto(out *AzureManagedDiskParameters) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedDiskParameters. -func (in *AzureManagedDiskParameters) DeepCopy() *AzureManagedDiskParameters { - if in == nil { - return nil - } - out := new(AzureManagedDiskParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AzureNetworkInterfaceReference) DeepCopyInto(out *AzureNetworkInterfaceReference) { - *out = *in - if in.AzureNetworkInterfaceReferenceProperties != nil { - in, out := &in.AzureNetworkInterfaceReferenceProperties, &out.AzureNetworkInterfaceReferenceProperties - *out = new(AzureNetworkInterfaceReferenceProperties) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureNetworkInterfaceReference. -func (in *AzureNetworkInterfaceReference) DeepCopy() *AzureNetworkInterfaceReference { - if in == nil { - return nil - } - out := new(AzureNetworkInterfaceReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureNetworkInterfaceReferenceProperties) DeepCopyInto(out *AzureNetworkInterfaceReferenceProperties) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureNetworkInterfaceReferenceProperties. -func (in *AzureNetworkInterfaceReferenceProperties) DeepCopy() *AzureNetworkInterfaceReferenceProperties { - if in == nil { - return nil - } - out := new(AzureNetworkInterfaceReferenceProperties) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureNetworkProfile) DeepCopyInto(out *AzureNetworkProfile) { - *out = *in - in.NetworkInterfaces.DeepCopyInto(&out.NetworkInterfaces) - if in.AcceleratedNetworking != nil { - in, out := &in.AcceleratedNetworking, &out.AcceleratedNetworking - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureNetworkProfile. -func (in *AzureNetworkProfile) DeepCopy() *AzureNetworkProfile { - if in == nil { - return nil - } - out := new(AzureNetworkProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureOSDisk) DeepCopyInto(out *AzureOSDisk) { - *out = *in - out.ManagedDisk = in.ManagedDisk - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureOSDisk. -func (in *AzureOSDisk) DeepCopy() *AzureOSDisk { - if in == nil { - return nil - } - out := new(AzureOSDisk) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureOSProfile) DeepCopyInto(out *AzureOSProfile) { - *out = *in - out.LinuxConfiguration = in.LinuxConfiguration - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureOSProfile. -func (in *AzureOSProfile) DeepCopy() *AzureOSProfile { - if in == nil { - return nil - } - out := new(AzureOSProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureSSHConfiguration) DeepCopyInto(out *AzureSSHConfiguration) { - *out = *in - out.PublicKeys = in.PublicKeys - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureSSHConfiguration. 
-func (in *AzureSSHConfiguration) DeepCopy() *AzureSSHConfiguration { - if in == nil { - return nil - } - out := new(AzureSSHConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureSSHPublicKey) DeepCopyInto(out *AzureSSHPublicKey) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureSSHPublicKey. -func (in *AzureSSHPublicKey) DeepCopy() *AzureSSHPublicKey { - if in == nil { - return nil - } - out := new(AzureSSHPublicKey) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureStorageProfile) DeepCopyInto(out *AzureStorageProfile) { - *out = *in - in.ImageReference.DeepCopyInto(&out.ImageReference) - out.OsDisk = in.OsDisk - if in.DataDisks != nil { - in, out := &in.DataDisks, &out.DataDisks - *out = make([]AzureDataDisk, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStorageProfile. -func (in *AzureStorageProfile) DeepCopy() *AzureStorageProfile { - if in == nil { - return nil - } - out := new(AzureStorageProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureSubResource) DeepCopyInto(out *AzureSubResource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureSubResource. -func (in *AzureSubResource) DeepCopy() *AzureSubResource { - if in == nil { - return nil - } - out := new(AzureSubResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureSubnetInfo) DeepCopyInto(out *AzureSubnetInfo) { - *out = *in - if in.VnetResourceGroup != nil { - in, out := &in.VnetResourceGroup, &out.VnetResourceGroup - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureSubnetInfo. -func (in *AzureSubnetInfo) DeepCopy() *AzureSubnetInfo { - if in == nil { - return nil - } - out := new(AzureSubnetInfo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureVirtualMachineProperties) DeepCopyInto(out *AzureVirtualMachineProperties) { - *out = *in - out.HardwareProfile = in.HardwareProfile - in.StorageProfile.DeepCopyInto(&out.StorageProfile) - out.OsProfile = in.OsProfile - in.NetworkProfile.DeepCopyInto(&out.NetworkProfile) - if in.AvailabilitySet != nil { - in, out := &in.AvailabilitySet, &out.AvailabilitySet - *out = new(AzureSubResource) - **out = **in - } - if in.IdentityID != nil { - in, out := &in.IdentityID, &out.IdentityID - *out = new(string) - **out = **in - } - if in.Zone != nil { - in, out := &in.Zone, &out.Zone - *out = new(int) - **out = **in - } - if in.MachineSet != nil { - in, out := &in.MachineSet, &out.MachineSet - *out = new(AzureMachineSetConfig) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureVirtualMachineProperties. 
-func (in *AzureVirtualMachineProperties) DeepCopy() *AzureVirtualMachineProperties { - if in == nil { - return nil - } - out := new(AzureVirtualMachineProperties) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClassSpec) DeepCopyInto(out *ClassSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassSpec. -func (in *ClassSpec) DeepCopy() *ClassSpec { - if in == nil { - return nil - } - out := new(ClassSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CurrentStatus) DeepCopyInto(out *CurrentStatus) { - *out = *in - in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CurrentStatus. -func (in *CurrentStatus) DeepCopy() *CurrentStatus { - if in == nil { - return nil - } - out := new(CurrentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPDisk) DeepCopyInto(out *GCPDisk) { - *out = *in - if in.AutoDelete != nil { - in, out := &in.AutoDelete, &out.AutoDelete - *out = new(bool) - **out = **in - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPDisk. -func (in *GCPDisk) DeepCopy() *GCPDisk { - if in == nil { - return nil - } - out := new(GCPDisk) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPMachineClass) DeepCopyInto(out *GCPMachineClass) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.TypeMeta = in.TypeMeta - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineClass. -func (in *GCPMachineClass) DeepCopy() *GCPMachineClass { - if in == nil { - return nil - } - out := new(GCPMachineClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *GCPMachineClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPMachineClassList) DeepCopyInto(out *GCPMachineClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]GCPMachineClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineClassList. -func (in *GCPMachineClassList) DeepCopy() *GCPMachineClassList { - if in == nil { - return nil - } - out := new(GCPMachineClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *GCPMachineClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPMachineClassSpec) DeepCopyInto(out *GCPMachineClassSpec) { - *out = *in - if in.Description != nil { - in, out := &in.Description, &out.Description - *out = new(string) - **out = **in - } - if in.Disks != nil { - in, out := &in.Disks, &out.Disks - *out = make([]*GCPDisk, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(GCPDisk) - (*in).DeepCopyInto(*out) - } - } - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Metadata != nil { - in, out := &in.Metadata, &out.Metadata - *out = make([]*GCPMetadata, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(GCPMetadata) - (*in).DeepCopyInto(*out) - } - } - } - if in.NetworkInterfaces != nil { - in, out := &in.NetworkInterfaces, &out.NetworkInterfaces - *out = make([]*GCPNetworkInterface, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(GCPNetworkInterface) - **out = **in - } - } - } - out.Scheduling = in.Scheduling - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(v1.SecretReference) - **out = **in - } - if in.CredentialsSecretRef != nil { - in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef - *out = new(v1.SecretReference) - **out = **in - } - if in.ServiceAccounts != nil { - in, out := &in.ServiceAccounts, &out.ServiceAccounts - *out = make([]GCPServiceAccount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineClassSpec. -func (in *GCPMachineClassSpec) DeepCopy() *GCPMachineClassSpec { - if in == nil { - return nil - } - out := new(GCPMachineClassSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPMetadata) DeepCopyInto(out *GCPMetadata) { - *out = *in - if in.Value != nil { - in, out := &in.Value, &out.Value - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMetadata. -func (in *GCPMetadata) DeepCopy() *GCPMetadata { - if in == nil { - return nil - } - out := new(GCPMetadata) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPNetworkInterface) DeepCopyInto(out *GCPNetworkInterface) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPNetworkInterface. -func (in *GCPNetworkInterface) DeepCopy() *GCPNetworkInterface { - if in == nil { - return nil - } - out := new(GCPNetworkInterface) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GCPScheduling) DeepCopyInto(out *GCPScheduling) { +func (in *ClassSpec) DeepCopyInto(out *ClassSpec) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPScheduling. -func (in *GCPScheduling) DeepCopy() *GCPScheduling { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClassSpec. +func (in *ClassSpec) DeepCopy() *ClassSpec { if in == nil { return nil } - out := new(GCPScheduling) + out := new(ClassSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPServiceAccount) DeepCopyInto(out *GCPServiceAccount) { +func (in *CurrentStatus) DeepCopyInto(out *CurrentStatus) { *out = *in - if in.Scopes != nil { - in, out := &in.Scopes, &out.Scopes - *out = make([]string, len(*in)) - copy(*out, *in) - } + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPServiceAccount. -func (in *GCPServiceAccount) DeepCopy() *GCPServiceAccount { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CurrentStatus. +func (in *CurrentStatus) DeepCopy() *CurrentStatus { if in == nil { return nil } - out := new(GCPServiceAccount) + out := new(CurrentStatus) in.DeepCopyInto(out) return out } @@ -1826,241 +785,6 @@ func (in *NodeTemplateSpec) DeepCopy() *NodeTemplateSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenStackMachineClass) DeepCopyInto(out *OpenStackMachineClass) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.TypeMeta = in.TypeMeta - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackMachineClass. -func (in *OpenStackMachineClass) DeepCopy() *OpenStackMachineClass { - if in == nil { - return nil - } - out := new(OpenStackMachineClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *OpenStackMachineClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenStackMachineClassList) DeepCopyInto(out *OpenStackMachineClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]OpenStackMachineClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackMachineClassList. -func (in *OpenStackMachineClassList) DeepCopy() *OpenStackMachineClassList { - if in == nil { - return nil - } - out := new(OpenStackMachineClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *OpenStackMachineClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OpenStackMachineClassSpec) DeepCopyInto(out *OpenStackMachineClassSpec) { - *out = *in - if in.SecurityGroups != nil { - in, out := &in.SecurityGroups, &out.SecurityGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Networks != nil { - in, out := &in.Networks, &out.Networks - *out = make([]OpenStackNetwork, len(*in)) - copy(*out, *in) - } - if in.SubnetID != nil { - in, out := &in.SubnetID, &out.SubnetID - *out = new(string) - **out = **in - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(v1.SecretReference) - **out = **in - } - if in.CredentialsSecretRef != nil { - in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef - *out = new(v1.SecretReference) - **out = **in - } - if in.UseConfigDrive != nil { - in, out := &in.UseConfigDrive, &out.UseConfigDrive - *out = new(bool) - **out = **in - } - if in.ServerGroupID != nil { - in, out := &in.ServerGroupID, &out.ServerGroupID - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackMachineClassSpec. -func (in *OpenStackMachineClassSpec) DeepCopy() *OpenStackMachineClassSpec { - if in == nil { - return nil - } - out := new(OpenStackMachineClassSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenStackNetwork) DeepCopyInto(out *OpenStackNetwork) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackNetwork. -func (in *OpenStackNetwork) DeepCopy() *OpenStackNetwork { - if in == nil { - return nil - } - out := new(OpenStackNetwork) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PacketMachineClass) DeepCopyInto(out *PacketMachineClass) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.TypeMeta = in.TypeMeta - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketMachineClass. -func (in *PacketMachineClass) DeepCopy() *PacketMachineClass { - if in == nil { - return nil - } - out := new(PacketMachineClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PacketMachineClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PacketMachineClassList) DeepCopyInto(out *PacketMachineClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PacketMachineClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketMachineClassList. 
-func (in *PacketMachineClassList) DeepCopy() *PacketMachineClassList { - if in == nil { - return nil - } - out := new(PacketMachineClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PacketMachineClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PacketMachineClassSpec) DeepCopyInto(out *PacketMachineClassSpec) { - *out = *in - if in.Facility != nil { - in, out := &in.Facility, &out.Facility - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SSHKeys != nil { - in, out := &in.SSHKeys, &out.SSHKeys - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(v1.SecretReference) - **out = **in - } - if in.CredentialsSecretRef != nil { - in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef - *out = new(v1.SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacketMachineClassSpec. -func (in *PacketMachineClassSpec) DeepCopy() *PacketMachineClassSpec { - if in == nil { - return nil - } - out := new(PacketMachineClassSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) { *out = *in diff --git a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/zz_generated.defaults.go b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/zz_generated.defaults.go index 3fa68441f..d5513ac6d 100644 --- a/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/zz_generated.defaults.go +++ b/vendor/github.com/gardener/machine-controller-manager/pkg/apis/machine/zz_generated.defaults.go @@ -1,3 +1,17 @@ +// Copyright 2023 SAP SE or an SAP affiliate company +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // +build !ignore_autogenerated // This file was autogenerated by defaulter-gen. Do not edit it manually! diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 087320da7..0f5b8a48c 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -5,7 +5,7 @@ // Package cmp determines equality of values. // // This package is intended to be a more powerful and safer alternative to -// reflect.DeepEqual for comparing whether two values are semantically equal. +// [reflect.DeepEqual] for comparing whether two values are semantically equal. 
// It is intended to only be used in tests, as performance is not a goal and // it may panic if it cannot compare the values. Its propensity towards // panicking means that its unsuitable for production environments where a @@ -18,16 +18,17 @@ // For example, an equality function may report floats as equal so long as // they are within some tolerance of each other. // -// - Types with an Equal method may use that method to determine equality. -// This allows package authors to determine the equality operation -// for the types that they define. +// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method +// to determine equality. This allows package authors to determine +// the equality operation for the types that they define. // // - If no custom equality functions are used and no Equal method is defined, // equality is determined by recursively comparing the primitive kinds on -// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, +// both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual], // unexported fields are not compared by default; they result in panics -// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported) -// or explicitly compared using the Exporter option. +// unless suppressed by using an [Ignore] option +// (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) +// or explicitly compared using the [Exporter] option. package cmp import ( @@ -45,14 +46,14 @@ import ( // Equal reports whether x and y are equal by recursively applying the // following rules in the given order to x and y and all of their sub-values: // -// - Let S be the set of all Ignore, Transformer, and Comparer options that +// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that // remain after applying all path filters, value filters, and type filters. -// If at least one Ignore exists in S, then the comparison is ignored. -// If the number of Transformer and Comparer options in S is non-zero, +// If at least one [Ignore] exists in S, then the comparison is ignored. +// If the number of [Transformer] and [Comparer] options in S is non-zero, // then Equal panics because it is ambiguous which option to use. -// If S contains a single Transformer, then use that to transform +// If S contains a single [Transformer], then use that to transform // the current values and recursively call Equal on the output values. -// If S contains a single Comparer, then use that to compare the current values. +// If S contains a single [Comparer], then use that to compare the current values. // Otherwise, evaluation proceeds to the next rule. // // - If the values have an Equal method of the form "(T) Equal(T) bool" or @@ -66,21 +67,22 @@ import ( // Functions are only equal if they are both nil, otherwise they are unequal. // // Structs are equal if recursively calling Equal on all fields report equal. -// If a struct contains unexported fields, Equal panics unless an Ignore option -// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option -// explicitly permits comparing the unexported field. +// If a struct contains unexported fields, Equal panics unless an [Ignore] option +// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field +// or the [Exporter] option explicitly permits comparing the unexported field. // // Slices are equal if they are both nil or both non-nil, where recursively // calling Equal on all non-ignored slice or array elements report equal. 
// Empty non-nil slices and nil slices are not equal; to equate empty slices, -// consider using cmpopts.EquateEmpty. +// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty]. // // Maps are equal if they are both nil or both non-nil, where recursively // calling Equal on all non-ignored map entries report equal. // Map keys are equal according to the == operator. -// To use custom comparisons for map keys, consider using cmpopts.SortMaps. +// To use custom comparisons for map keys, consider using +// [github.com/google/go-cmp/cmp/cmpopts.SortMaps]. // Empty non-nil maps and nil maps are not equal; to equate empty maps, -// consider using cmpopts.EquateEmpty. +// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty]. // // Pointers and interfaces are equal if they are both nil or both non-nil, // where they have the same underlying concrete type and recursively diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export.go similarity index 94% rename from vendor/github.com/google/go-cmp/cmp/export_unsafe.go rename to vendor/github.com/google/go-cmp/cmp/export.go index e2c0f74e8..29f82fe6b 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/export.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego -// +build !purego - package cmp import ( @@ -12,8 +9,6 @@ import ( "unsafe" ) -const supportExporters = true - // retrieveUnexportedField uses unsafe to forcibly retrieve any field from // a struct such that the value has read-write permissions. // diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go deleted file mode 100644 index ae851fe53..000000000 --- a/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego -// +build purego - -package cmp - -import "reflect" - -const supportExporters = false - -func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value { - panic("no support for forcibly accessing unexported fields") -} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go similarity index 95% rename from vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go rename to vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go index 16e6860af..e5dfff69a 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego -// +build !purego - package value import ( diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go deleted file mode 100644 index 1a71bfcbd..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2018, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
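[Editor's aside, not part of the vendored sources] The compare.go documentation rewritten above spells out how Equal treats Equal methods, empty versus nil slices/maps, and the cmpopts helpers. A minimal usage sketch of those semantics; the shoot type and values are invented for illustration, and the import paths are the cmp/cmpopts packages referenced in the documentation above:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type shoot struct {
	Name   string
	Labels map[string]string
}

func main() {
	x := shoot{Name: "foo", Labels: map[string]string{}}
	y := shoot{Name: "foo"} // Labels is nil

	// Per the documentation above, empty non-nil maps and nil maps are unequal by default.
	fmt.Println(cmp.Equal(x, y)) // false

	// cmpopts.EquateEmpty equates empty and nil slices/maps.
	fmt.Println(cmp.Equal(x, y, cmpopts.EquateEmpty())) // true

	// Diff produces a human-readable report of the remaining differences.
	fmt.Println(cmp.Diff(x, y))
}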
- -//go:build purego -// +build purego - -package value - -import "reflect" - -// Pointer is an opaque typed pointer and is guaranteed to be comparable. -type Pointer struct { - p uintptr - t reflect.Type -} - -// PointerOf returns a Pointer from v, which must be a -// reflect.Ptr, reflect.Slice, or reflect.Map. -func PointerOf(v reflect.Value) Pointer { - // NOTE: Storing a pointer as an uintptr is technically incorrect as it - // assumes that the GC implementation does not use a moving collector. - return Pointer{v.Pointer(), v.Type()} -} - -// IsNil reports whether the pointer is nil. -func (p Pointer) IsNil() bool { - return p.p == 0 -} - -// Uintptr returns the pointer as a uintptr. -func (p Pointer) Uintptr() uintptr { - return p.p -} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index 1f9ca9c48..754496f3b 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -13,15 +13,15 @@ import ( "github.com/google/go-cmp/cmp/internal/function" ) -// Option configures for specific behavior of Equal and Diff. In particular, -// the fundamental Option functions (Ignore, Transformer, and Comparer), +// Option configures for specific behavior of [Equal] and [Diff]. In particular, +// the fundamental Option functions ([Ignore], [Transformer], and [Comparer]), // configure how equality is determined. // -// The fundamental options may be composed with filters (FilterPath and -// FilterValues) to control the scope over which they are applied. +// The fundamental options may be composed with filters ([FilterPath] and +// [FilterValues]) to control the scope over which they are applied. // -// The cmp/cmpopts package provides helper functions for creating options that -// may be used with Equal and Diff. +// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions +// for creating options that may be used with [Equal] and [Diff]. type Option interface { // filter applies all filters and returns the option that remains. // Each option may only read s.curPath and call s.callTTBFunc. @@ -56,9 +56,9 @@ type core struct{} func (core) isCore() {} -// Options is a list of Option values that also satisfies the Option interface. +// Options is a list of [Option] values that also satisfies the [Option] interface. // Helper comparison packages may return an Options value when packing multiple -// Option values into a single Option. When this package processes an Options, +// [Option] values into a single [Option]. When this package processes an Options, // it will be implicitly expanded into a flat list. // // Applying a filter on an Options is equivalent to applying that same filter @@ -105,16 +105,16 @@ func (opts Options) String() string { return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) } -// FilterPath returns a new Option where opt is only evaluated if filter f -// returns true for the current Path in the value tree. +// FilterPath returns a new [Option] where opt is only evaluated if filter f +// returns true for the current [Path] in the value tree. // // This filter is called even if a slice element or map entry is missing and // provides an opportunity to ignore such cases. The filter function must be // symmetric such that the filter result is identical regardless of whether the // missing value is from x or y. // -// The option passed in may be an Ignore, Transformer, Comparer, Options, or -// a previously filtered Option. 
+// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or +// a previously filtered [Option]. func FilterPath(f func(Path) bool, opt Option) Option { if f == nil { panic("invalid path filter function") @@ -142,7 +142,7 @@ func (f pathFilter) String() string { return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) } -// FilterValues returns a new Option where opt is only evaluated if filter f, +// FilterValues returns a new [Option] where opt is only evaluated if filter f, // which is a function of the form "func(T, T) bool", returns true for the // current pair of values being compared. If either value is invalid or // the type of the values is not assignable to T, then this filter implicitly @@ -154,8 +154,8 @@ func (f pathFilter) String() string { // If T is an interface, it is possible that f is called with two values with // different concrete types that both implement T. // -// The option passed in may be an Ignore, Transformer, Comparer, Options, or -// a previously filtered Option. +// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or +// a previously filtered [Option]. func FilterValues(f interface{}, opt Option) Option { v := reflect.ValueOf(f) if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { @@ -192,9 +192,9 @@ func (f valuesFilter) String() string { return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) } -// Ignore is an Option that causes all comparisons to be ignored. -// This value is intended to be combined with FilterPath or FilterValues. -// It is an error to pass an unfiltered Ignore option to Equal. +// Ignore is an [Option] that causes all comparisons to be ignored. +// This value is intended to be combined with [FilterPath] or [FilterValues]. +// It is an error to pass an unfiltered Ignore option to [Equal]. func Ignore() Option { return ignore{} } type ignore struct{ core } @@ -234,6 +234,8 @@ func (validator) apply(s *state, vx, vy reflect.Value) { name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType if _, ok := reflect.New(t).Interface().(error); ok { help = "consider using cmpopts.EquateErrors to compare error values" + } else if t.Comparable() { + help = "consider using cmpopts.EquateComparable to compare comparable Go types" } } else { // Unnamed type with unexported fields. Derive PkgPath from field. @@ -254,7 +256,7 @@ const identRx = `[_\p{L}][_\p{L}\p{N}]*` var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) -// Transformer returns an Option that applies a transformation function that +// Transformer returns an [Option] that applies a transformation function that // converts values of a certain type into that of another. // // The transformer f must be a function "func(T) R" that converts values of @@ -265,13 +267,14 @@ var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) // same transform to the output of itself (e.g., in the case where the // input and output types are the same), an implicit filter is added such that // a transformer is applicable only if that exact transformer is not already -// in the tail of the Path since the last non-Transform step. +// in the tail of the [Path] since the last non-[Transform] step. // For situations where the implicit filter is still insufficient, -// consider using cmpopts.AcyclicTransformer, which adds a filter -// to prevent the transformer from being recursively applied upon itself. 
+// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer], +// which adds a filter to prevent the transformer from +// being recursively applied upon itself. // -// The name is a user provided label that is used as the Transform.Name in the -// transformation PathStep (and eventually shown in the Diff output). +// The name is a user provided label that is used as the [Transform.Name] in the +// transformation [PathStep] (and eventually shown in the [Diff] output). // The name must be a valid identifier or qualified identifier in Go syntax. // If empty, an arbitrary name is used. func Transformer(name string, f interface{}) Option { @@ -329,7 +332,7 @@ func (tr transformer) String() string { return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) } -// Comparer returns an Option that determines whether two values are equal +// Comparer returns an [Option] that determines whether two values are equal // to each other. // // The comparer f must be a function "func(T, T) bool" and is implicitly @@ -377,35 +380,32 @@ func (cm comparer) String() string { return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) } -// Exporter returns an Option that specifies whether Equal is allowed to +// Exporter returns an [Option] that specifies whether [Equal] is allowed to // introspect into the unexported fields of certain struct types. // // Users of this option must understand that comparing on unexported fields // from external packages is not safe since changes in the internal -// implementation of some external package may cause the result of Equal +// implementation of some external package may cause the result of [Equal] // to unexpectedly change. However, it may be valid to use this option on types // defined in an internal package where the semantic meaning of an unexported // field is in the control of the user. // -// In many cases, a custom Comparer should be used instead that defines +// In many cases, a custom [Comparer] should be used instead that defines // equality as a function of the public API of a type rather than the underlying // unexported implementation. // -// For example, the reflect.Type documentation defines equality to be determined +// For example, the [reflect.Type] documentation defines equality to be determined // by the == operator on the interface (essentially performing a shallow pointer -// comparison) and most attempts to compare *regexp.Regexp types are interested +// comparison) and most attempts to compare *[regexp.Regexp] types are interested // in only checking that the regular expression strings are equal. -// Both of these are accomplished using Comparers: +// Both of these are accomplished using [Comparer] options: // // Comparer(func(x, y reflect.Type) bool { return x == y }) // Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) // -// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore -// all unexported fields on specified struct types. +// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported] +// option can be used to ignore all unexported fields on specified struct types. 
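[Editor's aside, not part of the vendored file] The Exporter documentation above recommends a custom Comparer or cmpopts.IgnoreUnexported over force-exporting unexported fields. A short sketch of both alternatives; the config type is invented, and the *regexp.Regexp comparer mirrors the one shown in the documentation above (it assumes non-nil patterns):

package example

import (
	"regexp"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type config struct {
	Pattern *regexp.Regexp
	cache   map[string]string // unexported; comparing it directly would make Equal panic
}

// equalConfigs compares configs without resorting to Exporter or AllowUnexported.
func equalConfigs(a, b config) bool {
	return cmp.Equal(a, b, cmp.Options{
		// Compare regular expressions by their textual form, as suggested above.
		cmp.Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }),
		// Ignore the unexported cache field instead of force-exporting it.
		cmpopts.IgnoreUnexported(config{}),
	})
}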
func Exporter(f func(reflect.Type) bool) Option { - if !supportExporters { - panic("Exporter is not supported on purego builds") - } return exporter(f) } @@ -415,10 +415,10 @@ func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableO panic("not implemented") } -// AllowUnexported returns an Options that allows Equal to forcibly introspect +// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect // unexported fields of the specified struct types. // -// See Exporter for the proper use of this option. +// See [Exporter] for the proper use of this option. func AllowUnexported(types ...interface{}) Option { m := make(map[reflect.Type]bool) for _, typ := range types { @@ -432,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option { } // Result represents the comparison result for a single node and -// is provided by cmp when calling Report (see Reporter). +// is provided by cmp when calling Report (see [Reporter]). type Result struct { _ [0]func() // Make Result incomparable flags resultFlags @@ -445,7 +445,7 @@ func (r Result) Equal() bool { } // ByIgnore reports whether the node is equal because it was ignored. -// This never reports true if Equal reports false. +// This never reports true if [Result.Equal] reports false. func (r Result) ByIgnore() bool { return r.flags&reportByIgnore != 0 } @@ -455,7 +455,7 @@ func (r Result) ByMethod() bool { return r.flags&reportByMethod != 0 } -// ByFunc reports whether a Comparer function determined equality. +// ByFunc reports whether a [Comparer] function determined equality. func (r Result) ByFunc() bool { return r.flags&reportByFunc != 0 } @@ -478,7 +478,7 @@ const ( reportByCycle ) -// Reporter is an Option that can be passed to Equal. When Equal traverses +// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses // the value trees, it calls PushStep as it descends into each node in the // tree and PopStep as it ascend out of the node. The leaves of the tree are // either compared (determined to be equal or not equal) or ignored and reported diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index a0a588502..c3c145642 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -14,9 +14,9 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) -// Path is a list of PathSteps describing the sequence of operations to get +// Path is a list of [PathStep] describing the sequence of operations to get // from some root type to the current position in the value tree. -// The first Path element is always an operation-less PathStep that exists +// The first Path element is always an operation-less [PathStep] that exists // simply to identify the initial type. // // When traversing structs with embedded structs, the embedded struct will @@ -29,8 +29,13 @@ type Path []PathStep // a value's tree structure. Users of this package never need to implement // these types as values of this type will be returned by this package. // -// Implementations of this interface are -// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. +// Implementations of this interface: +// - [StructField] +// - [SliceIndex] +// - [MapIndex] +// - [Indirect] +// - [TypeAssertion] +// - [Transform] type PathStep interface { String() string @@ -70,8 +75,9 @@ func (pa *Path) pop() { *pa = (*pa)[:len(*pa)-1] } -// Last returns the last PathStep in the Path. 
-// If the path is empty, this returns a non-nil PathStep that reports a nil Type. +// Last returns the last [PathStep] in the Path. +// If the path is empty, this returns a non-nil [PathStep] +// that reports a nil [PathStep.Type]. func (pa Path) Last() PathStep { return pa.Index(-1) } @@ -79,7 +85,8 @@ func (pa Path) Last() PathStep { // Index returns the ith step in the Path and supports negative indexing. // A negative index starts counting from the tail of the Path such that -1 // refers to the last step, -2 refers to the second-to-last step, and so on. -// If index is invalid, this returns a non-nil PathStep that reports a nil Type. +// If index is invalid, this returns a non-nil [PathStep] +// that reports a nil [PathStep.Type]. func (pa Path) Index(i int) PathStep { if i < 0 { i = len(pa) + i @@ -168,7 +175,8 @@ func (ps pathStep) String() string { return fmt.Sprintf("{%s}", s) } -// StructField represents a struct field access on a field called Name. +// StructField is a [PathStep] that represents a struct field access +// on a field called [StructField.Name]. type StructField struct{ *structField } type structField struct { pathStep @@ -204,10 +212,11 @@ func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } func (sf StructField) Name() string { return sf.name } // Index is the index of the field in the parent struct type. -// See reflect.Type.Field. +// See [reflect.Type.Field]. func (sf StructField) Index() int { return sf.idx } -// SliceIndex is an index operation on a slice or array at some index Key. +// SliceIndex is a [PathStep] that represents an index operation on +// a slice or array at some index [SliceIndex.Key]. type SliceIndex struct{ *sliceIndex } type sliceIndex struct { pathStep @@ -247,12 +256,12 @@ func (si SliceIndex) Key() int { // all of the indexes to be shifted. If an index is -1, then that // indicates that the element does not exist in the associated slice. // -// Key is guaranteed to return -1 if and only if the indexes returned -// by SplitKeys are not the same. SplitKeys will never return -1 for +// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes +// returned by SplitKeys are not the same. SplitKeys will never return -1 for // both indexes. func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } -// MapIndex is an index operation on a map at some index Key. +// MapIndex is a [PathStep] that represents an index operation on a map at some index Key. type MapIndex struct{ *mapIndex } type mapIndex struct { pathStep @@ -266,7 +275,7 @@ func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", // Key is the value of the map key. func (mi MapIndex) Key() reflect.Value { return mi.key } -// Indirect represents pointer indirection on the parent type. +// Indirect is a [PathStep] that represents pointer indirection on the parent type. type Indirect struct{ *indirect } type indirect struct { pathStep @@ -276,7 +285,7 @@ func (in Indirect) Type() reflect.Type { return in.typ } func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } func (in Indirect) String() string { return "*" } -// TypeAssertion represents a type assertion on an interface. +// TypeAssertion is a [PathStep] that represents a type assertion on an interface. 
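[Editor's aside, not part of the vendored file] The Path/PathStep documentation touched in this hunk is easiest to see through a Reporter option. A rough sketch, following the pattern from go-cmp's own reporter example, of a reporter that records the path of every differing leaf; cmp.Reporter, Path.Last, and PathStep.Values are the APIs documented above:

package example

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
)

// diffReporter records the path of every unequal leaf visited by Equal.
type diffReporter struct {
	path  cmp.Path
	diffs []string
}

func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }

func (r *diffReporter) Report(rs cmp.Result) {
	if !rs.Equal() {
		vx, vy := r.path.Last().Values()
		r.diffs = append(r.diffs, fmt.Sprintf("%#v: %+v != %+v", r.path, vx, vy))
	}
}

func (r *diffReporter) PopStep() { r.path = r.path[:len(r.path)-1] }

func (r *diffReporter) String() string { return strings.Join(r.diffs, "\n") }

// Usage: cmp.Equal(x, y, cmp.Reporter(&diffReporter{})); the reporter's String()
// then lists each differing path in the compared values.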
type TypeAssertion struct{ *typeAssertion } type typeAssertion struct { pathStep @@ -286,7 +295,8 @@ func (ta TypeAssertion) Type() reflect.Type { return ta.typ } func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) } -// Transform is a transformation from the parent type to the current type. +// Transform is a [PathStep] that represents a transformation +// from the parent type to the current type. type Transform struct{ *transform } type transform struct { pathStep @@ -297,13 +307,13 @@ func (tf Transform) Type() reflect.Type { return tf.typ } func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } -// Name is the name of the Transformer. +// Name is the name of the [Transformer]. func (tf Transform) Name() string { return tf.trans.name } // Func is the function pointer to the transformer function. func (tf Transform) Func() reflect.Value { return tf.trans.fnc } -// Option returns the originally constructed Transformer option. +// Option returns the originally constructed [Transformer] option. // The == operator can be used to detect the exact option used. func (tf Transform) Option() Option { return tf.trans } diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 2ab41fad3..e39f42284 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -199,7 +199,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, break } sf := t.Field(i) - if supportExporters && !isExported(sf.Name) { + if !isExported(sf.Name) { vv = retrieveUnexportedField(v, sf, true) } s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go index a733bef18..44e368e56 100644 --- a/vendor/github.com/hashicorp/errwrap/errwrap.go +++ b/vendor/github.com/hashicorp/errwrap/errwrap.go @@ -44,6 +44,8 @@ func Wrap(outer, inner error) error { // // format is the format of the error message. The string '{{err}}' will // be replaced with the original error message. 
+// +// Deprecated: Use fmt.Errorf() func Wrapf(format string, err error) error { outerMsg := "" if err != nil { @@ -148,6 +150,9 @@ func Walk(err error, cb WalkFunc) { for _, err := range e.WrappedErrors() { Walk(err, cb) } + case interface{ Unwrap() error }: + cb(err) + Walk(e.Unwrap(), cb) default: cb(err) } @@ -167,3 +172,7 @@ func (w *wrappedError) Error() string { func (w *wrappedError) WrappedErrors() []error { return []error{w.Outer, w.Inner} } + +func (w *wrappedError) Unwrap() error { + return w.Inner +} diff --git a/vendor/github.com/ironcore-dev/vgopath/.gitignore b/vendor/github.com/ironcore-dev/vgopath/.gitignore new file mode 100644 index 000000000..c8a6a5a5d --- /dev/null +++ b/vendor/github.com/ironcore-dev/vgopath/.gitignore @@ -0,0 +1,33 @@ + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin/ + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Kubernetes Generated files - skip generated files, except for vendored files + +!vendor/**/zz_generated.* +vendor/ + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ +.vscode/ + +# Utilities +testbin/ + +# Goreleaser +dist/ + diff --git a/vendor/github.com/ironcore-dev/vgopath/.golangci.yaml b/vendor/github.com/ironcore-dev/vgopath/.golangci.yaml new file mode 100644 index 000000000..8bf422070 --- /dev/null +++ b/vendor/github.com/ironcore-dev/vgopath/.golangci.yaml @@ -0,0 +1,20 @@ +run: + timeout: 3m + +linters: + enable: + - revive + - ineffassign + - misspell + - goimports + +severity: + default-severity: error + +linters-settings: + revive: + severity: error + rules: + - name: exported + - name: if-return + disabled: true diff --git a/vendor/github.com/ironcore-dev/vgopath/CODEOWNERS b/vendor/github.com/ironcore-dev/vgopath/CODEOWNERS new file mode 100644 index 000000000..6eede2245 --- /dev/null +++ b/vendor/github.com/ironcore-dev/vgopath/CODEOWNERS @@ -0,0 +1,2 @@ +# ironcore-dev core maintainers +* @ironcore-dev/core diff --git a/vendor/github.com/ironcore-dev/vgopath/CODE_OF_CONDUCT.md b/vendor/github.com/ironcore-dev/vgopath/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..57aa8414c --- /dev/null +++ b/vendor/github.com/ironcore-dev/vgopath/CODE_OF_CONDUCT.md @@ -0,0 +1 @@ +Please refer to the [Gardener on Metal code of conduct](https://ironcore-dev.github.io/documentation/contribute/overview/#code-of-conduct). diff --git a/vendor/github.com/ironcore-dev/vgopath/LICENSE b/vendor/github.com/ironcore-dev/vgopath/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/ironcore-dev/vgopath/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/ironcore-dev/vgopath/Makefile b/vendor/github.com/ironcore-dev/vgopath/Makefile new file mode 100644 index 000000000..6a1a70806 --- /dev/null +++ b/vendor/github.com/ironcore-dev/vgopath/Makefile @@ -0,0 +1,105 @@ +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +# Setting SHELL to bash allows bash commands to be executed by recipes. +# This is a requirement for 'setup-envtest.sh' in the test target. +# Options are set to exit when a recipe line exits non-zero or a piped command fails. +SHELL = /usr/bin/env bash -o pipefail +.SHELLFLAGS = -ec + +all: build + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk commands is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Development + +.PHONY: fmt +fmt: goimports ## Run goimports against code. + $(GOIMPORTS) -w . + +.PHONY: vet +vet: ## Run go vet against code. + go vet ./... + +.PHONY: lint +lint: ## Run golangci-lint on the code. + $(GOLANGCILINT) run ./... + +.PHONY: add-license +add-license: addlicense ## Add license headers to all go files. + find . -name '*.go' -exec $(ADDLICENSE) -c 'IronCore authors' {} + + +.PHONY: check-license +check-license: addlicense ## Check that every file has a license header present. + find . -name '*.go' -exec $(ADDLICENSE) -check -c 'IronCore authors' {} + + +.PHONY: check +check: add-license fmt lint test golangci-lint # Generate manifests, code, lint, add licenses, test + +.PHONY: test +test: fmt vet ## Run tests. + go test ./... -coverprofile cover.out + +##@ Build + +.PHONY: build +build: fmt vet ## Build manager binary. + go build -o bin/vgopath main.go + +.PHONY: install +install: ## Install vgopath into GOBIN. + go install . + +.PHONY: run +run: fmt lint ## Run a controller from your host. + go run ./main.go + +##@ Tools + +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) + +## Tool Binaries +ADDLICENSE ?= $(LOCALBIN)/addlicense +GOIMPORTS ?= $(LOCALBIN)/goimports +GOLANGCILINT ?= $(LOCALBIN)/golangci-lint + +## Tool Versions +ADDLICENSE_VERSION ?= v1.1.1 +GOIMPORTS_VERSION ?= v0.14.0 +GOLANGCILINT_VERSION ?= v1.55.1 + +.PHONY: addlicense +addlicense: $(ADDLICENSE) ## Download addlicense locally if necessary. +$(ADDLICENSE): $(LOCALBIN) + test -s $(LOCALBIN)/addlicense || GOBIN=$(LOCALBIN) go install github.com/google/addlicense@$(ADDLICENSE_VERSION) + +.PHONY: goimports +goimports: $(GOIMPORTS) ## Download goimports locally if necessary. 
+$(GOIMPORTS): $(LOCALBIN) + test -s $(LOCALBIN)/goimports || GOBIN=$(LOCALBIN) go install golang.org/x/tools/cmd/goimports@$(GOIMPORTS_VERSION) + +.PHONY: golangci-lint +golangci-lint: $(GOLANGCILINT) ## Download golangci-lint locally if necessary. +$(GOLANGCILINT): $(LOCALBIN) + test -s $(LOCALBIN)/golangci-lint || GOBIN=$(LOCALBIN) go install github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCILINT_VERSION) diff --git a/vendor/github.com/ironcore-dev/vgopath/README.md b/vendor/github.com/ironcore-dev/vgopath/README.md new file mode 100644 index 000000000..a15fab909 --- /dev/null +++ b/vendor/github.com/ironcore-dev/vgopath/README.md @@ -0,0 +1,36 @@ +# vgopath + +`vgopath` is a tool for module-enabled projects to set up a 'virtual' GOPATH for +legacy tools to run with (`kubernetes/code-generator` I'm looking at you...). + +## Installation + +The simplest way to install `vgopath` is by running + +```shell +go install github.com/ironcore-dev/vgopath@latest +``` + +## Usage + +`vgopath` has to be run from the module-enabled project root. It requires a +target directory to construct the virtual GOPATH. + +Example usage could look like this: + +```shell +# Create the target directory +mkdir -p my-vgopath + +# Do the linking in my-vgopath +vgopath -o my-vgopath +``` + +Once done, the structure will look something like + +``` +my-vgopath +├── bin -> /bin +├── pkg -> /pkg +└── src -> various subdirectories +``` diff --git a/vendor/github.com/ironcore-dev/vgopath/internal/cmd/version/version.go b/vendor/github.com/ironcore-dev/vgopath/internal/cmd/version/version.go new file mode 100644 index 000000000..949c7f24f --- /dev/null +++ b/vendor/github.com/ironcore-dev/vgopath/internal/cmd/version/version.go @@ -0,0 +1,40 @@ +// Copyright 2023 IronCore authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "io" + + "github.com/ironcore-dev/vgopath/internal/version" + "github.com/spf13/cobra" +) + +func Command(out io.Writer) *cobra.Command { + cmd := &cobra.Command{ + Use: "version", + Short: "Prints version information.", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return Run(out) + }, + } + + return cmd +} + +func Run(out io.Writer) error { + version.FPrint(out) + return nil +} diff --git a/vendor/github.com/ironcore-dev/vgopath/internal/cmd/vgopath/exec/exec.go b/vendor/github.com/ironcore-dev/vgopath/internal/cmd/vgopath/exec/exec.go new file mode 100644 index 000000000..bb08522a5 --- /dev/null +++ b/vendor/github.com/ironcore-dev/vgopath/internal/cmd/vgopath/exec/exec.go @@ -0,0 +1,109 @@ +// Copyright 2023 IronCore authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package exec + +import ( + "fmt" + "os" + "os/exec" + "regexp" + + "github.com/ironcore-dev/vgopath/internal/link" + "github.com/spf13/cobra" +) + +func Command() *cobra.Command { + var ( + opts link.Options + dstDir string + shell bool + ) + + cmd := &cobra.Command{ + Use: "exec -- command [args...]", + Short: "Run an executable in a virtual GOPATH.", + Args: func(cmd *cobra.Command, args []string) error { + if !shell { + return cobra.MinimumNArgs(1)(cmd, args) + } + return cobra.ExactArgs(1)(cmd, args) + }, + RunE: func(cmd *cobra.Command, args []string) error { + executable, executableArgs := executableAndArgs(args, shell) + return Run(dstDir, executable, opts, executableArgs) + }, + } + + opts.AddFlags(cmd.Flags()) + cmd.Flags().StringVarP(&dstDir, "dst-dir", "o", "", "Destination directory. If empty, a temporary directory will be created.") + cmd.Flags().BoolVarP(&shell, "shell", "s", false, "Whether to run the command in a shell.") + + return cmd +} + +func executableAndArgs(args []string, doShell bool) (string, []string) { + if !doShell { + return args[0], args[1:] + } + + shell := os.Getenv("SHELL") + if shell == "" { + shell = "/bin/sh" + } + + return shell, []string{"-c", args[0]} +} + +func Run(dstDir, executable string, opts link.Options, args []string) error { + if dstDir == "" { + var err error + dstDir, err = os.MkdirTemp("", "vgopath") + if err != nil { + return fmt.Errorf("error creating temp directory: %w", err) + } + defer func() { _ = os.RemoveAll(dstDir) }() + } + + if err := link.Link(dstDir, opts); err != nil { + return err + } + + cmd := exec.Command(executable, args...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Dir = dstDir + + cmd.Env = mkEnv(dstDir) + return cmd.Run() +} + +var filterEnvRegexp = regexp.MustCompile(`^(GOPATH|GO111MODULE)=`) + +func mkEnv(gopath string) []string { + env := os.Environ() + res := make([]string, 0, len(env)+2) + + for _, kv := range env { + if !filterEnvRegexp.MatchString(kv) { + res = append(res, kv) + } + } + + return append(res, + fmt.Sprintf("GOPATH=%s", gopath), + "GO111MODULE=off", + ) +} diff --git a/vendor/github.com/ironcore-dev/vgopath/internal/cmd/vgopath/vgopath.go b/vendor/github.com/ironcore-dev/vgopath/internal/cmd/vgopath/vgopath.go new file mode 100644 index 000000000..06583b42f --- /dev/null +++ b/vendor/github.com/ironcore-dev/vgopath/internal/cmd/vgopath/vgopath.go @@ -0,0 +1,63 @@ +// Copyright 2023 IronCore authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
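The `exec` subcommand above prepares the child process so that pre-module tooling sees the linked tree as a classic workspace: inherited `GOPATH` and `GO111MODULE` values are dropped, `GOPATH` is pointed at the destination directory, and module mode is switched off. A minimal, self-contained sketch of that environment handling follows; it is an illustration only, not the vendored API, and the directory name is hypothetical.

```go
package main

import (
	"fmt"
	"log"
	"os"
	"os/exec"
	"strings"
)

// legacyEnv mirrors what mkEnv above does: drop any inherited GOPATH /
// GO111MODULE entries and re-add them pointing at the virtual GOPATH.
func legacyEnv(gopath string) []string {
	env := make([]string, 0, len(os.Environ())+2)
	for _, kv := range os.Environ() {
		if strings.HasPrefix(kv, "GOPATH=") || strings.HasPrefix(kv, "GO111MODULE=") {
			continue
		}
		env = append(env, kv)
	}
	return append(env, fmt.Sprintf("GOPATH=%s", gopath), "GO111MODULE=off")
}

func main() {
	linkedDir := "/tmp/vgopath-example" // hypothetical; stands in for the linked destination directory
	if err := os.MkdirAll(linkedDir, 0o755); err != nil {
		log.Fatal(err)
	}

	cmd := exec.Command("go", "env", "GOPATH") // any GOPATH-based tool would do
	cmd.Dir = linkedDir
	cmd.Env = legacyEnv(linkedDir)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
}
```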
+ +package vgopath + +import ( + "os" + + "github.com/ironcore-dev/vgopath/internal/cmd/version" + "github.com/ironcore-dev/vgopath/internal/cmd/vgopath/exec" + "github.com/ironcore-dev/vgopath/internal/link" + "github.com/spf13/cobra" +) + +func Command() *cobra.Command { + var ( + opts link.Options + dstDir string + ) + + cmd := &cobra.Command{ + Use: "vgopath", + Short: "Create and operate on virtual GOPATHs", + Long: `Create a 'virtual' GOPATH at the specified directory. + +vgopath will setup a GOPATH folder structure, ensuring that any tool used +to the traditional setup will function as normal. + +The target module will be mirrored to where its go.mod path (the line +after 'module') points at. +`, + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return Run(dstDir, opts) + }, + } + + opts.AddFlags(cmd.Flags()) + cmd.Flags().StringVarP(&dstDir, "dst-dir", "o", "", "Destination directory.") + _ = cmd.MarkFlagRequired("dst-dir") + + cmd.AddCommand( + exec.Command(), + version.Command(os.Stdout), + ) + + return cmd +} + +func Run(dstDir string, opts link.Options) error { + return link.Link(dstDir, opts) +} diff --git a/vendor/github.com/ironcore-dev/vgopath/internal/link/link.go b/vendor/github.com/ironcore-dev/vgopath/internal/link/link.go new file mode 100644 index 000000000..8a58b0aee --- /dev/null +++ b/vendor/github.com/ironcore-dev/vgopath/internal/link/link.go @@ -0,0 +1,284 @@ +// Copyright 2022 IronCore authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
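The long help text above states that the target module is mirrored to the location its `go.mod` module path points at; the `link` package that follows builds a tree of module path segments and symlinks each module directory under `src`. A small sketch of computing that mirror location is shown below. It reads the module path with `golang.org/x/mod/modfile`, which is used purely for illustration; the vendored code obtains the same information via `go list -m -json`.

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"golang.org/x/mod/modfile"
)

func main() {
	// Read the module path (the "module ..." line) of the current project.
	data, err := os.ReadFile("go.mod")
	if err != nil {
		log.Fatal(err)
	}
	modulePath := modfile.ModulePath(data) // e.g. "github.com/ironcore-dev/vgopath"

	// The module would be mirrored to <dst>/src/<module path> inside the virtual GOPATH.
	dstDir := "my-vgopath" // hypothetical destination directory
	fmt.Println(filepath.Join(dstDir, "src", filepath.FromSlash(modulePath)))
}
```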
+ +package link + +import ( + "fmt" + "go/build" + "os" + "path" + "path/filepath" + "sort" + "strings" + + "github.com/ironcore-dev/vgopath/internal/module" + "github.com/spf13/pflag" +) + +type Node struct { + Segment string + Module *module.Module + Children []Node +} + +func insertModuleInNode(node *Node, mod module.Module, relativeSegments []string) error { + if len(relativeSegments) == 0 { + if node.Module != nil { + return fmt.Errorf("cannot insert module %s into node %s: module %s already exists", mod.Path, node.Segment, node.Module.Path) + } + + node.Module = &mod + return nil + } + + var ( + idx = -1 + segment = relativeSegments[0] + ) + for i, child := range node.Children { + if child.Segment == segment { + idx = i + break + } + } + + var child *Node + if idx == -1 { + child = &Node{Segment: segment} + } else { + child = &node.Children[idx] + } + + if err := insertModuleInNode(child, mod, relativeSegments[1:]); err != nil { + return err + } + + if idx == -1 { + node.Children = append(node.Children, *child) + } + + return nil +} + +func BuildModuleNodes(modules []module.Module) ([]Node, error) { + sort.Slice(modules, func(i, j int) bool { return modules[i].Path < modules[j].Path }) + nodeByRootSegment := make(map[string]*Node) + + for _, mod := range modules { + if mod.Path == "" { + return nil, fmt.Errorf("invalid empty module path") + } + + segments := strings.Split(mod.Path, "/") + + rootSegment := segments[0] + node, ok := nodeByRootSegment[rootSegment] + if !ok { + node = &Node{Segment: rootSegment} + nodeByRootSegment[rootSegment] = node + } + + if err := insertModuleInNode(node, mod, segments[1:]); err != nil { + return nil, err + } + } + + res := make([]Node, 0, len(nodeByRootSegment)) + for _, node := range nodeByRootSegment { + res = append(res, *node) + } + return res, nil +} + +func FilterModulesWithoutDir(modules []module.Module) []module.Module { + var res []module.Module + for _, mod := range modules { + // Don't vendor modules without backing directory. + if mod.Dir == "" { + continue + } + + res = append(res, mod) + } + + return res +} + +type Options struct { + SrcDir string + SkipGoBin bool + SkipGoSrc bool + SkipGoPkg bool +} + +func (o *Options) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&o.SrcDir, "src-dir", o.SrcDir, "Source directory for linking. Empty string indicates current directory.") + fs.BoolVar(&o.SkipGoPkg, "skip-go-pkg", o.SkipGoPkg, "Whether to skip mirroring $GOPATH/pkg") + fs.BoolVar(&o.SkipGoBin, "skip-go-bin", o.SkipGoBin, "Whether to skip mirroring $GOBIN") + fs.BoolVar(&o.SkipGoSrc, "skip-go-src", o.SkipGoSrc, "Whether to skip mirroring modules as src") +} + +func Link(dstDir string, opts Options) error { + if opts.SrcDir == "" { + opts.SrcDir = "." 
+ } + + if !opts.SkipGoSrc { + if err := GoSrc(dstDir, opts.SrcDir); err != nil { + return fmt.Errorf("error linking GOPATH/src: %w", err) + } + } + + if !opts.SkipGoBin { + if err := GoBin(dstDir); err != nil { + return fmt.Errorf("error linking GOPATH/bin: %w", err) + } + } + + if !opts.SkipGoPkg { + if err := GoPkg(dstDir); err != nil { + return fmt.Errorf("error linking GOPATH/pkg: %w", err) + } + } + + return nil +} + +func GoBin(dstDir string) error { + dstGoBinDir := filepath.Join(dstDir, "bin") + if err := os.RemoveAll(dstGoBinDir); err != nil { + return err + } + + srcGoBinDir := os.Getenv("GOBIN") + if srcGoBinDir == "" { + srcGoBinDir = filepath.Join(build.Default.GOPATH, "bin") + } + + if err := os.Symlink(srcGoBinDir, dstGoBinDir); err != nil { + return err + } + return nil +} + +func GoPkg(dstDir string) error { + dstGoPkgDir := filepath.Join(dstDir, "pkg") + if err := os.RemoveAll(dstGoPkgDir); err != nil { + return err + } + + if err := os.Symlink(filepath.Join(build.Default.GOPATH, "pkg"), dstGoPkgDir); err != nil { + return err + } + return nil +} + +func GoSrc(dstDir, srcDir string) error { + mods, err := module.ReadAllGoListModules(module.InDir(srcDir)) + if err != nil { + return fmt.Errorf("error reading modules: %w", err) + } + + mods = FilterModulesWithoutDir(mods) + + nodes, err := BuildModuleNodes(mods) + if err != nil { + return fmt.Errorf("error building module tree: %w", err) + } + + dstGoSrcDir := filepath.Join(dstDir, "src") + if err := os.RemoveAll(dstGoSrcDir); err != nil { + return err + } + + if err := os.Mkdir(dstGoSrcDir, 0777); err != nil { + return err + } + + if err := Nodes(dstGoSrcDir, nodes); err != nil { + return err + } + return nil +} + +type linkNodeError struct { + path string + err error +} + +func (l *linkNodeError) Error() string { + return fmt.Sprintf("[path %s]: %v", l.path, l.err) +} + +func joinLinkNodeError(node Node, err error) error { + if linkNodeErr, ok := err.(*linkNodeError); ok { + return &linkNodeError{ + path: path.Join(node.Segment, linkNodeErr.path), + err: linkNodeErr.err, + } + } + return &linkNodeError{ + path: node.Segment, + err: err, + } +} + +func Nodes(dir string, nodes []Node) error { + for _, node := range nodes { + if err := linkNode(dir, node); err != nil { + return joinLinkNodeError(node, err) + } + } + return nil +} + +func linkNode(dir string, node Node) error { + dstDir := filepath.Join(dir, node.Segment) + + // If the node specifies a module and no children are present, we can take optimize and directly + // symlink the module directory to the destination directory. 
+ if node.Module != nil && len(node.Children) == 0 { + srcDir := node.Module.Dir + + if err := os.Symlink(srcDir, dstDir); err != nil { + return fmt.Errorf("error symlinking node: %w", err) + } + } + + if err := os.RemoveAll(dstDir); err != nil { + return err + } + + if err := os.Mkdir(dstDir, 0777); err != nil { + return err + } + + if node.Module != nil { + srcDir := node.Module.Dir + entries, err := os.ReadDir(srcDir) + if err != nil { + return err + } + + for _, entry := range entries { + srcPath := filepath.Join(srcDir, entry.Name()) + dstPath := filepath.Join(dstDir, entry.Name()) + if err := os.Symlink(srcPath, dstPath); err != nil { + return fmt.Errorf("error symlinking entry %s to %s: %w", srcPath, dstPath, err) + } + } + } + return Nodes(dstDir, node.Children) +} diff --git a/vendor/github.com/ironcore-dev/vgopath/internal/module/module.go b/vendor/github.com/ironcore-dev/vgopath/internal/module/module.go new file mode 100644 index 000000000..c5dc27c7f --- /dev/null +++ b/vendor/github.com/ironcore-dev/vgopath/internal/module/module.go @@ -0,0 +1,184 @@ +// Copyright 2023 IronCore authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package module + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "os/exec" + "sync" + "syscall" + "time" +) + +type Module struct { + Path string + Dir string + Version string + Main bool +} + +type Reader interface { + Read(data []Module) (int, error) +} + +type ReadCloser interface { + Reader + io.Closer +} + +type readCloser struct { + mu sync.Mutex + + cmd *exec.Cmd + dec *json.Decoder + + closed bool + closeErr error +} + +type OpenGoListOptions struct { + Dir string + Command func() *exec.Cmd +} + +func (o *OpenGoListOptions) ApplyToOpenGoList(o2 *OpenGoListOptions) { + if o.Dir != "" { + o2.Dir = o.Dir + } + if o.Command != nil { + o2.Command = o.Command + } +} + +func (o *OpenGoListOptions) ApplyOptions(opts []OpenGoListOption) { + for _, opt := range opts { + opt.ApplyToOpenGoList(o) + } +} + +type OpenGoListOption interface { + ApplyToOpenGoList(o *OpenGoListOptions) +} + +type InDir string + +func (d InDir) ApplyToOpenGoList(o *OpenGoListOptions) { + o.Dir = string(d) +} + +func setOpenGoListDefaults(o *OpenGoListOptions) { + if o.Dir == "" { + o.Dir = "." 
+ } + if o.Command == nil { + o.Command = func() *exec.Cmd { + return exec.Command("go", "list", "-m", "-json", "all") + } + } +} + +func OpenGoList(opts ...OpenGoListOption) (ReadCloser, error) { + o := &OpenGoListOptions{} + o.ApplyOptions(opts) + setOpenGoListDefaults(o) + + cmd := o.Command() + cmd.Dir = o.Dir + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + + if err := cmd.Start(); err != nil { + return nil, err + } + + dec := json.NewDecoder(stdout) + + return &readCloser{ + cmd: cmd, + dec: dec, + }, nil +} + +func (r *readCloser) Read(data []Module) (n int, err error) { + for i := 0; i < len(data); i++ { + mod := &data[i] + if err := r.dec.Decode(mod); err != nil { + return i, err + } + } + return len(data), nil +} + +func (r *readCloser) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + + if r.closed { + return r.closeErr + } + + defer func() { r.closed = true }() + + waitDone := make(chan struct{}) + go func() { + defer close(waitDone) + _ = r.cmd.Wait() + }() + + _ = r.cmd.Process.Signal(syscall.SIGTERM) + + timer := time.NewTimer(3 * time.Second) + defer timer.Stop() + + select { + case <-timer.C: + r.closeErr = fmt.Errorf("error waiting for command to be completed") + case <-waitDone: + } + return r.closeErr +} + +func ReadAll(r Reader) ([]Module, error) { + b := make([]Module, 0, 128) + for { + if len(b) == cap(b) { + // Add more capacity (let append pick how much). + b = append(b, Module{})[:len(b)] + } + n, err := r.Read(b[len(b):cap(b)]) + b = b[:len(b)+n] + if err != nil { + if errors.Is(err, io.EOF) { + err = nil + } + return b, err + } + } +} + +func ReadAllGoListModules(opts ...OpenGoListOption) ([]Module, error) { + rc, err := OpenGoList(opts...) + if err != nil { + return nil, err + } + defer func() { _ = rc.Close() }() + + return ReadAll(rc) +} diff --git a/vendor/github.com/ironcore-dev/vgopath/internal/version/version.go b/vendor/github.com/ironcore-dev/vgopath/internal/version/version.go new file mode 100644 index 000000000..6e4ea243f --- /dev/null +++ b/vendor/github.com/ironcore-dev/vgopath/internal/version/version.go @@ -0,0 +1,38 @@ +// Copyright 2023 IronCore authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "fmt" + "io" + "os" + "runtime/debug" +) + +func Version() string { + info, ok := debug.ReadBuildInfo() + if !ok || info == nil || info.Main.Version == "" { + return "(unknown)" + } + return info.Main.Version +} + +func FPrint(w io.Writer) { + _, _ = fmt.Fprintf(w, "Version: %s\n", Version()) +} + +func Print() { + FPrint(os.Stdout) +} diff --git a/vendor/github.com/ironcore-dev/vgopath/main.go b/vendor/github.com/ironcore-dev/vgopath/main.go new file mode 100644 index 000000000..62826886f --- /dev/null +++ b/vendor/github.com/ironcore-dev/vgopath/main.go @@ -0,0 +1,27 @@ +// Copyright 2022 IronCore authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "log" + + "github.com/ironcore-dev/vgopath/internal/cmd/vgopath" +) + +func main() { + if err := vgopath.Command().Execute(); err != nil { + log.Fatalln(err.Error()) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index cb72bd6f2..fea67526e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,32 @@ +## 2.13.0 + +### Features + +Add PreviewSpect() to enable programmatic preview access to the suite report (fixes #1225) + +## 2.12.1 + +### Fixes +- Print logr prefix if it exists (#1275) [90d4846] + +### Maintenance +- Bump actions/checkout from 3 to 4 (#1271) [555f543] +- Bump golang.org/x/sys from 0.11.0 to 0.12.0 (#1270) [d867b7d] + +## 2.12.0 + +### Features + +- feat: allow MustPassRepeatedly decorator to be set at suite level (#1266) [05de518] + +### Fixes + +- fix-errors-in-readme (#1244) [27c2f5d] + +### Maintenance + +Various chores/dependency bumps. + ## 2.11.0 In prior versions of Ginkgo specs the CLI filter flags (e.g. `--focus`, `--label-filter`) would _override_ any programmatic focus. This behavior has proved surprising and confusing in at least the following ways: diff --git a/vendor/github.com/onsi/ginkgo/v2/README.md b/vendor/github.com/onsi/ginkgo/v2/README.md index d0473a467..cb23ffdf6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/README.md +++ b/vendor/github.com/onsi/ginkgo/v2/README.md @@ -15,7 +15,7 @@ import ( ... 
) -Describe("Checking books out of the library", Label("library"), func() { +var _ = Describe("Checking books out of the library", Label("library"), func() { var library *libraries.Library var book *books.Book var valjean *users.User @@ -50,7 +50,7 @@ Describe("Checking books out of the library", Label("library"), func() { It("tells the user", func(ctx SpecContext) { err := valjean.Checkout(ctx, library, "Les Miserables") - Expect(error).To(MatchError("Les Miserables is currently checked out")) + Expect(err).To(MatchError("Les Miserables is currently checked out")) }, SpecTimeout(time.Second * 5)) It("lets the user place a hold and get notified later", func(ctx SpecContext) { @@ -74,7 +74,7 @@ Describe("Checking books out of the library", Label("library"), func() { When("the library does not have the book in question", func() { It("tells the reader the book is unavailable", func(ctx SpecContext) { err := valjean.Checkout(ctx, library, "Les Miserables") - Expect(error).To(MatchError("Les Miserables is not in the library catalog")) + Expect(err).To(MatchError("Les Miserables is not in the library catalog")) }, SpecTimeout(time.Second * 5)) }) }) diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index a244bdc18..2d7a70ecc 100644 --- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -248,31 +248,13 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool { exitIfErr(types.GinkgoErrors.RerunningSuite()) } suiteDidRun = true - - suiteLabels := Labels{} - configErrors := []error{} - for _, arg := range args { - switch arg := arg.(type) { - case types.SuiteConfig: - suiteConfig = arg - case types.ReporterConfig: - reporterConfig = arg - case Labels: - suiteLabels = append(suiteLabels, arg...) - default: - configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg)) - } + err := global.PushClone() + if err != nil { + exitIfErr(err) } - exitIfErrors(configErrors) + defer global.PopClone() - configErrors = types.VetConfig(flagSet, suiteConfig, reporterConfig) - if len(configErrors) > 0 { - fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{red}}Ginkgo detected configuration issues:{{/}}\n")) - for _, err := range configErrors { - fmt.Fprintf(formatter.ColorableStdErr, err.Error()) - } - os.Exit(1) - } + suiteLabels := extractSuiteConfiguration(args) var reporter reporters.Reporter if suiteConfig.ParallelTotal == 1 { @@ -308,9 +290,8 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool { registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig) } - err := global.Suite.BuildTree() + err = global.Suite.BuildTree() exitIfErr(err) - suitePath, err := os.Getwd() exitIfErr(err) suitePath, err = filepath.Abs(suitePath) @@ -335,6 +316,69 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool { return passed } +func extractSuiteConfiguration(args []interface{}) Labels { + suiteLabels := Labels{} + configErrors := []error{} + for _, arg := range args { + switch arg := arg.(type) { + case types.SuiteConfig: + suiteConfig = arg + case types.ReporterConfig: + reporterConfig = arg + case Labels: + suiteLabels = append(suiteLabels, arg...) 
+ default: + configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg)) + } + } + exitIfErrors(configErrors) + + configErrors = types.VetConfig(flagSet, suiteConfig, reporterConfig) + if len(configErrors) > 0 { + fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{red}}Ginkgo detected configuration issues:{{/}}\n")) + for _, err := range configErrors { + fmt.Fprintf(formatter.ColorableStdErr, err.Error()) + } + os.Exit(1) + } + + return suiteLabels +} + +/* +PreviewSpecs walks the testing tree and produces a report without actually invoking the specs. +See http://onsi.github.io/ginkgo/#previewing-specs for more information. +*/ +func PreviewSpecs(description string, args ...any) Report { + err := global.PushClone() + if err != nil { + exitIfErr(err) + } + defer global.PopClone() + + suiteLabels := extractSuiteConfiguration(args) + priorDryRun, priorParallelTotal, priorParallelProcess := suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess + suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess = true, 1, 1 + defer func() { + suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess = priorDryRun, priorParallelTotal, priorParallelProcess + }() + reporter := reporters.NoopReporter{} + outputInterceptor = internal.NoopOutputInterceptor{} + client = nil + writer := GinkgoWriter.(*internal.Writer) + + err = global.Suite.BuildTree() + exitIfErr(err) + suitePath, err := os.Getwd() + exitIfErr(err) + suitePath, err = filepath.Abs(suitePath) + exitIfErr(err) + + global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) + + return global.Suite.GetPreviewReport() +} + /* Skip instructs Ginkgo to skip the current spec diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go index 0b9b19fe7..958daccbf 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go @@ -244,9 +244,7 @@ func labelFromCallExpr(ce *ast.CallExpr) []string { } if id.Name == "Label" { ls := extractLabels(expr) - for _, label := range ls { - labels = append(labels, label) - } + labels = append(labels, ls...) 
} } } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go b/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go index f2c0fd89c..464e3c97f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go @@ -6,6 +6,7 @@ import ( var Suite *internal.Suite var Failer *internal.Failer +var backupSuite *internal.Suite func init() { InitializeGlobals() @@ -15,3 +16,13 @@ func InitializeGlobals() { Failer = internal.NewFailer() Suite = internal.NewSuite() } + +func PushClone() error { + var err error + backupSuite, err = Suite.Clone() + return err +} + +func PopClone() { + Suite = backupSuite +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/vendor/github.com/onsi/ginkgo/v2/internal/group.go index ae1b7b011..02c9fe4fc 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/group.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/group.go @@ -321,7 +321,10 @@ func (g *group) run(specs Specs) { if !skip { var maxAttempts = 1 - if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { + if g.suite.config.MustPassRepeatedly > 0 { + maxAttempts = g.suite.config.MustPassRepeatedly + g.suite.currentSpecReport.MaxMustPassRepeatedly = maxAttempts + } else if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { maxAttempts = max(1, spec.MustPassRepeatedly()) } else if g.suite.config.FlakeAttempts > 0 { maxAttempts = g.suite.config.FlakeAttempts diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 14c7cf54e..16f0dc227 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -597,12 +597,16 @@ func (n Node) IsZero() bool { /* Nodes */ type Nodes []Node +func (n Nodes) Clone() Nodes { + nodes := make(Nodes, len(n)) + copy(nodes, n) + return nodes +} + func (n Nodes) CopyAppend(nodes ...Node) Nodes { numN := len(n) out := make(Nodes, numN+len(nodes)) - for i, node := range n { - out[i] = node - } + copy(out, n) for j, node := range nodes { out[numN+j] = node } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index ea0d259d9..fe6e8288a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -77,6 +77,20 @@ func NewSuite() *Suite { } } +func (suite *Suite) Clone() (*Suite, error) { + if suite.phase != PhaseBuildTopLevel { + return nil, fmt.Errorf("cnanot clone suite after tree has been built") + } + return &Suite{ + tree: &TreeNode{}, + phase: PhaseBuildTopLevel, + ProgressReporterManager: NewProgressReporterManager(), + topLevelContainers: suite.topLevelContainers.Clone(), + suiteNodes: suite.suiteNodes.Clone(), + selectiveLock: &sync.Mutex{}, + }, nil +} + func (suite *Suite) BuildTree() error { // During PhaseBuildTopLevel, the top level containers are stored in suite.topLevelCotainers and entered // We now enter PhaseBuildTree where these top level containers are entered and added to the spec tree @@ -328,6 +342,16 @@ func (suite *Suite) CurrentSpecReport() types.SpecReport { return report } +// Only valid in the preview context. In general suite.report only includes +// the specs run by _this_ node - it is only at the end of the suite that +// the parallel reports are aggregated. 
However in the preview context we run +// in series and +func (suite *Suite) GetPreviewReport() types.Report { + suite.selectiveLock.Lock() + defer suite.selectiveLock.Unlock() + return suite.report +} + func (suite *Suite) AddReportEntry(entry ReportEntry) error { if suite.phase != PhaseRun { return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go index 574f172df..aab42d5fb 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go @@ -135,6 +135,10 @@ func (w *Writer) Println(a ...interface{}) { func GinkgoLogrFunc(writer *Writer) logr.Logger { return funcr.New(func(prefix, args string) { - writer.Printf("%s\n", args) + if prefix == "" { + writer.Printf("%s\n", args) + } else { + writer.Printf("%s %s\n", prefix, args) + } }, funcr.Options{}) } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index 1014c7b49..c88fc85a7 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -27,6 +27,7 @@ type SuiteConfig struct { FailOnPending bool FailFast bool FlakeAttempts int + MustPassRepeatedly int DryRun bool PollProgressAfter time.Duration PollProgressInterval time.Duration diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go index 1e0dbfd9d..4fbdc3e9b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -453,8 +453,8 @@ func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error { func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error { return GinkgoError{ - Heading: fmt.Sprintf("No parameters have been passed to the Table Function"), - Message: fmt.Sprintf("The Table Function expected at least 1 parameter"), + Heading: "No parameters have been passed to the Table Function", + Message: "The Table Function expected at least 1 parameter", CodeLocation: cl, DocLink: "table-specs", } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go index d048a8ada..aae69b04c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/types.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -97,9 +97,7 @@ func (report Report) Add(other Report) Report { report.RunTime = report.EndTime.Sub(report.StartTime) reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports)) - for i := range report.SpecReports { - reports[i] = report.SpecReports[i] - } + copy(reports, report.SpecReports) offset := len(report.SpecReports) for i := range other.SpecReports { reports[i+offset] = other.SpecReports[i] diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index f895739b8..a37f30828 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.11.0" +const VERSION = "2.13.0" diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 1526497b9..4fc45f29c 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,32 @@ +## 1.29.0 + +### Features +- MatchError can now take an optional func(error) bool + 
description [2b39142] + +## 1.28.1 + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.12.0 to 2.13.0 [635d196] +- Bump github.com/google/go-cmp from 0.5.9 to 0.6.0 [14f8859] +- Bump golang.org/x/net from 0.14.0 to 0.17.0 [d8a6508] +- #703 doc(matchers): HaveEach() doc comment updated [2705bdb] +- Minor typos (#699) [375648c] + +## 1.28.0 + +### Features +- Add VerifyHost handler to ghttp (#698) [0b03b36] + +### Fixes +- Read Body for Newer Responses in HaveHTTPBodyMatcher (#686) [18d6673] + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.11.0 to 2.12.0 (#693) [55a33f3] +- Typo in matchers.go (#691) [de68e8f] +- Bump commonmarker from 0.23.9 to 0.23.10 in /docs (#690) [ab17f5e] +- chore: update test matrix for Go 1.21 (#689) [5069017] +- Bump golang.org/x/net from 0.12.0 to 0.14.0 (#688) [babe25f] + ## 1.27.10 ### Fixes diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 1fd1803ac..ba082146a 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.27.10" +const GOMEGA_VERSION = "1.29.0" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -242,7 +242,7 @@ func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Asse Eventually enables making assertions on asynchronous behavior. Eventually checks that an assertion *eventually* passes. Eventually blocks when called and attempts an assertion periodically until it passes or a timeout occurs. Both the timeout and polling interval are configurable as optional arguments. -The first optional argument is the timeout (which defaults to 1s), the second is the polling interval (which defaults to 10ms). Both intervals can be specified as time.Duration, parsable duration strings or floats/integers (in which case they are interpreted as seconds). In addition an optional context.Context can be passed in - Eventually will keep trying until either the timeout epxires or the context is cancelled, whichever comes first. +The first optional argument is the timeout (which defaults to 1s), the second is the polling interval (which defaults to 10ms). Both intervals can be specified as time.Duration, parsable duration strings or floats/integers (in which case they are interpreted as seconds). In addition an optional context.Context can be passed in - Eventually will keep trying until either the timeout expires or the context is cancelled, whichever comes first. Eventually works with any Gomega compatible matcher and supports making assertions against three categories of actual value: @@ -313,13 +313,13 @@ It is important to note that the function passed into Eventually is invoked *syn }).Should(BeNumerically(">=", 17)) }, SpecTimeout(time.Second)) -you an also use Eventually().WithContext(ctx) to pass in the context. Passed-in contexts play nicely with paseed-in arguments as long as the context appears first. You can rewrite the above example as: +you an also use Eventually().WithContext(ctx) to pass in the context. Passed-in contexts play nicely with passed-in arguments as long as the context appears first. 
You can rewrite the above example as: It("fetches the correct count", func(ctx SpecContext) { Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17)) }, SpecTimeout(time.Second)) -Either way the context passd to Eventually is also passed to the underlying funciton. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit. +Either way the context passd to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit. **Category 3: Making assertions _in_ the function passed into Eventually** @@ -349,7 +349,7 @@ For example: will rerun the function until all assertions pass. -You can also pass additional arugments to functions that take a Gomega. The only rule is that the Gomega argument must be first. If you also want to pass the context attached to Eventually you must ensure that is the second argument. For example: +You can also pass additional arguments to functions that take a Gomega. The only rule is that the Gomega argument must be first. If you also want to pass the context attached to Eventually you must ensure that is the second argument. For example: Eventually(func(g Gomega, ctx context.Context, path string, expected ...string){ tok, err := client.GetToken(ctx) diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index bdaf62b56..cd3f431d2 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -88,19 +88,44 @@ func Succeed() types.GomegaMatcher { } // MatchError succeeds if actual is a non-nil error that matches the passed in -// string, error, or matcher. +// string, error, function, or matcher. // // These are valid use-cases: // -// Expect(err).Should(MatchError("an error")) //asserts that err.Error() == "an error" -// Expect(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual) -// Expect(err).Should(MatchError(ContainSubstring("sprocket not found"))) // asserts that edrr.Error() contains substring "sprocket not found" +// When passed a string: +// +// Expect(err).To(MatchError("an error")) +// +// asserts that err.Error() == "an error" +// +// When passed an error: +// +// Expect(err).To(MatchError(SomeError)) +// +// First checks if errors.Is(err, SomeError). +// If that fails then it checks if reflect.DeepEqual(err, SomeError) repeatedly for err and any errors wrapped by err +// +// When passed a matcher: +// +// Expect(err).To(MatchError(ContainSubstring("sprocket not found"))) +// +// the matcher is passed err.Error(). In this case it asserts that err.Error() contains substring "sprocket not found" +// +// When passed a func(err) bool and a description: +// +// Expect(err).To(MatchError(os.IsNotExist, "IsNotExist")) +// +// the function is passed err and matches if the return value is true. The description is required to allow Gomega +// to print a useful error message. // // It is an error for err to be nil or an object that does not implement the // Error interface -func MatchError(expected interface{}) types.GomegaMatcher { +// +// The optional second argument is a description of the error function, if used. This is required when passing a function but is ignored in all other cases. 
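The updated doc comment above introduces the `func(error) bool` form of `MatchError`, which requires an accompanying description. A short test-style usage sketch, with an assumed package name and file path, following the predicate-plus-description pattern described in the doc comment:

```go
package example_test

import (
	"os"
	"testing"

	. "github.com/onsi/gomega"
)

func TestMissingFile(t *testing.T) {
	g := NewWithT(t)

	_, err := os.Open("/definitely/does/not/exist") // hypothetical path
	// Predicate plus a human-readable description, as documented above.
	g.Expect(err).To(MatchError(os.IsNotExist, "os.IsNotExist"))
	g.Expect(err).NotTo(MatchError(os.IsPermission, "os.IsPermission"))
}
```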
+func MatchError(expected interface{}, functionErrorDescription ...any) types.GomegaMatcher { return &matchers.MatchErrorMatcher{ - Expected: expected, + Expected: expected, + FuncErrDescription: functionErrorDescription, } } @@ -381,7 +406,7 @@ func ContainElements(elements ...interface{}) types.GomegaMatcher { } // HaveEach succeeds if actual solely contains elements that match the passed in element. -// Please note that if actual is empty, HaveEach always will succeed. +// Please note that if actual is empty, HaveEach always will fail. // By default HaveEach() uses Equal() to perform the match, however a // matcher can be passed in instead: // diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go index 6a3dcdc35..d14d9e5fc 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go @@ -11,8 +11,9 @@ import ( ) type HaveHTTPBodyMatcher struct { - Expected interface{} - cachedBody []byte + Expected interface{} + cachedResponse interface{} + cachedBody []byte } func (matcher *HaveHTTPBodyMatcher) Match(actual interface{}) (bool, error) { @@ -73,7 +74,7 @@ func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual interface{}) (m // the Reader is closed and it is not readable again in FailureMessage() // or NegatedFailureMessage() func (matcher *HaveHTTPBodyMatcher) body(actual interface{}) ([]byte, error) { - if matcher.cachedBody != nil { + if matcher.cachedResponse == actual && matcher.cachedBody != nil { return matcher.cachedBody, nil } @@ -91,8 +92,10 @@ func (matcher *HaveHTTPBodyMatcher) body(actual interface{}) ([]byte, error) { switch a := actual.(type) { case *http.Response: + matcher.cachedResponse = a return body(a) case *httptest.ResponseRecorder: + matcher.cachedResponse = a return body(a.Result()) default: return nil, fmt.Errorf("HaveHTTPBody matcher expects *http.Response or *httptest.ResponseRecorder. 
Got:\n%s", format.Object(actual, 1)) diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go index 827475ea5..c539dd389 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go @@ -9,10 +9,14 @@ import ( ) type MatchErrorMatcher struct { - Expected interface{} + Expected any + FuncErrDescription []any + isFunc bool } -func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *MatchErrorMatcher) Match(actual any) (success bool, err error) { + matcher.isFunc = false + if isNil(actual) { return false, fmt.Errorf("Expected an error, got nil") } @@ -42,6 +46,17 @@ func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err e return actualErr.Error() == expected, nil } + v := reflect.ValueOf(expected) + t := v.Type() + errorInterface := reflect.TypeOf((*error)(nil)).Elem() + if t.Kind() == reflect.Func && t.NumIn() == 1 && t.In(0).Implements(errorInterface) && t.NumOut() == 1 && t.Out(0).Kind() == reflect.Bool { + if len(matcher.FuncErrDescription) == 0 { + return false, fmt.Errorf("MatchError requires an additional description when passed a function") + } + matcher.isFunc = true + return v.Call([]reflect.Value{reflect.ValueOf(actualErr)})[0].Bool(), nil + } + var subMatcher omegaMatcher var hasSubMatcher bool if expected != nil { @@ -57,9 +72,15 @@ func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err e } func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) { + if matcher.isFunc { + return format.Message(actual, fmt.Sprintf("to match error function %s", matcher.FuncErrDescription[0])) + } return format.Message(actual, "to match error", matcher.Expected) } func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) { + if matcher.isFunc { + return format.Message(actual, fmt.Sprintf("not to match error function %s", matcher.FuncErrDescription[0])) + } return format.Message(actual, "not to match error", matcher.Expected) } diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go index 94c71ac1a..5dfacbb98 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.11 && gc && !purego -// +build go1.11,gc,!purego +//go:build gc && !purego +// +build gc,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s index 63cae9e6f..f1f66230d 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build go1.11 && gc && !purego -// +build go1.11,gc,!purego +//go:build gc && !purego +// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go index 025b49897..02ff3d05e 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!arm64 && !s390x && !ppc64le) || (arm64 && !go1.11) || !gc || purego -// +build !arm64,!s390x,!ppc64le arm64,!go1.11 !gc purego +//go:build (!arm64 && !s390x && !ppc64le) || !gc || purego +// +build !arm64,!s390x,!ppc64le !gc purego package chacha20 diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go deleted file mode 100644 index a7828345f..000000000 --- a/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ed25519 implements the Ed25519 signature algorithm. See -// https://ed25519.cr.yp.to/. -// -// These functions are also compatible with the “Ed25519” function defined in -// RFC 8032. However, unlike RFC 8032's formulation, this package's private key -// representation includes a public key suffix to make multiple signing -// operations with the same key more efficient. This package refers to the RFC -// 8032 private key as the “seed”. -// -// Beginning with Go 1.13, the functionality of this package was moved to the -// standard library as crypto/ed25519. This package only acts as a compatibility -// wrapper. -package ed25519 - -import ( - "crypto/ed25519" - "io" -) - -const ( - // PublicKeySize is the size, in bytes, of public keys as used in this package. - PublicKeySize = 32 - // PrivateKeySize is the size, in bytes, of private keys as used in this package. - PrivateKeySize = 64 - // SignatureSize is the size, in bytes, of signatures generated and verified by this package. - SignatureSize = 64 - // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. - SeedSize = 32 -) - -// PublicKey is the type of Ed25519 public keys. -// -// This type is an alias for crypto/ed25519's PublicKey type. -// See the crypto/ed25519 package for the methods on this type. -type PublicKey = ed25519.PublicKey - -// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. -// -// This type is an alias for crypto/ed25519's PrivateKey type. -// See the crypto/ed25519 package for the methods on this type. -type PrivateKey = ed25519.PrivateKey - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - return ed25519.GenerateKey(rand) -} - -// NewKeyFromSeed calculates a private key from a seed. It will panic if -// len(seed) is not SeedSize. This function is provided for interoperability -// with RFC 8032. RFC 8032's private keys correspond to seeds in this -// package. -func NewKeyFromSeed(seed []byte) PrivateKey { - return ed25519.NewKeyFromSeed(seed) -} - -// Sign signs the message with privateKey and returns a signature. It will -// panic if len(privateKey) is not PrivateKeySize. 
-func Sign(privateKey PrivateKey, message []byte) []byte { - return ed25519.Sign(privateKey, message) -} - -// Verify reports whether sig is a valid signature of message by publicKey. It -// will panic if len(publicKey) is not PublicKeySize. -func Verify(publicKey PublicKey, message, sig []byte) bool { - return ed25519.Verify(publicKey, message, sig) -} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go index fc04d03e1..27d0e14aa 100644 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -16,8 +16,9 @@ import ( // Certificate algorithm names from [PROTOCOL.certkeys]. These values can appear // in Certificate.Type, PublicKey.Type, and ClientConfig.HostKeyAlgorithms. -// Unlike key algorithm names, these are not passed to AlgorithmSigner and don't -// appear in the Signature.Format field. +// Unlike key algorithm names, these are not passed to AlgorithmSigner nor +// returned by MultiAlgorithmSigner and don't appear in the Signature.Format +// field. const ( CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" @@ -255,10 +256,17 @@ func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { return nil, errors.New("ssh: signer and cert have different public key") } - if algorithmSigner, ok := signer.(AlgorithmSigner); ok { + switch s := signer.(type) { + case MultiAlgorithmSigner: + return &multiAlgorithmSigner{ + AlgorithmSigner: &algorithmOpenSSHCertSigner{ + &openSSHCertSigner{cert, signer}, s}, + supportedAlgorithms: s.Algorithms(), + }, nil + case AlgorithmSigner: return &algorithmOpenSSHCertSigner{ - &openSSHCertSigner{cert, signer}, algorithmSigner}, nil - } else { + &openSSHCertSigner{cert, signer}, s}, nil + default: return &openSSHCertSigner{cert, signer}, nil } } @@ -432,7 +440,9 @@ func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { } // SignCert signs the certificate with an authority, setting the Nonce, -// SignatureKey, and Signature fields. +// SignatureKey, and Signature fields. If the authority implements the +// MultiAlgorithmSigner interface the first algorithm in the list is used. This +// is useful if you want to sign with a specific algorithm. func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { c.Nonce = make([]byte, 32) if _, err := io.ReadFull(rand, c.Nonce); err != nil { @@ -440,8 +450,20 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { } c.SignatureKey = authority.PublicKey() - // Default to KeyAlgoRSASHA512 for ssh-rsa signers. - if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA { + if v, ok := authority.(MultiAlgorithmSigner); ok { + if len(v.Algorithms()) == 0 { + return errors.New("the provided authority has no signature algorithm") + } + // Use the first algorithm in the list. + sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), v.Algorithms()[0]) + if err != nil { + return err + } + c.Signature = sig + return nil + } else if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA { + // Default to KeyAlgoRSASHA512 for ssh-rsa signers. + // TODO: consider using KeyAlgoRSASHA256 as default. 
sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), KeyAlgoRSASHA512) if err != nil { return err diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go index 409b5ea1d..5c3bc2572 100644 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -71,7 +71,9 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { for auth := AuthMethod(new(noneAuth)); auth != nil; { ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions) if err != nil { - return err + // We return the error later if there is no other method left to + // try. + ok = authFailure } if ok == authSuccess { // success @@ -101,6 +103,12 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { } } } + + if auth == nil && err != nil { + // We have an error and there are no other authentication methods to + // try, so we return it. + return err + } } return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried) } @@ -217,21 +225,45 @@ func (cb publicKeyCallback) method() string { return "publickey" } -func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (as AlgorithmSigner, algo string) { +func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiAlgorithmSigner, string, error) { + var as MultiAlgorithmSigner keyFormat := signer.PublicKey().Type() - // Like in sendKexInit, if the public key implements AlgorithmSigner we - // assume it supports all algorithms, otherwise only the key format one. - as, ok := signer.(AlgorithmSigner) - if !ok { - return algorithmSignerWrapper{signer}, keyFormat + // If the signer implements MultiAlgorithmSigner we use the algorithms it + // support, if it implements AlgorithmSigner we assume it supports all + // algorithms, otherwise only the key format one. + switch s := signer.(type) { + case MultiAlgorithmSigner: + as = s + case AlgorithmSigner: + as = &multiAlgorithmSigner{ + AlgorithmSigner: s, + supportedAlgorithms: algorithmsForKeyFormat(underlyingAlgo(keyFormat)), + } + default: + as = &multiAlgorithmSigner{ + AlgorithmSigner: algorithmSignerWrapper{signer}, + supportedAlgorithms: []string{underlyingAlgo(keyFormat)}, + } + } + + getFallbackAlgo := func() (string, error) { + // Fallback to use if there is no "server-sig-algs" extension or a + // common algorithm cannot be found. We use the public key format if the + // MultiAlgorithmSigner supports it, otherwise we return an error. + if !contains(as.Algorithms(), underlyingAlgo(keyFormat)) { + return "", fmt.Errorf("ssh: no common public key signature algorithm, server only supports %q for key type %q, signer only supports %v", + underlyingAlgo(keyFormat), keyFormat, as.Algorithms()) + } + return keyFormat, nil } extPayload, ok := extensions["server-sig-algs"] if !ok { - // If there is no "server-sig-algs" extension, fall back to the key - // format algorithm. - return as, keyFormat + // If there is no "server-sig-algs" extension use the fallback + // algorithm. + algo, err := getFallbackAlgo() + return as, algo, err } // The server-sig-algs extension only carries underlying signature @@ -245,15 +277,22 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (as Alg } } - keyAlgos := algorithmsForKeyFormat(keyFormat) + // Filter algorithms based on those supported by MultiAlgorithmSigner. 
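SignCert now honors the first algorithm of a MultiAlgorithmSigner authority, as described in the doc comment above. A sketch of signing a user certificate with rsa-sha2-256 explicitly, assuming throwaway keys generated on the spot:

    package main

    import (
        "crypto/ed25519"
        "crypto/rand"
        "crypto/rsa"
        "fmt"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        // CA signer restricted to a single algorithm, so SignCert picks it.
        caKey, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            panic(err)
        }
        caSigner, err := ssh.NewSignerFromKey(caKey)
        if err != nil {
            panic(err)
        }
        authority, err := ssh.NewSignerWithAlgorithms(caSigner.(ssh.AlgorithmSigner),
            []string{ssh.KeyAlgoRSASHA256})
        if err != nil {
            panic(err)
        }

        // Key to be certified.
        userPub, _, err := ed25519.GenerateKey(rand.Reader)
        if err != nil {
            panic(err)
        }
        sshPub, err := ssh.NewPublicKey(userPub)
        if err != nil {
            panic(err)
        }

        cert := &ssh.Certificate{
            Key:             sshPub,
            CertType:        ssh.UserCert,
            KeyId:           "alice",
            ValidPrincipals: []string{"alice"},
            ValidBefore:     ssh.CertTimeInfinity,
        }
        if err := cert.SignCert(rand.Reader, authority); err != nil {
            panic(err)
        }
        fmt.Println(cert.Signature.Format) // rsa-sha2-256
    }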
+ var keyAlgos []string + for _, algo := range algorithmsForKeyFormat(keyFormat) { + if contains(as.Algorithms(), underlyingAlgo(algo)) { + keyAlgos = append(keyAlgos, algo) + } + } + algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos) if err != nil { - // If there is no overlap, try the key anyway with the key format - // algorithm, to support servers that fail to list all supported - // algorithms. - return as, keyFormat + // If there is no overlap, return the fallback algorithm to support + // servers that fail to list all supported algorithms. + algo, err := getFallbackAlgo() + return as, algo, err } - return as, algo + return as, algo, nil } func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) { @@ -267,10 +306,17 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand return authFailure, nil, err } var methods []string + var errSigAlgo error for _, signer := range signers { pub := signer.PublicKey() - as, algo := pickSignatureAlgorithm(signer, extensions) - + as, algo, err := pickSignatureAlgorithm(signer, extensions) + if err != nil && errSigAlgo == nil { + // If we cannot negotiate a signature algorithm store the first + // error so we can return it to provide a more meaningful message if + // no other signers work. + errSigAlgo = err + continue + } ok, err := validateKey(pub, algo, user, c) if err != nil { return authFailure, nil, err @@ -317,22 +363,12 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand // contain the "publickey" method, do not attempt to authenticate with any // other keys. According to RFC 4252 Section 7, the latter can occur when // additional authentication methods are required. - if success == authSuccess || !containsMethod(methods, cb.method()) { + if success == authSuccess || !contains(methods, cb.method()) { return success, methods, err } } - return authFailure, methods, nil -} - -func containsMethod(methods []string, method string) bool { - for _, m := range methods { - if m == method { - return true - } - } - - return false + return authFailure, methods, errSigAlgo } // validateKey validates the key provided is acceptable to the server. diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go index f6bff60dc..edbe63340 100644 --- a/vendor/golang.org/x/crypto/ssh/doc.go +++ b/vendor/golang.org/x/crypto/ssh/doc.go @@ -13,6 +13,7 @@ others. References: + [PROTOCOL]: https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=HEAD [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go index 07a1843e0..70a7369ff 100644 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -461,19 +461,24 @@ func (t *handshakeTransport) sendKexInit() error { isServer := len(t.hostKeys) > 0 if isServer { for _, k := range t.hostKeys { - // If k is an AlgorithmSigner, presume it supports all signature algorithms - // associated with the key format. (Ideally AlgorithmSigner would have a - // method to advertise supported algorithms, but it doesn't. 
This means that - // adding support for a new algorithm is a breaking change, as we will - // immediately negotiate it even if existing implementations don't support - // it. If that ever happens, we'll have to figure something out.) - // If k is not an AlgorithmSigner, we can only assume it only supports the - // algorithms that matches the key format. (This means that Sign can't pick - // a different default.) + // If k is a MultiAlgorithmSigner, we restrict the signature + // algorithms. If k is a AlgorithmSigner, presume it supports all + // signature algorithms associated with the key format. If k is not + // an AlgorithmSigner, we can only assume it only supports the + // algorithms that matches the key format. (This means that Sign + // can't pick a different default). keyFormat := k.PublicKey().Type() - if _, ok := k.(AlgorithmSigner); ok { + + switch s := k.(type) { + case MultiAlgorithmSigner: + for _, algo := range algorithmsForKeyFormat(keyFormat) { + if contains(s.Algorithms(), underlyingAlgo(algo)) { + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo) + } + } + case AlgorithmSigner: msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algorithmsForKeyFormat(keyFormat)...) - } else { + default: msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat) } } @@ -642,16 +647,20 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { // On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO // message with the server-sig-algs extension if the client supports it. See - // RFC 8308, Sections 2.4 and 3.1. + // RFC 8308, Sections 2.4 and 3.1, and [PROTOCOL], Section 1.9. if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") { extInfo := &extInfoMsg{ - NumExtensions: 1, - Payload: make([]byte, 0, 4+15+4+len(supportedPubKeyAuthAlgosList)), + NumExtensions: 2, + Payload: make([]byte, 0, 4+15+4+len(supportedPubKeyAuthAlgosList)+4+16+4+1), } extInfo.Payload = appendInt(extInfo.Payload, len("server-sig-algs")) extInfo.Payload = append(extInfo.Payload, "server-sig-algs"...) extInfo.Payload = appendInt(extInfo.Payload, len(supportedPubKeyAuthAlgosList)) extInfo.Payload = append(extInfo.Payload, supportedPubKeyAuthAlgosList...) + extInfo.Payload = appendInt(extInfo.Payload, len("ping@openssh.com")) + extInfo.Payload = append(extInfo.Payload, "ping@openssh.com"...) + extInfo.Payload = appendInt(extInfo.Payload, 1) + extInfo.Payload = append(extInfo.Payload, "0"...) 
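With the sendKexInit change above, a host key wrapped in a MultiAlgorithmSigner only advertises the algorithms it was restricted to. A sketch of limiting a server host key to rsa-sha2-512; NoClientAuth is set only to keep the sketch short:

    package main

    import (
        "crypto/rand"
        "crypto/rsa"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        hostKey, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            panic(err)
        }
        hostSigner, err := ssh.NewSignerFromKey(hostKey)
        if err != nil {
            panic(err)
        }

        // Advertise only rsa-sha2-512 for this host key; the kex init will
        // filter ServerHostKeyAlgos against the signer's algorithm list.
        restricted, err := ssh.NewSignerWithAlgorithms(hostSigner.(ssh.AlgorithmSigner),
            []string{ssh.KeyAlgoRSASHA512})
        if err != nil {
            panic(err)
        }

        config := &ssh.ServerConfig{NoClientAuth: true}
        config.AddHostKey(restricted)
    }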
if err := t.conn.writePacket(Marshal(extInfo)); err != nil { return err } @@ -685,9 +694,16 @@ func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, a func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner { for _, k := range hostKeys { + if s, ok := k.(MultiAlgorithmSigner); ok { + if !contains(s.Algorithms(), underlyingAlgo(algo)) { + continue + } + } + if algo == k.PublicKey().Type() { return algorithmSignerWrapper{k} } + k, ok := k.(AlgorithmSigner) if !ok { continue diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index dac8ee724..ef1bad731 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -11,13 +11,16 @@ import ( "crypto/cipher" "crypto/dsa" "crypto/ecdsa" + "crypto/ed25519" "crypto/elliptic" "crypto/md5" + "crypto/rand" "crypto/rsa" "crypto/sha256" "crypto/x509" "encoding/asn1" "encoding/base64" + "encoding/binary" "encoding/hex" "encoding/pem" "errors" @@ -26,7 +29,6 @@ import ( "math/big" "strings" - "golang.org/x/crypto/ed25519" "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" ) @@ -295,6 +297,18 @@ func MarshalAuthorizedKey(key PublicKey) []byte { return b.Bytes() } +// MarshalPrivateKey returns a PEM block with the private key serialized in the +// OpenSSH format. +func MarshalPrivateKey(key crypto.PrivateKey, comment string) (*pem.Block, error) { + return marshalOpenSSHPrivateKey(key, comment, unencryptedOpenSSHMarshaler) +} + +// MarshalPrivateKeyWithPassphrase returns a PEM block holding the encrypted +// private key serialized in the OpenSSH format. +func MarshalPrivateKeyWithPassphrase(key crypto.PrivateKey, comment string, passphrase []byte) (*pem.Block, error) { + return marshalOpenSSHPrivateKey(key, comment, passphraseProtectedOpenSSHMarshaler(passphrase)) +} + // PublicKey represents a public key using an unspecified algorithm. // // Some PublicKeys provided by this package also implement CryptoPublicKey. @@ -321,7 +335,7 @@ type CryptoPublicKey interface { // A Signer can create signatures that verify against a public key. // -// Some Signers provided by this package also implement AlgorithmSigner. +// Some Signers provided by this package also implement MultiAlgorithmSigner. type Signer interface { // PublicKey returns the associated PublicKey. PublicKey() PublicKey @@ -336,9 +350,9 @@ type Signer interface { // An AlgorithmSigner is a Signer that also supports specifying an algorithm to // use for signing. // -// An AlgorithmSigner can't advertise the algorithms it supports, so it should -// be prepared to be invoked with every algorithm supported by the public key -// format. +// An AlgorithmSigner can't advertise the algorithms it supports, unless it also +// implements MultiAlgorithmSigner, so it should be prepared to be invoked with +// every algorithm supported by the public key format. type AlgorithmSigner interface { Signer @@ -349,6 +363,75 @@ type AlgorithmSigner interface { SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) } +// MultiAlgorithmSigner is an AlgorithmSigner that also reports the algorithms +// supported by that signer. +type MultiAlgorithmSigner interface { + AlgorithmSigner + + // Algorithms returns the available algorithms in preference order. The list + // must not be empty, and it must not include certificate types. + Algorithms() []string +} + +// NewSignerWithAlgorithms returns a signer restricted to the specified +// algorithms. 
The algorithms must be set in preference order. The list must not +// be empty, and it must not include certificate types. An error is returned if +// the specified algorithms are incompatible with the public key type. +func NewSignerWithAlgorithms(signer AlgorithmSigner, algorithms []string) (MultiAlgorithmSigner, error) { + if len(algorithms) == 0 { + return nil, errors.New("ssh: please specify at least one valid signing algorithm") + } + var signerAlgos []string + supportedAlgos := algorithmsForKeyFormat(underlyingAlgo(signer.PublicKey().Type())) + if s, ok := signer.(*multiAlgorithmSigner); ok { + signerAlgos = s.Algorithms() + } else { + signerAlgos = supportedAlgos + } + + for _, algo := range algorithms { + if !contains(supportedAlgos, algo) { + return nil, fmt.Errorf("ssh: algorithm %q is not supported for key type %q", + algo, signer.PublicKey().Type()) + } + if !contains(signerAlgos, algo) { + return nil, fmt.Errorf("ssh: algorithm %q is restricted for the provided signer", algo) + } + } + return &multiAlgorithmSigner{ + AlgorithmSigner: signer, + supportedAlgorithms: algorithms, + }, nil +} + +type multiAlgorithmSigner struct { + AlgorithmSigner + supportedAlgorithms []string +} + +func (s *multiAlgorithmSigner) Algorithms() []string { + return s.supportedAlgorithms +} + +func (s *multiAlgorithmSigner) isAlgorithmSupported(algorithm string) bool { + if algorithm == "" { + algorithm = underlyingAlgo(s.PublicKey().Type()) + } + for _, algo := range s.supportedAlgorithms { + if algorithm == algo { + return true + } + } + return false +} + +func (s *multiAlgorithmSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + if !s.isAlgorithmSupported(algorithm) { + return nil, fmt.Errorf("ssh: algorithm %q is not supported: %v", algorithm, s.supportedAlgorithms) + } + return s.AlgorithmSigner.SignWithAlgorithm(rand, data, algorithm) +} + type rsaPublicKey rsa.PublicKey func (r *rsaPublicKey) Type() string { @@ -512,6 +595,10 @@ func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { return k.SignWithAlgorithm(rand, data, k.PublicKey().Type()) } +func (k *dsaPrivateKey) Algorithms() []string { + return []string{k.PublicKey().Type()} +} + func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { if algorithm != "" && algorithm != k.PublicKey().Type() { return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) @@ -961,13 +1048,16 @@ func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { return s.SignWithAlgorithm(rand, data, s.pubKey.Type()) } +func (s *wrappedSigner) Algorithms() []string { + return algorithmsForKeyFormat(s.pubKey.Type()) +} + func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { if algorithm == "" { algorithm = s.pubKey.Type() } - supportedAlgos := algorithmsForKeyFormat(s.pubKey.Type()) - if !contains(supportedAlgos, algorithm) { + if !contains(s.Algorithms(), algorithm) { return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type()) } @@ -1241,28 +1331,106 @@ func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc { } } +func unencryptedOpenSSHMarshaler(privKeyBlock []byte) ([]byte, string, string, string, error) { + key := generateOpenSSHPadding(privKeyBlock, 8) + return key, "none", "none", "", nil +} + +func passphraseProtectedOpenSSHMarshaler(passphrase []byte) openSSHEncryptFunc { + 
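NewSignerWithAlgorithms, defined above, also feeds the reworked pickSignatureAlgorithm on the client side. A sketch of offering only the SHA-2 RSA algorithms during publickey authentication; the user name and host address are placeholders:

    package main

    import (
        "crypto/rand"
        "crypto/rsa"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        key, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            panic(err)
        }
        signer, err := ssh.NewSignerFromKey(key)
        if err != nil {
            panic(err)
        }

        // The offered algorithms are intersected with the server-sig-algs
        // extension; ssh-rsa is never proposed for this signer.
        restricted, err := ssh.NewSignerWithAlgorithms(signer.(ssh.AlgorithmSigner),
            []string{ssh.KeyAlgoRSASHA512, ssh.KeyAlgoRSASHA256})
        if err != nil {
            panic(err)
        }

        config := &ssh.ClientConfig{
            User:            "alice", // placeholder
            Auth:            []ssh.AuthMethod{ssh.PublicKeys(restricted)},
            HostKeyCallback: ssh.InsecureIgnoreHostKey(), // not for production use
        }
        client, err := ssh.Dial("tcp", "example.com:22", config) // placeholder host
        if err != nil {
            panic(err)
        }
        defer client.Close()
    }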
return func(privKeyBlock []byte) ([]byte, string, string, string, error) { + salt := make([]byte, 16) + if _, err := rand.Read(salt); err != nil { + return nil, "", "", "", err + } + + opts := struct { + Salt []byte + Rounds uint32 + }{salt, 16} + + // Derive key to encrypt the private key block. + k, err := bcrypt_pbkdf.Key(passphrase, salt, int(opts.Rounds), 32+aes.BlockSize) + if err != nil { + return nil, "", "", "", err + } + + // Add padding matching the block size of AES. + keyBlock := generateOpenSSHPadding(privKeyBlock, aes.BlockSize) + + // Encrypt the private key using the derived secret. + + dst := make([]byte, len(keyBlock)) + key, iv := k[:32], k[32:] + block, err := aes.NewCipher(key) + if err != nil { + return nil, "", "", "", err + } + + stream := cipher.NewCTR(block, iv) + stream.XORKeyStream(dst, keyBlock) + + return dst, "aes256-ctr", "bcrypt", string(Marshal(opts)), nil + } +} + +const privateKeyAuthMagic = "openssh-key-v1\x00" + type openSSHDecryptFunc func(CipherName, KdfName, KdfOpts string, PrivKeyBlock []byte) ([]byte, error) +type openSSHEncryptFunc func(PrivKeyBlock []byte) (ProtectedKeyBlock []byte, cipherName, kdfName, kdfOptions string, err error) + +type openSSHEncryptedPrivateKey struct { + CipherName string + KdfName string + KdfOpts string + NumKeys uint32 + PubKey []byte + PrivKeyBlock []byte +} + +type openSSHPrivateKey struct { + Check1 uint32 + Check2 uint32 + Keytype string + Rest []byte `ssh:"rest"` +} + +type openSSHRSAPrivateKey struct { + N *big.Int + E *big.Int + D *big.Int + Iqmp *big.Int + P *big.Int + Q *big.Int + Comment string + Pad []byte `ssh:"rest"` +} + +type openSSHEd25519PrivateKey struct { + Pub []byte + Priv []byte + Comment string + Pad []byte `ssh:"rest"` +} + +type openSSHECDSAPrivateKey struct { + Curve string + Pub []byte + D *big.Int + Comment string + Pad []byte `ssh:"rest"` +} // parseOpenSSHPrivateKey parses an OpenSSH private key, using the decrypt // function to unwrap the encrypted portion. unencryptedOpenSSHKey can be used // as the decrypt function to parse an unencrypted private key. See // https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key. 
func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.PrivateKey, error) { - const magic = "openssh-key-v1\x00" - if len(key) < len(magic) || string(key[:len(magic)]) != magic { + if len(key) < len(privateKeyAuthMagic) || string(key[:len(privateKeyAuthMagic)]) != privateKeyAuthMagic { return nil, errors.New("ssh: invalid openssh private key format") } - remaining := key[len(magic):] - - var w struct { - CipherName string - KdfName string - KdfOpts string - NumKeys uint32 - PubKey []byte - PrivKeyBlock []byte - } + remaining := key[len(privateKeyAuthMagic):] + var w openSSHEncryptedPrivateKey if err := Unmarshal(remaining, &w); err != nil { return nil, err } @@ -1284,13 +1452,7 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv return nil, err } - pk1 := struct { - Check1 uint32 - Check2 uint32 - Keytype string - Rest []byte `ssh:"rest"` - }{} - + var pk1 openSSHPrivateKey if err := Unmarshal(privKeyBlock, &pk1); err != nil || pk1.Check1 != pk1.Check2 { if w.CipherName != "none" { return nil, x509.IncorrectPasswordError @@ -1300,18 +1462,7 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv switch pk1.Keytype { case KeyAlgoRSA: - // https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773 - key := struct { - N *big.Int - E *big.Int - D *big.Int - Iqmp *big.Int - P *big.Int - Q *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - + var key openSSHRSAPrivateKey if err := Unmarshal(pk1.Rest, &key); err != nil { return nil, err } @@ -1337,13 +1488,7 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv return pk, nil case KeyAlgoED25519: - key := struct { - Pub []byte - Priv []byte - Comment string - Pad []byte `ssh:"rest"` - }{} - + var key openSSHEd25519PrivateKey if err := Unmarshal(pk1.Rest, &key); err != nil { return nil, err } @@ -1360,14 +1505,7 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv copy(pk, key.Priv) return &pk, nil case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - key := struct { - Curve string - Pub []byte - D *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - + var key openSSHECDSAPrivateKey if err := Unmarshal(pk1.Rest, &key); err != nil { return nil, err } @@ -1415,6 +1553,131 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv } } +func marshalOpenSSHPrivateKey(key crypto.PrivateKey, comment string, encrypt openSSHEncryptFunc) (*pem.Block, error) { + var w openSSHEncryptedPrivateKey + var pk1 openSSHPrivateKey + + // Random check bytes. + var check uint32 + if err := binary.Read(rand.Reader, binary.BigEndian, &check); err != nil { + return nil, err + } + + pk1.Check1 = check + pk1.Check2 = check + w.NumKeys = 1 + + // Use a []byte directly on ed25519 keys. + if k, ok := key.(*ed25519.PrivateKey); ok { + key = *k + } + + switch k := key.(type) { + case *rsa.PrivateKey: + E := new(big.Int).SetInt64(int64(k.PublicKey.E)) + // Marshal public key: + // E and N are in reversed order in the public and private key. + pubKey := struct { + KeyType string + E *big.Int + N *big.Int + }{ + KeyAlgoRSA, + E, k.PublicKey.N, + } + w.PubKey = Marshal(pubKey) + + // Marshal private key. 
+ key := openSSHRSAPrivateKey{ + N: k.PublicKey.N, + E: E, + D: k.D, + Iqmp: k.Precomputed.Qinv, + P: k.Primes[0], + Q: k.Primes[1], + Comment: comment, + } + pk1.Keytype = KeyAlgoRSA + pk1.Rest = Marshal(key) + case ed25519.PrivateKey: + pub := make([]byte, ed25519.PublicKeySize) + priv := make([]byte, ed25519.PrivateKeySize) + copy(pub, k[32:]) + copy(priv, k) + + // Marshal public key. + pubKey := struct { + KeyType string + Pub []byte + }{ + KeyAlgoED25519, pub, + } + w.PubKey = Marshal(pubKey) + + // Marshal private key. + key := openSSHEd25519PrivateKey{ + Pub: pub, + Priv: priv, + Comment: comment, + } + pk1.Keytype = KeyAlgoED25519 + pk1.Rest = Marshal(key) + case *ecdsa.PrivateKey: + var curve, keyType string + switch name := k.Curve.Params().Name; name { + case "P-256": + curve = "nistp256" + keyType = KeyAlgoECDSA256 + case "P-384": + curve = "nistp384" + keyType = KeyAlgoECDSA384 + case "P-521": + curve = "nistp521" + keyType = KeyAlgoECDSA521 + default: + return nil, errors.New("ssh: unhandled elliptic curve " + name) + } + + pub := elliptic.Marshal(k.Curve, k.PublicKey.X, k.PublicKey.Y) + + // Marshal public key. + pubKey := struct { + KeyType string + Curve string + Pub []byte + }{ + keyType, curve, pub, + } + w.PubKey = Marshal(pubKey) + + // Marshal private key. + key := openSSHECDSAPrivateKey{ + Curve: curve, + Pub: pub, + D: k.D, + Comment: comment, + } + pk1.Keytype = keyType + pk1.Rest = Marshal(key) + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", k) + } + + var err error + // Add padding and encrypt the key if necessary. + w.PrivKeyBlock, w.CipherName, w.KdfName, w.KdfOpts, err = encrypt(Marshal(pk1)) + if err != nil { + return nil, err + } + + b := Marshal(w) + block := &pem.Block{ + Type: "OPENSSH PRIVATE KEY", + Bytes: append([]byte(privateKeyAuthMagic), b...), + } + return block, nil +} + func checkOpenSSHKeyPadding(pad []byte) error { for i, b := range pad { if int(b) != i+1 { @@ -1424,6 +1687,13 @@ func checkOpenSSHKeyPadding(pad []byte) error { return nil } +func generateOpenSSHPadding(block []byte, blockSize int) []byte { + for i, l := 0, len(block); (l+i)%blockSize != 0; i++ { + block = append(block, byte(i+1)) + } + return block +} + // FingerprintLegacyMD5 returns the user presentation of the key's // fingerprint as described by RFC 4716 section 4. func FingerprintLegacyMD5(pubKey PublicKey) string { diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go index 922032d95..b55f86056 100644 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -349,6 +349,20 @@ type userAuthGSSAPIError struct { LanguageTag string } +// Transport layer OpenSSH extension. See [PROTOCOL], section 1.9 +const msgPing = 192 + +type pingMsg struct { + Data string `sshtype:"192"` +} + +// Transport layer OpenSSH extension. See [PROTOCOL], section 1.9 +const msgPong = 193 + +type pongMsg struct { + Data string `sshtype:"193"` +} + // typeTags returns the possible type bytes for the given reflect.Type, which // should be a struct. The possible values are separated by a '|' character. 
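The marshalling code above backs the new MarshalPrivateKey and MarshalPrivateKeyWithPassphrase entry points. A sketch of serializing a freshly generated ed25519 key in the OpenSSH PEM format; the comment string is arbitrary:

    package main

    import (
        "crypto/ed25519"
        "crypto/rand"
        "encoding/pem"
        "fmt"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        _, priv, err := ed25519.GenerateKey(rand.Reader)
        if err != nil {
            panic(err)
        }

        // Use MarshalPrivateKeyWithPassphrase instead to get an
        // aes256-ctr/bcrypt encrypted block.
        block, err := ssh.MarshalPrivateKey(priv, "example@host")
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s", pem.EncodeToMemory(block))
    }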
func typeTags(structType reflect.Type) (tags []byte) { diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go index 9654c0186..d2d24c635 100644 --- a/vendor/golang.org/x/crypto/ssh/mux.go +++ b/vendor/golang.org/x/crypto/ssh/mux.go @@ -231,6 +231,12 @@ func (m *mux) onePacket() error { return m.handleChannelOpen(packet) case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: return m.handleGlobalPacket(packet) + case msgPing: + var msg pingMsg + if err := Unmarshal(packet, &msg); err != nil { + return fmt.Errorf("failed to unmarshal ping@openssh.com message: %w", err) + } + return m.sendMessage(pongMsg(msg)) } // assume a channel packet. diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index b21322aff..727c71b9c 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -576,7 +576,16 @@ userAuthLoop: if !ok || len(payload) > 0 { return nil, parseError(msgUserAuthRequest) } - + // Ensure the declared public key algo is compatible with the + // decoded one. This check will ensure we don't accept e.g. + // ssh-rsa-cert-v01@openssh.com algorithm with ssh-rsa public + // key type. The algorithm and public key type must be + // consistent: both must be certificate algorithms, or neither. + if !contains(algorithmsForKeyFormat(pubKey.Type()), algo) { + authErr = fmt.Errorf("ssh: public key type %q not compatible with selected algorithm %q", + pubKey.Type(), algo) + break + } // Ensure the public key algo and signature algo // are supported. Compare the private key // algorithm name that corresponds to algo with diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 6d5e00887..02c88b6b3 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -581,9 +581,11 @@ type serverConn struct { advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client curClientStreams uint32 // number of open streams initiated by the client curPushedStreams uint32 // number of open streams initiated by server push + curHandlers uint32 // number of running handler goroutines maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes streams map[uint32]*stream + unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) @@ -981,6 +983,8 @@ func (sc *serverConn) serve() { return case gracefulShutdownMsg: sc.startGracefulShutdownInternal() + case handlerDoneMsg: + sc.handlerDone() default: panic("unknown timer") } @@ -1020,6 +1024,7 @@ var ( idleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) + handlerDoneMsg = new(serverMessage) ) func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } @@ -1892,9 +1897,11 @@ func (st *stream) copyTrailersToHandlerRequest() { // onReadTimeout is run on its own goroutine (from time.AfterFunc) // when the stream's ReadTimeout has fired. func (st *stream) onReadTimeout() { - // Wrap the ErrDeadlineExceeded to avoid callers depending on us - // returning the bare error. 
- st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded)) + if st.body != nil { + // Wrap the ErrDeadlineExceeded to avoid callers depending on us + // returning the bare error. + st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded)) + } } // onWriteTimeout is run on its own goroutine (from time.AfterFunc) @@ -2012,13 +2019,10 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. if sc.hs.ReadTimeout != 0 { sc.conn.SetReadDeadline(time.Time{}) - if st.body != nil { - st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) - } + st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } - go sc.runHandler(rw, req, handler) - return nil + return sc.scheduleHandler(id, rw, req, handler) } func (sc *serverConn) upgradeRequest(req *http.Request) { @@ -2038,6 +2042,10 @@ func (sc *serverConn) upgradeRequest(req *http.Request) { sc.conn.SetReadDeadline(time.Time{}) } + // This is the first request on the connection, + // so start the handler directly rather than going + // through scheduleHandler. + sc.curHandlers++ go sc.runHandler(rw, req, sc.handler.ServeHTTP) } @@ -2278,8 +2286,62 @@ func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *response return &responseWriter{rws: rws} } +type unstartedHandler struct { + streamID uint32 + rw *responseWriter + req *http.Request + handler func(http.ResponseWriter, *http.Request) +} + +// scheduleHandler starts a handler goroutine, +// or schedules one to start as soon as an existing handler finishes. +func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error { + sc.serveG.check() + maxHandlers := sc.advMaxStreams + if sc.curHandlers < maxHandlers { + sc.curHandlers++ + go sc.runHandler(rw, req, handler) + return nil + } + if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) { + return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm)) + } + sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{ + streamID: streamID, + rw: rw, + req: req, + handler: handler, + }) + return nil +} + +func (sc *serverConn) handlerDone() { + sc.serveG.check() + sc.curHandlers-- + i := 0 + maxHandlers := sc.advMaxStreams + for ; i < len(sc.unstartedHandlers); i++ { + u := sc.unstartedHandlers[i] + if sc.streams[u.streamID] == nil { + // This stream was reset before its goroutine had a chance to start. + continue + } + if sc.curHandlers >= maxHandlers { + break + } + sc.curHandlers++ + go sc.runHandler(u.rw, u.req, u.handler) + sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references + } + sc.unstartedHandlers = sc.unstartedHandlers[i:] + if len(sc.unstartedHandlers) == 0 { + sc.unstartedHandlers = nil + } +} + // Run on its own goroutine. 
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { rw.rws.stream.cancelCtx() diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go index bd6c128af..ff7da60eb 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -7,6 +7,6 @@ package cpu -const cacheLineSize = 32 +const cacheLineSize = 64 func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/hwcap_linux.go b/vendor/golang.org/x/sys/cpu/hwcap_linux.go index 1d9d91f3e..34e49f955 100644 --- a/vendor/golang.org/x/sys/cpu/hwcap_linux.go +++ b/vendor/golang.org/x/sys/cpu/hwcap_linux.go @@ -5,7 +5,7 @@ package cpu import ( - "io/ioutil" + "os" ) const ( @@ -39,7 +39,7 @@ func readHWCAP() error { return nil } - buf, err := ioutil.ReadFile(procAuxv) + buf, err := os.ReadFile(procAuxv) if err != nil { // e.g. on android /proc/self/auxv is not accessible, so silently // ignore the error and leave Initialized = false. On some diff --git a/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go deleted file mode 100644 index e07899b90..000000000 --- a/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package unsafeheader contains header declarations for the Go runtime's -// slice and string implementations. -// -// This package allows x/sys to use types equivalent to -// reflect.SliceHeader and reflect.StringHeader without introducing -// a dependency on the (relatively heavy) "reflect" package. -package unsafeheader - -import ( - "unsafe" -) - -// Slice is the runtime representation of a slice. -// It cannot be used safely or portably and its representation may change in a later release. -type Slice struct { - Data unsafe.Pointer - Len int - Cap int -} - -// String is the runtime representation of a string. -// It cannot be used safely or portably and its representation may change in a later release. 
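The unstartedHandlers queue above caps running handler goroutines at the advertised SETTINGS_MAX_CONCURRENT_STREAMS and resets the connection with ENHANCE_YOUR_CALM ("too_many_early_resets") once too many streams are queued. A sketch of where that limit is configured in a typical server, assuming the usual ConfigureServer path and placeholder certificate files:

    package main

    import (
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        srv := &http.Server{Addr: ":8443"}

        // MaxConcurrentStreams determines the advertised setting and therefore
        // also the per-connection cap on concurrently running handlers.
        if err := http2.ConfigureServer(srv, &http2.Server{MaxConcurrentStreams: 128}); err != nil {
            panic(err)
        }
        if err := srv.ListenAndServeTLS("cert.pem", "key.pem"); err != nil { // placeholder certs
            panic(err)
        }
    }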
-type String struct { - Data unsafe.Pointer - Len int -} diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go index 39dba6ca6..463c3eff7 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -7,12 +7,6 @@ package unix -import "unsafe" - func ptrace(request int, pid int, addr uintptr, data uintptr) error { return ptrace1(request, pid, addr, data) } - -func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) error { - return ptrace1Ptr(request, pid, addr, data) -} diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go index 9ea66330a..ed0509a01 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -7,12 +7,6 @@ package unix -import "unsafe" - func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { return ENOTSUP } - -func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { - return ENOTSUP -} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 9a6e5acac..e94e6cdac 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -487,8 +487,6 @@ func Fsync(fd int) error { //sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys write(fd int, p []byte) (n int, err error) -//sys readlen(fd int, p *byte, np int) (n int, err error) = read -//sys writelen(fd int, p *byte, np int) (n int, err error) = write //sys Dup2(oldfd int, newfd int) (err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = posix_fadvise64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 135cc3cd7..59542a897 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -644,189 +644,3 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE - -/* - * Unimplemented - */ -// Profil -// Sigaction -// Sigprocmask -// Getlogin -// Sigpending -// Sigaltstack -// Ioctl -// Reboot -// Execve -// Vfork -// Sbrk -// Sstk -// Ovadvise -// Mincore -// Setitimer -// Swapon -// Select -// Sigsuspend -// Readv -// Writev -// Nfssvc -// Getfh -// Quotactl -// Csops -// Waitid -// Add_profil -// Kdebug_trace -// Sigreturn -// Atsocket -// Kqueue_from_portset_np -// Kqueue_portset -// Getattrlist -// Getdirentriesattr -// Searchfs -// Delete -// Copyfile -// Watchevent -// Waitevent -// Modwatch -// Fsctl -// Initgroups -// Posix_spawn -// Nfsclnt -// Fhopen -// Minherit -// Semsys -// Msgsys -// Shmsys -// Semctl -// Semget -// Semop -// Msgctl -// Msgget -// Msgsnd -// Msgrcv -// Shm_open -// Shm_unlink -// Sem_open -// Sem_close -// Sem_unlink -// Sem_wait -// Sem_trywait -// Sem_post -// Sem_getvalue -// Sem_init -// Sem_destroy -// Open_extended -// Umask_extended -// Stat_extended -// Lstat_extended -// Fstat_extended -// Chmod_extended -// Fchmod_extended -// Access_extended -// Settid 
-// Gettid -// Setsgroups -// Getsgroups -// Setwgroups -// Getwgroups -// Mkfifo_extended -// Mkdir_extended -// Identitysvc -// Shared_region_check_np -// Shared_region_map_np -// __pthread_mutex_destroy -// __pthread_mutex_init -// __pthread_mutex_lock -// __pthread_mutex_trylock -// __pthread_mutex_unlock -// __pthread_cond_init -// __pthread_cond_destroy -// __pthread_cond_broadcast -// __pthread_cond_signal -// Setsid_with_pid -// __pthread_cond_timedwait -// Aio_fsync -// Aio_return -// Aio_suspend -// Aio_cancel -// Aio_error -// Aio_read -// Aio_write -// Lio_listio -// __pthread_cond_wait -// Iopolicysys -// __pthread_kill -// __pthread_sigmask -// __sigwait -// __disable_threadsignal -// __pthread_markcancel -// __pthread_canceled -// __semwait_signal -// Proc_info -// sendfile -// Stat64_extended -// Lstat64_extended -// Fstat64_extended -// __pthread_chdir -// __pthread_fchdir -// Audit -// Auditon -// Getauid -// Setauid -// Getaudit -// Setaudit -// Getaudit_addr -// Setaudit_addr -// Auditctl -// Bsdthread_create -// Bsdthread_terminate -// Stack_snapshot -// Bsdthread_register -// Workq_open -// Workq_ops -// __mac_execve -// __mac_syscall -// __mac_get_file -// __mac_set_file -// __mac_get_link -// __mac_set_link -// __mac_get_proc -// __mac_set_proc -// __mac_get_fd -// __mac_set_fd -// __mac_get_pid -// __mac_get_lcid -// __mac_get_lctx -// __mac_set_lctx -// Setlcid -// Read_nocancel -// Write_nocancel -// Open_nocancel -// Close_nocancel -// Wait4_nocancel -// Recvmsg_nocancel -// Sendmsg_nocancel -// Recvfrom_nocancel -// Accept_nocancel -// Fcntl_nocancel -// Select_nocancel -// Fsync_nocancel -// Connect_nocancel -// Sigsuspend_nocancel -// Readv_nocancel -// Writev_nocancel -// Sendto_nocancel -// Pread_nocancel -// Pwrite_nocancel -// Waitid_nocancel -// Poll_nocancel -// Msgsnd_nocancel -// Msgrcv_nocancel -// Sem_wait_nocancel -// Aio_suspend_nocancel -// __sigwait_nocancel -// __semwait_signal_nocancel -// __mac_mount -// __mac_get_mount -// __mac_getfsstat diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index 9fa879806..b37310ce9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -47,6 +47,5 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace -//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index f17b8c526..d51ec9963 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -47,6 +47,5 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT //sys Lstat(path string, stat *Stat_t) (err error) //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace -//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data 
uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index d4ce988e7..97cb916f2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -343,203 +343,5 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) - -/* - * Unimplemented - * TODO(jsing): Update this list for DragonFly. - */ -// Profil -// Sigaction -// Sigprocmask -// Getlogin -// Sigpending -// Sigaltstack -// Reboot -// Execve -// Vfork -// Sbrk -// Sstk -// Ovadvise -// Mincore -// Setitimer -// Swapon -// Select -// Sigsuspend -// Readv -// Writev -// Nfssvc -// Getfh -// Quotactl -// Mount -// Csops -// Waitid -// Add_profil -// Kdebug_trace -// Sigreturn -// Atsocket -// Kqueue_from_portset_np -// Kqueue_portset -// Getattrlist -// Setattrlist -// Getdirentriesattr -// Searchfs -// Delete -// Copyfile -// Watchevent -// Waitevent -// Modwatch -// Getxattr -// Fgetxattr -// Setxattr -// Fsetxattr -// Removexattr -// Fremovexattr -// Listxattr -// Flistxattr -// Fsctl -// Initgroups -// Posix_spawn -// Nfsclnt -// Fhopen -// Minherit -// Semsys -// Msgsys -// Shmsys -// Semctl -// Semget -// Semop -// Msgctl -// Msgget -// Msgsnd -// Msgrcv -// Shmat -// Shmctl -// Shmdt -// Shmget -// Shm_open -// Shm_unlink -// Sem_open -// Sem_close -// Sem_unlink -// Sem_wait -// Sem_trywait -// Sem_post -// Sem_getvalue -// Sem_init -// Sem_destroy -// Open_extended -// Umask_extended -// Stat_extended -// Lstat_extended -// Fstat_extended -// Chmod_extended -// Fchmod_extended -// Access_extended -// Settid -// Gettid -// Setsgroups -// Getsgroups -// Setwgroups -// Getwgroups -// Mkfifo_extended -// Mkdir_extended -// Identitysvc -// Shared_region_check_np -// Shared_region_map_np -// __pthread_mutex_destroy -// __pthread_mutex_init -// __pthread_mutex_lock -// __pthread_mutex_trylock -// __pthread_mutex_unlock -// __pthread_cond_init -// __pthread_cond_destroy -// __pthread_cond_broadcast -// __pthread_cond_signal -// Setsid_with_pid -// __pthread_cond_timedwait -// Aio_fsync -// Aio_return -// Aio_suspend -// Aio_cancel -// Aio_error -// Aio_read -// Aio_write -// Lio_listio -// __pthread_cond_wait -// Iopolicysys -// __pthread_kill -// __pthread_sigmask -// __sigwait -// __disable_threadsignal -// __pthread_markcancel -// __pthread_canceled -// __semwait_signal -// Proc_info -// Stat64_extended -// Lstat64_extended -// Fstat64_extended -// __pthread_chdir -// __pthread_fchdir -// Audit -// Auditon -// Getauid -// Setauid -// Getaudit -// Setaudit -// Getaudit_addr -// Setaudit_addr -// Auditctl -// Bsdthread_create -// Bsdthread_terminate -// Stack_snapshot -// Bsdthread_register -// Workq_open -// Workq_ops -// __mac_execve -// __mac_syscall -// __mac_get_file -// __mac_set_file -// __mac_get_link -// __mac_set_link 
-// __mac_get_proc -// __mac_set_proc -// __mac_get_fd -// __mac_set_fd -// __mac_get_pid -// __mac_get_lcid -// __mac_get_lctx -// __mac_set_lctx -// Setlcid -// Read_nocancel -// Write_nocancel -// Open_nocancel -// Close_nocancel -// Wait4_nocancel -// Recvmsg_nocancel -// Sendmsg_nocancel -// Recvfrom_nocancel -// Accept_nocancel -// Fcntl_nocancel -// Select_nocancel -// Fsync_nocancel -// Connect_nocancel -// Sigsuspend_nocancel -// Readv_nocancel -// Writev_nocancel -// Sendto_nocancel -// Pread_nocancel -// Pwrite_nocancel -// Waitid_nocancel -// Msgsnd_nocancel -// Msgrcv_nocancel -// Sem_wait_nocancel -// Aio_suspend_nocancel -// __sigwait_nocancel -// __semwait_signal_nocancel -// __mac_mount -// __mac_get_mount -// __mac_getfsstat diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index afb10106f..64d1bb4db 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -449,197 +449,5 @@ func Dup3(oldfd, newfd, flags int) error { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) - -/* - * Unimplemented - */ -// Profil -// Sigaction -// Sigprocmask -// Getlogin -// Sigpending -// Sigaltstack -// Ioctl -// Reboot -// Execve -// Vfork -// Sbrk -// Sstk -// Ovadvise -// Mincore -// Setitimer -// Swapon -// Select -// Sigsuspend -// Readv -// Writev -// Nfssvc -// Getfh -// Quotactl -// Mount -// Csops -// Waitid -// Add_profil -// Kdebug_trace -// Sigreturn -// Atsocket -// Kqueue_from_portset_np -// Kqueue_portset -// Getattrlist -// Setattrlist -// Getdents -// Getdirentriesattr -// Searchfs -// Delete -// Copyfile -// Watchevent -// Waitevent -// Modwatch -// Fsctl -// Initgroups -// Posix_spawn -// Nfsclnt -// Fhopen -// Minherit -// Semsys -// Msgsys -// Shmsys -// Semctl -// Semget -// Semop -// Msgctl -// Msgget -// Msgsnd -// Msgrcv -// Shmat -// Shmctl -// Shmdt -// Shmget -// Shm_open -// Shm_unlink -// Sem_open -// Sem_close -// Sem_unlink -// Sem_wait -// Sem_trywait -// Sem_post -// Sem_getvalue -// Sem_init -// Sem_destroy -// Open_extended -// Umask_extended -// Stat_extended -// Lstat_extended -// Fstat_extended -// Chmod_extended -// Fchmod_extended -// Access_extended -// Settid -// Gettid -// Setsgroups -// Getsgroups -// Setwgroups -// Getwgroups -// Mkfifo_extended -// Mkdir_extended -// Identitysvc -// Shared_region_check_np -// Shared_region_map_np -// __pthread_mutex_destroy -// __pthread_mutex_init -// __pthread_mutex_lock -// __pthread_mutex_trylock -// __pthread_mutex_unlock -// __pthread_cond_init -// __pthread_cond_destroy -// __pthread_cond_broadcast -// __pthread_cond_signal -// Setsid_with_pid -// __pthread_cond_timedwait -// Aio_fsync -// Aio_return -// Aio_suspend -// Aio_cancel -// Aio_error -// Aio_read -// Aio_write -// Lio_listio -// __pthread_cond_wait -// Iopolicysys -// __pthread_kill -// __pthread_sigmask -// __sigwait -// __disable_threadsignal -// __pthread_markcancel -// __pthread_canceled -// __semwait_signal -// Proc_info -// Stat64_extended -// 
Lstat64_extended -// Fstat64_extended -// __pthread_chdir -// __pthread_fchdir -// Audit -// Auditon -// Getauid -// Setauid -// Getaudit -// Setaudit -// Getaudit_addr -// Setaudit_addr -// Auditctl -// Bsdthread_create -// Bsdthread_terminate -// Stack_snapshot -// Bsdthread_register -// Workq_open -// Workq_ops -// __mac_execve -// __mac_syscall -// __mac_get_file -// __mac_set_file -// __mac_get_link -// __mac_set_link -// __mac_get_proc -// __mac_set_proc -// __mac_get_fd -// __mac_set_fd -// __mac_get_pid -// __mac_get_lcid -// __mac_get_lctx -// __mac_set_lctx -// Setlcid -// Read_nocancel -// Write_nocancel -// Open_nocancel -// Close_nocancel -// Wait4_nocancel -// Recvmsg_nocancel -// Sendmsg_nocancel -// Recvfrom_nocancel -// Accept_nocancel -// Fcntl_nocancel -// Select_nocancel -// Fsync_nocancel -// Connect_nocancel -// Sigsuspend_nocancel -// Readv_nocancel -// Writev_nocancel -// Sendto_nocancel -// Pread_nocancel -// Pwrite_nocancel -// Waitid_nocancel -// Poll_nocancel -// Msgsnd_nocancel -// Msgrcv_nocancel -// Sem_wait_nocancel -// Aio_suspend_nocancel -// __sigwait_nocancel -// __semwait_signal_nocancel -// __mac_mount -// __mac_get_mount -// __mac_getfsstat diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 0ba030197..fb4e50224 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -693,10 +693,10 @@ type SockaddrALG struct { func (sa *SockaddrALG) sockaddr() (unsafe.Pointer, _Socklen, error) { // Leave room for NUL byte terminator. - if len(sa.Type) > 13 { + if len(sa.Type) > len(sa.raw.Type)-1 { return nil, 0, EINVAL } - if len(sa.Name) > 63 { + if len(sa.Name) > len(sa.raw.Name)-1 { return nil, 0, EINVAL } @@ -704,17 +704,8 @@ func (sa *SockaddrALG) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Feat = sa.Feature sa.raw.Mask = sa.Mask - typ, err := ByteSliceFromString(sa.Type) - if err != nil { - return nil, 0, err - } - name, err := ByteSliceFromString(sa.Name) - if err != nil { - return nil, 0, err - } - - copy(sa.raw.Type[:], typ) - copy(sa.raw.Name[:], name) + copy(sa.raw.Type[:], sa.Type) + copy(sa.raw.Name[:], sa.Name) return unsafe.Pointer(&sa.raw), SizeofSockaddrALG, nil } @@ -1988,8 +1979,6 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { //sys Unshare(flags int) (err error) //sys write(fd int, p []byte) (n int, err error) //sys exitThread(code int) (err error) = SYS_EXIT -//sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ -//sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE //sys readv(fd int, iovs []Iovec) (n int, err error) = SYS_READV //sys writev(fd int, iovs []Iovec) (n int, err error) = SYS_WRITEV //sys preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) = SYS_PREADV @@ -2493,99 +2482,3 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { } return attr, nil } - -/* - * Unimplemented - */ -// AfsSyscall -// ArchPrctl -// Brk -// ClockNanosleep -// ClockSettime -// Clone -// EpollCtlOld -// EpollPwait -// EpollWaitOld -// Execve -// Fork -// Futex -// GetKernelSyms -// GetMempolicy -// GetRobustList -// GetThreadArea -// Getpmsg -// IoCancel -// IoDestroy -// IoGetevents -// IoSetup -// IoSubmit -// IoprioGet -// IoprioSet -// KexecLoad -// LookupDcookie -// Mbind -// MigratePages -// Mincore -// ModifyLdt -// Mount -// MovePages -// MqGetsetattr -// MqNotify -// MqOpen -// MqTimedreceive -// MqTimedsend -// MqUnlink -// 
Msgctl -// Msgget -// Msgrcv -// Msgsnd -// Nfsservctl -// Personality -// Pselect6 -// Ptrace -// Putpmsg -// Quotactl -// Readahead -// Readv -// RemapFilePages -// RestartSyscall -// RtSigaction -// RtSigpending -// RtSigqueueinfo -// RtSigreturn -// RtSigsuspend -// RtSigtimedwait -// SchedGetPriorityMax -// SchedGetPriorityMin -// SchedGetparam -// SchedGetscheduler -// SchedRrGetInterval -// SchedSetparam -// SchedYield -// Security -// Semctl -// Semget -// Semop -// Semtimedop -// SetMempolicy -// SetRobustList -// SetThreadArea -// SetTidAddress -// Sigaltstack -// Swapoff -// Swapon -// Sysfs -// TimerCreate -// TimerDelete -// TimerGetoverrun -// TimerGettime -// TimerSettime -// Tkill (obsolete) -// Tuxcall -// Umount2 -// Uselib -// Utimensat -// Vfork -// Vhangup -// Vserver -// _Sysctl diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index ddd1ac853..88162099a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -356,8 +356,6 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) const ( @@ -371,262 +369,3 @@ const ( func mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (uintptr, error) { return mremapNetBSD(oldaddr, oldlength, newaddr, newlength, flags) } - -/* - * Unimplemented - */ -// ____semctl13 -// __clone -// __fhopen40 -// __fhstat40 -// __fhstatvfs140 -// __fstat30 -// __getcwd -// __getfh30 -// __getlogin -// __lstat30 -// __mount50 -// __msgctl13 -// __msync13 -// __ntp_gettime30 -// __posix_chown -// __posix_fchown -// __posix_lchown -// __posix_rename -// __setlogin -// __shmctl13 -// __sigaction_sigtramp -// __sigaltstack14 -// __sigpending14 -// __sigprocmask14 -// __sigsuspend14 -// __sigtimedwait -// __stat30 -// __syscall -// __vfork14 -// _ksem_close -// _ksem_destroy -// _ksem_getvalue -// _ksem_init -// _ksem_open -// _ksem_post -// _ksem_trywait -// _ksem_unlink -// _ksem_wait -// _lwp_continue -// _lwp_create -// _lwp_ctl -// _lwp_detach -// _lwp_exit -// _lwp_getname -// _lwp_getprivate -// _lwp_kill -// _lwp_park -// _lwp_self -// _lwp_setname -// _lwp_setprivate -// _lwp_suspend -// _lwp_unpark -// _lwp_unpark_all -// _lwp_wait -// _lwp_wakeup -// _pset_bind -// _sched_getaffinity -// _sched_getparam -// _sched_setaffinity -// _sched_setparam -// acct -// aio_cancel -// aio_error -// aio_fsync -// aio_read -// aio_return -// aio_suspend -// aio_write -// break -// clock_getres -// clock_gettime -// clock_settime -// compat_09_ogetdomainname -// compat_09_osetdomainname -// compat_09_ouname -// compat_10_omsgsys -// compat_10_osemsys -// compat_10_oshmsys -// compat_12_fstat12 -// compat_12_getdirentries -// compat_12_lstat12 -// compat_12_msync -// compat_12_oreboot -// compat_12_oswapon -// compat_12_stat12 -// compat_13_sigaction13 -// compat_13_sigaltstack13 -// compat_13_sigpending13 -// compat_13_sigprocmask13 -// compat_13_sigreturn13 -// compat_13_sigsuspend13 -// compat_14___semctl -// compat_14_msgctl -// compat_14_shmctl -// 
compat_16___sigaction14 -// compat_16___sigreturn14 -// compat_20_fhstatfs -// compat_20_fstatfs -// compat_20_getfsstat -// compat_20_statfs -// compat_30___fhstat30 -// compat_30___fstat13 -// compat_30___lstat13 -// compat_30___stat13 -// compat_30_fhopen -// compat_30_fhstat -// compat_30_fhstatvfs1 -// compat_30_getdents -// compat_30_getfh -// compat_30_ntp_gettime -// compat_30_socket -// compat_40_mount -// compat_43_fstat43 -// compat_43_lstat43 -// compat_43_oaccept -// compat_43_ocreat -// compat_43_oftruncate -// compat_43_ogetdirentries -// compat_43_ogetdtablesize -// compat_43_ogethostid -// compat_43_ogethostname -// compat_43_ogetkerninfo -// compat_43_ogetpagesize -// compat_43_ogetpeername -// compat_43_ogetrlimit -// compat_43_ogetsockname -// compat_43_okillpg -// compat_43_olseek -// compat_43_ommap -// compat_43_oquota -// compat_43_orecv -// compat_43_orecvfrom -// compat_43_orecvmsg -// compat_43_osend -// compat_43_osendmsg -// compat_43_osethostid -// compat_43_osethostname -// compat_43_osigblock -// compat_43_osigsetmask -// compat_43_osigstack -// compat_43_osigvec -// compat_43_otruncate -// compat_43_owait -// compat_43_stat43 -// execve -// extattr_delete_fd -// extattr_delete_file -// extattr_delete_link -// extattr_get_fd -// extattr_get_file -// extattr_get_link -// extattr_list_fd -// extattr_list_file -// extattr_list_link -// extattr_set_fd -// extattr_set_file -// extattr_set_link -// extattrctl -// fchroot -// fdatasync -// fgetxattr -// fktrace -// flistxattr -// fork -// fremovexattr -// fsetxattr -// fstatvfs1 -// fsync_range -// getcontext -// getitimer -// getvfsstat -// getxattr -// ktrace -// lchflags -// lchmod -// lfs_bmapv -// lfs_markv -// lfs_segclean -// lfs_segwait -// lgetxattr -// lio_listio -// listxattr -// llistxattr -// lremovexattr -// lseek -// lsetxattr -// lutimes -// madvise -// mincore -// minherit -// modctl -// mq_close -// mq_getattr -// mq_notify -// mq_open -// mq_receive -// mq_send -// mq_setattr -// mq_timedreceive -// mq_timedsend -// mq_unlink -// msgget -// msgrcv -// msgsnd -// nfssvc -// ntp_adjtime -// pmc_control -// pmc_get_info -// pollts -// preadv -// profil -// pselect -// pset_assign -// pset_create -// pset_destroy -// ptrace -// pwritev -// quotactl -// rasctl -// readv -// reboot -// removexattr -// sa_enable -// sa_preempt -// sa_register -// sa_setconcurrency -// sa_stacks -// sa_yield -// sbrk -// sched_yield -// semconfig -// semget -// semop -// setcontext -// setitimer -// setxattr -// shmat -// shmdt -// shmget -// sstk -// statvfs1 -// swapctl -// sysarch -// syscall -// timer_create -// timer_delete -// timer_getoverrun -// timer_gettime -// timer_settime -// undelete -// utrace -// uuidgen -// vadvise -// vfork -// writev diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index c5f166a11..6f34479b5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -326,78 +326,4 @@ func Uname(uname *Utsname) error { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) - -/* - * Unimplemented - */ -// 
__getcwd -// __semctl -// __syscall -// __sysctl -// adjfreq -// break -// clock_getres -// clock_gettime -// clock_settime -// closefrom -// execve -// fhopen -// fhstat -// fhstatfs -// fork -// futimens -// getfh -// getgid -// getitimer -// getlogin -// getthrid -// ktrace -// lfs_bmapv -// lfs_markv -// lfs_segclean -// lfs_segwait -// mincore -// minherit -// mount -// mquery -// msgctl -// msgget -// msgrcv -// msgsnd -// nfssvc -// nnpfspioctl -// preadv -// profil -// pwritev -// quotactl -// readv -// reboot -// renameat -// rfork -// sched_yield -// semget -// semop -// setgroups -// setitimer -// setsockopt -// shmat -// shmctl -// shmdt -// shmget -// sigaction -// sigaltstack -// sigpending -// sigprocmask -// sigreturn -// sigsuspend -// sysarch -// syscall -// threxit -// thrsigdivert -// thrsleep -// thrwakeup -// vfork -// writev diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 72d23575f..b99cfa134 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -698,24 +698,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) = libsocket.setsockopt //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = libsocket.recvfrom -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0) - n = int(r0) - if e1 != 0 { - err = e1 - } - return -} - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0) - n = int(r0) - if e1 != 0 { - err = e1 - } - return -} - // Event Ports type fileObjCookie struct { diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 44e72edb4..4596d041c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -192,7 +192,6 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys read(fd int, p []byte) (n int, err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ //sys write(fd int, p []byte) (n int, err error) //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = SYS___ACCEPT_A diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 0787a043b..f9c7f479b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -2421,6 +2421,15 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_RISCV_V_GET_CONTROL = 0x46 + PR_RISCV_V_SET_CONTROL = 0x45 + PR_RISCV_V_VSTATE_CTRL_CUR_MASK = 0x3 + PR_RISCV_V_VSTATE_CTRL_DEFAULT = 0x0 + PR_RISCV_V_VSTATE_CTRL_INHERIT = 0x10 + PR_RISCV_V_VSTATE_CTRL_MASK = 0x1f + PR_RISCV_V_VSTATE_CTRL_NEXT_MASK = 0xc + PR_RISCV_V_VSTATE_CTRL_OFF = 0x1 + PR_RISCV_V_VSTATE_CTRL_ON = 0x2 PR_SCHED_CORE = 0x3e PR_SCHED_CORE_CREATE = 0x1 PR_SCHED_CORE_GET = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 
cfb143001..30aee00a5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -326,10 +326,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index df64f2d59..8ebfa5127 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -327,10 +327,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 3025cd5b2..271a21cdc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -333,10 +333,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 09e1ffbef..910c330a3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -323,10 +323,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index a45723540..a640798c9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -118,6 +118,8 @@ const ( IUCLC = 0x200 IXOFF = 0x1000 IXON = 0x400 + LASX_CTX_MAGIC = 0x41535801 + LSX_CTX_MAGIC = 0x53580001 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 @@ -317,10 +319,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index fee7dfb81..0d5925d34 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -326,10 +326,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index a5b2373ae..d72a00e0b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -326,10 +326,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 5dde82c98..02ba129f8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -326,10 +326,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 2e80ea6b3..8daa6dd96 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -326,10 +326,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index a65dcd7cb..63c8fa2f7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -381,10 +381,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index cbd34e3d8..930799ec1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -385,10 +385,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e4afa7a31..8605a7dd7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -385,10 +385,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 44f45a039..95a016f1c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -314,10 +314,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f 
SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 74733e260..1ae0108f5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -389,10 +389,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index f5f3934b1..1bb7c6333 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -428,10 +428,12 @@ const ( SO_NOFCS = 0x27 SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 + SO_PASSPIDFD = 0x55 SO_PASSSEC = 0x1f SO_PEEK_OFF = 0x26 SO_PEERCRED = 0x40 SO_PEERGROUPS = 0x3d + SO_PEERPIDFD = 0x56 SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x48 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index 9a257219d..d1d1d2331 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -817,28 +817,6 @@ func write(fd int, p []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, er := C.read(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(p))), C.size_t(np)) - n = int(r0) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, er := C.write(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(p))), C.size_t(np)) - n = int(r0) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Dup2(oldfd int, newfd int) (err error) { r0, er := C.dup2(C.int(oldfd), C.int(newfd)) if r0 == -1 && er != nil { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index 6de80c20c..f99a18adc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -762,28 +762,6 @@ func write(fd int, p []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, e1 := callread(fd, uintptr(unsafe.Pointer(p)), np) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, e1 := callwrite(fd, uintptr(unsafe.Pointer(p)), np) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Dup2(oldfd int, newfd int) (err error) { _, e1 := calldup2(oldfd, newfd) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 4037ccf7a..1cad561e9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -725,6 +725,12 @@ func ioctl(fd int, req uint, arg 
uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -733,10 +739,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2410,28 +2412,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { @@ -2521,14 +2501,6 @@ func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { return } -func ptrace1Ptr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { - _, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), addr, uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - var libc_ptrace_trampoline_addr uintptr //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 4baaed0bc..8b8bb2840 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -5,703 +5,586 @@ TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fdopendir(SB) - GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) - GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) - GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) - GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL 
·libc_bind_trampoline_addr(SB), RODATA, $8 DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP 
libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) - GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_closedir(SB) - GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readdir_r(SB) - GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) - GLOBL ·libc_pipe_trampoline_addr(SB), RODATA, $8 DATA ·libc_pipe_trampoline_addr(SB)/8, $libc_pipe_trampoline<>(SB) TEXT libc_getxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getxattr(SB) - GLOBL ·libc_getxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_getxattr_trampoline_addr(SB)/8, $libc_getxattr_trampoline<>(SB) TEXT libc_fgetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fgetxattr(SB) - GLOBL ·libc_fgetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fgetxattr_trampoline_addr(SB)/8, $libc_fgetxattr_trampoline<>(SB) TEXT libc_setxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setxattr(SB) - GLOBL ·libc_setxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_setxattr_trampoline_addr(SB)/8, $libc_setxattr_trampoline<>(SB) TEXT libc_fsetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsetxattr(SB) - GLOBL ·libc_fsetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsetxattr_trampoline_addr(SB)/8, $libc_fsetxattr_trampoline<>(SB) TEXT libc_removexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_removexattr(SB) - GLOBL ·libc_removexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_removexattr_trampoline_addr(SB)/8, $libc_removexattr_trampoline<>(SB) TEXT libc_fremovexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fremovexattr(SB) - GLOBL ·libc_fremovexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fremovexattr_trampoline_addr(SB)/8, $libc_fremovexattr_trampoline<>(SB) TEXT libc_listxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listxattr(SB) - GLOBL ·libc_listxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_listxattr_trampoline_addr(SB)/8, $libc_listxattr_trampoline<>(SB) TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) - GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA 
·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) - GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) - GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB) TEXT libc_shmat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmat(SB) - GLOBL ·libc_shmat_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmat_trampoline_addr(SB)/8, $libc_shmat_trampoline<>(SB) TEXT libc_shmctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmctl(SB) - GLOBL ·libc_shmctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmctl_trampoline_addr(SB)/8, $libc_shmctl_trampoline<>(SB) TEXT libc_shmdt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmdt(SB) - GLOBL ·libc_shmdt_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmdt_trampoline_addr(SB)/8, $libc_shmdt_trampoline<>(SB) TEXT libc_shmget_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmget(SB) - GLOBL ·libc_shmget_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmget_trampoline_addr(SB)/8, $libc_shmget_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) - GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) TEXT 
libc_clonefile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefile(SB) - GLOBL ·libc_clonefile_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefile_trampoline_addr(SB)/8, $libc_clonefile_trampoline<>(SB) TEXT libc_clonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefileat(SB) - GLOBL ·libc_clonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefileat_trampoline_addr(SB)/8, $libc_clonefileat_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) TEXT libc_exchangedata_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exchangedata(SB) - GLOBL ·libc_exchangedata_trampoline_addr(SB), RODATA, $8 DATA ·libc_exchangedata_trampoline_addr(SB)/8, $libc_exchangedata_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) TEXT libc_fclonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fclonefileat(SB) - GLOBL ·libc_fclonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fclonefileat_trampoline_addr(SB)/8, $libc_fclonefileat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_ftruncate_trampoline_addr(SB)/8, 
$libc_ftruncate_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) TEXT libc_getdtablesize_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) - GLOBL ·libc_getdtablesize_trampoline_addr(SB), RODATA, $8 DATA ·libc_getdtablesize_trampoline_addr(SB)/8, $libc_getdtablesize_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), 
RODATA, $8 DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mount(SB) - GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) TEXT 
libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) @@ -712,192 +595,160 @@ DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) TEXT libc_setprivexec_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setprivexec(SB) - GLOBL ·libc_setprivexec_trampoline_addr(SB), RODATA, $8 DATA ·libc_setprivexec_trampoline_addr(SB)/8, $libc_setprivexec_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL 
·libc_symlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) TEXT libc_undelete_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_undelete(SB) - GLOBL ·libc_undelete_trampoline_addr(SB), RODATA, $8 DATA ·libc_undelete_trampoline_addr(SB)/8, $libc_undelete_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) - GLOBL ·libc_fstat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstat64_trampoline_addr(SB)/8, $libc_fstat64_trampoline<>(SB) TEXT libc_fstatat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat64(SB) - GLOBL ·libc_fstatat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatat64_trampoline_addr(SB)/8, $libc_fstatat64_trampoline<>(SB) TEXT libc_fstatfs64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs64(SB) - GLOBL ·libc_fstatfs64_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatfs64_trampoline_addr(SB)/8, $libc_fstatfs64_trampoline<>(SB) TEXT libc_getfsstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) - GLOBL ·libc_getfsstat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_getfsstat64_trampoline_addr(SB)/8, $libc_getfsstat64_trampoline<>(SB) TEXT libc_lstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat64(SB) - GLOBL ·libc_lstat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_lstat64_trampoline_addr(SB)/8, $libc_lstat64_trampoline<>(SB) TEXT libc_ptrace_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) - GLOBL ·libc_ptrace_trampoline_addr(SB), RODATA, $8 DATA ·libc_ptrace_trampoline_addr(SB)/8, $libc_ptrace_trampoline<>(SB) TEXT libc_stat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat64(SB) - GLOBL ·libc_stat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_stat64_trampoline_addr(SB)/8, $libc_stat64_trampoline<>(SB) TEXT libc_statfs64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs64(SB) - GLOBL 
·libc_statfs64_trampoline_addr(SB), RODATA, $8 DATA ·libc_statfs64_trampoline_addr(SB)/8, $libc_statfs64_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 51d6f3fb2..b18edbd0e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -725,6 +725,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -733,10 +739,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2410,28 +2412,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { @@ -2521,14 +2501,6 @@ func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { return } -func ptrace1Ptr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { - _, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), addr, uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - var libc_ptrace_trampoline_addr uintptr //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index c3b82c037..08362c1ab 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -5,703 +5,586 @@ TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fdopendir(SB) - GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) - GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) - GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) TEXT 
libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) - GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_futimes_trampoline_addr(SB)/8, 
$libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) - GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_closedir(SB) - GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readdir_r(SB) - GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) - GLOBL ·libc_pipe_trampoline_addr(SB), RODATA, $8 DATA ·libc_pipe_trampoline_addr(SB)/8, $libc_pipe_trampoline<>(SB) TEXT libc_getxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getxattr(SB) - GLOBL ·libc_getxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_getxattr_trampoline_addr(SB)/8, $libc_getxattr_trampoline<>(SB) TEXT libc_fgetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fgetxattr(SB) - GLOBL ·libc_fgetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fgetxattr_trampoline_addr(SB)/8, $libc_fgetxattr_trampoline<>(SB) TEXT libc_setxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setxattr(SB) - GLOBL ·libc_setxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_setxattr_trampoline_addr(SB)/8, $libc_setxattr_trampoline<>(SB) TEXT libc_fsetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsetxattr(SB) - GLOBL ·libc_fsetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsetxattr_trampoline_addr(SB)/8, $libc_fsetxattr_trampoline<>(SB) TEXT libc_removexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_removexattr(SB) - GLOBL ·libc_removexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_removexattr_trampoline_addr(SB)/8, $libc_removexattr_trampoline<>(SB) TEXT libc_fremovexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fremovexattr(SB) - GLOBL ·libc_fremovexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fremovexattr_trampoline_addr(SB)/8, $libc_fremovexattr_trampoline<>(SB) TEXT libc_listxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listxattr(SB) - GLOBL ·libc_listxattr_trampoline_addr(SB), RODATA, 
$8 DATA ·libc_listxattr_trampoline_addr(SB)/8, $libc_listxattr_trampoline<>(SB) TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) - GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) - GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) - GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB) TEXT libc_shmat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmat(SB) - GLOBL ·libc_shmat_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmat_trampoline_addr(SB)/8, $libc_shmat_trampoline<>(SB) TEXT libc_shmctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmctl(SB) - GLOBL ·libc_shmctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmctl_trampoline_addr(SB)/8, $libc_shmctl_trampoline<>(SB) TEXT libc_shmdt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmdt(SB) - GLOBL ·libc_shmdt_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmdt_trampoline_addr(SB)/8, $libc_shmdt_trampoline<>(SB) TEXT libc_shmget_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmget(SB) - GLOBL ·libc_shmget_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmget_trampoline_addr(SB)/8, $libc_shmget_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) TEXT 
libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) - GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) TEXT libc_clonefile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefile(SB) - GLOBL ·libc_clonefile_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefile_trampoline_addr(SB)/8, $libc_clonefile_trampoline<>(SB) TEXT libc_clonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefileat(SB) - GLOBL ·libc_clonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefileat_trampoline_addr(SB)/8, $libc_clonefileat_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) TEXT libc_exchangedata_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exchangedata(SB) - GLOBL ·libc_exchangedata_trampoline_addr(SB), RODATA, $8 DATA ·libc_exchangedata_trampoline_addr(SB)/8, $libc_exchangedata_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) TEXT libc_fclonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fclonefileat(SB) - GLOBL ·libc_fclonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fclonefileat_trampoline_addr(SB)/8, $libc_fclonefileat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_fpathconf_trampoline_addr(SB)/8, 
$libc_fpathconf_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) TEXT libc_getdtablesize_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) - GLOBL ·libc_getdtablesize_trampoline_addr(SB), RODATA, $8 DATA ·libc_getdtablesize_trampoline_addr(SB)/8, $libc_getdtablesize_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), 
RODATA, $8 DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mount(SB) - GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) TEXT 
libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) @@ -712,192 +595,160 @@ DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) TEXT libc_setprivexec_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setprivexec(SB) - GLOBL ·libc_setprivexec_trampoline_addr(SB), RODATA, $8 DATA ·libc_setprivexec_trampoline_addr(SB)/8, $libc_setprivexec_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL 
·libc_setuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) TEXT libc_undelete_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_undelete(SB) - GLOBL ·libc_undelete_trampoline_addr(SB), RODATA, $8 DATA ·libc_undelete_trampoline_addr(SB)/8, $libc_undelete_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) - GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) - GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) - GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getfsstat(SB) - GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) - GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) TEXT libc_ptrace_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) - GLOBL ·libc_ptrace_trampoline_addr(SB), RODATA, $8 DATA 
·libc_ptrace_trampoline_addr(SB)/8, $libc_ptrace_trampoline<>(SB) TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat(SB) - GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) - GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 0eabac7ad..0c67df64a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -1642,28 +1642,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index ee313eb00..e6e05d145 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -1862,28 +1862,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 4c986e448..7508accac 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -1862,28 +1862,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - 
err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 555216944..7b56aead4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -1862,28 +1862,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 67a226fbf..cc623dcaa 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -1862,28 +1862,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index f0b9ddaaa..581849197 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -1862,28 +1862,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index b57c7050d..6be25cd19 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -40,7 +40,7 @@ func readv(fd int, iovs []Iovec) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procreadv)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -55,7 +55,7 @@ func preadv(fd int, iovs []Iovec, off int64) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpreadv)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -70,7 +70,7 @@ func writev(fd int, iovs []Iovec) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwritev)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -85,7 +85,7 @@ func pwritev(fd int, iovs []Iovec, off int64) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwritev)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -96,7 +96,7 @@ func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept4)), 4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 14ab34a56..1ff3aec74 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1734,28 +1734,6 @@ func exitThread(code int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
- func readv(fd int, iovs []Iovec) (n int, err error) { var _p0 unsafe.Pointer if len(iovs) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 35f499b32..2df3c5bac 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -1824,28 +1824,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 3cda65b0d..a60556bab 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -1824,28 +1824,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 1e1fea902..9f788917a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -1824,28 +1824,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 3b77da110..82a4cb2dc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -1824,28 +1824,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 9ab9abf72..66b3b6456 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -549,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -557,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2211,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 915761eab..c5c4cc112 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -2213,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, 
e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 8e87fdf15..93bfbb328 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -549,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -557,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2211,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 12a7a2160..a107b8fda 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -549,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -557,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2211,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index b19e8aa03..c427de509 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -549,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -557,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2211,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index fb99594c9..60c1a99ae 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -549,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req 
uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -557,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2211,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 32cbbbc52..52eba360f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -549,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -557,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2211,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 609d1c598..b40189464 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -436,7 +436,7 @@ func pipe(p *[2]_C_int) (n int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procpipe)), 1, uintptr(unsafe.Pointer(p)), 0, 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -446,7 +446,7 @@ func pipe(p *[2]_C_int) (n int, err error) { func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procpipe2)), 2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -456,7 +456,7 @@ func pipe2(p *[2]_C_int, flags int) (err error) { func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetsockname)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -471,7 +471,7 @@ func Getcwd(buf []byte) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetcwd)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -482,7 +482,7 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -492,7 +492,7 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { func setgroups(ngid int, gid *_Gid_t) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procsetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -503,7 +503,7 @@ func wait4(pid int32, statusp *_C_int, options int, rusage *Rusage) (wpid int32, r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwait4)), 4, uintptr(pid), uintptr(unsafe.Pointer(statusp)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int32(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -518,7 +518,7 @@ func gethostname(buf []byte) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgethostname)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -533,7 +533,7 @@ func utimes(path string, times *[2]Timeval) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimes)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -548,7 +548,7 @@ func utimensat(fd int, path string, times *[2]Timespec, flag int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimensat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flag), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -559,7 +559,7 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) val = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -569,7 +569,7 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { func futimesat(fildes int, path *byte, times *[2]Timeval) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfutimesat)), 3, 
uintptr(fildes), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -580,7 +580,7 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept)), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -591,7 +591,7 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_recvmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -602,7 +602,7 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_sendmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -612,7 +612,7 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { func acct(path *byte) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procacct)), 1, uintptr(unsafe.Pointer(path)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -647,7 +647,7 @@ func ioctlRet(fd int, req int, arg uintptr) (ret int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -658,7 +658,7 @@ func ioctlPtrRet(fd int, req int, arg unsafe.Pointer) (ret int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -669,7 +669,7 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpoll)), 3, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -684,7 +684,7 @@ func Access(path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procAccess)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -694,7 +694,7 @@ func Access(path string, mode uint32) (err error) { func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procAdjtime)), 2, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -709,7 +709,7 @@ func Chdir(path string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -724,7 +724,7 @@ func Chmod(path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChmod)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -739,7 +739,7 @@ func Chown(path string, uid int, gid int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -754,7 +754,7 @@ func Chroot(path string) (err 
error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChroot)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -764,7 +764,7 @@ func Chroot(path string) (err error) { func ClockGettime(clockid int32, time *Timespec) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClockGettime)), 2, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -774,7 +774,7 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { func Close(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClose)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -790,7 +790,7 @@ func Creat(path string, mode uint32) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procCreat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -801,7 +801,7 @@ func Dup(fd int) (nfd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup)), 1, uintptr(fd), 0, 0, 0, 0, 0) nfd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -811,7 +811,7 @@ func Dup(fd int) (nfd int, err error) { func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup2)), 2, uintptr(oldfd), uintptr(newfd), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -833,7 +833,7 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFaccessat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -843,7 +843,7 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { func Fchdir(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchdir)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -853,7 +853,7 @@ func Fchdir(fd int) (err error) { func Fchmod(fd int, mode uint32) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchmod)), 2, uintptr(fd), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -868,7 +868,7 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchmodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -878,7 +878,7 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchown)), 3, uintptr(fd), uintptr(uid), uintptr(gid), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -893,7 +893,7 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchownat)), 5, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -903,7 +903,7 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { func Fdatasync(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFdatasync)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = 
errnoErr(e1) } return } @@ -913,7 +913,7 @@ func Fdatasync(fd int) (err error) { func Flock(fd int, how int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFlock)), 2, uintptr(fd), uintptr(how), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -924,7 +924,7 @@ func Fpathconf(fd int, name int) (val int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFpathconf)), 2, uintptr(fd), uintptr(name), 0, 0, 0, 0) val = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -934,7 +934,7 @@ func Fpathconf(fd int, name int) (val int, err error) { func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstat)), 2, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -949,7 +949,7 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -959,7 +959,7 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { func Fstatvfs(fd int, vfsstat *Statvfs_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatvfs)), 2, uintptr(fd), uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -974,7 +974,7 @@ func Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetdents)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1001,7 +1001,7 @@ func Getpgid(pid int) (pgid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0) pgid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1012,7 +1012,7 @@ func Getpgrp() (pgid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgrp)), 0, 0, 0, 0, 0, 0, 0) pgid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1047,7 +1047,7 @@ func Getpriority(which int, who int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetpriority)), 2, uintptr(which), uintptr(who), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1057,7 +1057,7 @@ func Getpriority(which int, who int) (n int, err error) { func Getrlimit(which int, lim *Rlimit) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1067,7 +1067,7 @@ func Getrlimit(which int, lim *Rlimit) (err error) { func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrusage)), 2, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1078,7 +1078,7 @@ func Getsid(pid int) (sid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetsid)), 1, uintptr(pid), 0, 0, 0, 0, 0) sid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1088,7 +1088,7 @@ func Getsid(pid int) (sid int, err error) { func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := 
rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1106,7 +1106,7 @@ func Getuid() (uid int) { func Kill(pid int, signum syscall.Signal) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procKill)), 2, uintptr(pid), uintptr(signum), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1121,7 +1121,7 @@ func Lchown(path string, uid int, gid int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLchown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1141,7 +1141,7 @@ func Link(path string, link string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLink)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1151,7 +1151,7 @@ func Link(path string, link string) (err error) { func Listen(s int, backlog int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1166,7 +1166,7 @@ func Lstat(path string, stat *Stat_t) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLstat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1180,7 +1180,7 @@ func Madvise(b []byte, advice int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMadvise)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(advice), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1195,7 +1195,7 @@ func Mkdir(path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkdir)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1210,7 +1210,7 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkdirat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1225,7 +1225,7 @@ func Mkfifo(path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifo)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1240,7 +1240,7 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifoat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1255,7 +1255,7 @@ func Mknod(path string, mode uint32, dev int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMknod)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1270,7 +1270,7 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMknodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1284,7 +1284,7 @@ func Mlock(b []byte) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlock)), 2, 
uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1294,7 +1294,7 @@ func Mlock(b []byte) (err error) { func Mlockall(flags int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlockall)), 1, uintptr(flags), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1308,7 +1308,7 @@ func Mprotect(b []byte, prot int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMprotect)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(prot), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1322,7 +1322,7 @@ func Msync(b []byte, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMsync)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(flags), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1336,7 +1336,7 @@ func Munlock(b []byte) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1346,7 +1346,7 @@ func Munlock(b []byte) (err error) { func Munlockall() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlockall)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1356,7 +1356,7 @@ func Munlockall() (err error) { func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procNanosleep)), 2, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1372,7 +1372,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procOpen)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1388,7 +1388,7 @@ func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procOpenat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1404,7 +1404,7 @@ func Pathconf(path string, name int) (val int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPathconf)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0, 0, 0, 0) val = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1414,7 +1414,7 @@ func Pathconf(path string, name int) (val int, err error) { func Pause() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPause)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1429,7 +1429,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpread)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1444,7 +1444,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwrite)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1459,7 +1459,7 @@ func read(fd int, p []byte) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, 
uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1479,7 +1479,7 @@ func Readlink(path string, buf []byte) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procReadlink)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(len(buf)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1499,7 +1499,7 @@ func Rename(from string, to string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRename)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1519,7 +1519,7 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRenameat)), 4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1534,7 +1534,7 @@ func Rmdir(path string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRmdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1545,7 +1545,7 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proclseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0) newoffset = int64(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1556,7 +1556,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSelect)), 5, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1566,7 +1566,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err func Setegid(egid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetegid)), 1, uintptr(egid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1576,7 +1576,7 @@ func Setegid(egid int) (err error) { func Seteuid(euid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSeteuid)), 1, uintptr(euid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1586,7 +1586,7 @@ func Seteuid(euid int) (err error) { func Setgid(gid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetgid)), 1, uintptr(gid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1600,7 +1600,7 @@ func Sethostname(p []byte) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSethostname)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1610,7 +1610,7 @@ func Sethostname(p []byte) (err error) { func Setpgid(pid int, pgid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetpgid)), 2, uintptr(pid), uintptr(pgid), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1620,7 +1620,7 @@ func Setpgid(pid int, pgid int) (err error) { func Setpriority(which int, who int, prio int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSetpriority)), 3, uintptr(which), uintptr(who), uintptr(prio), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ 
-1630,7 +1630,7 @@ func Setpriority(which int, who int, prio int) (err error) { func Setregid(rgid int, egid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetregid)), 2, uintptr(rgid), uintptr(egid), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1640,7 +1640,7 @@ func Setregid(rgid int, egid int) (err error) { func Setreuid(ruid int, euid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetreuid)), 2, uintptr(ruid), uintptr(euid), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1651,7 +1651,7 @@ func Setsid() (pid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetsid)), 0, 0, 0, 0, 0, 0, 0) pid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1661,7 +1661,7 @@ func Setsid() (pid int, err error) { func Setuid(uid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetuid)), 1, uintptr(uid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1671,7 +1671,7 @@ func Setuid(uid int) (err error) { func Shutdown(s int, how int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procshutdown)), 2, uintptr(s), uintptr(how), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1686,7 +1686,7 @@ func Stat(path string, stat *Stat_t) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procStat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1701,7 +1701,7 @@ func Statvfs(path string, vfsstat *Statvfs_t) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procStatvfs)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1721,7 +1721,7 @@ func Symlink(path string, link string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSymlink)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1731,7 +1731,7 @@ func Symlink(path string, link string) (err error) { func Sync() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSync)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1742,7 +1742,7 @@ func Sysconf(which int) (n int64, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSysconf)), 1, uintptr(which), 0, 0, 0, 0, 0) n = int64(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1753,7 +1753,7 @@ func Times(tms *Tms) (ticks uintptr, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procTimes)), 1, uintptr(unsafe.Pointer(tms)), 0, 0, 0, 0, 0) ticks = uintptr(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1768,7 +1768,7 @@ func Truncate(path string, length int64) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procTruncate)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1778,7 +1778,7 @@ func Truncate(path string, length int64) (err error) { func Fsync(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFsync)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1788,7 +1788,7 @@ func Fsync(fd int) (err error) { func Ftruncate(fd int, length int64) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFtruncate)), 2, uintptr(fd), 
uintptr(length), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1806,7 +1806,7 @@ func Umask(mask int) (oldmask int) { func Uname(buf *Utsname) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procUname)), 1, uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1821,7 +1821,7 @@ func Unmount(target string, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procumount)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1836,7 +1836,7 @@ func Unlink(path string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUnlink)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1851,7 +1851,7 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUnlinkat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1861,7 +1861,7 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { func Ustat(dev int, ubuf *Ustat_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUstat)), 2, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1876,7 +1876,7 @@ func Utime(path string, buf *Utimbuf) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUtime)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1886,7 +1886,7 @@ func Utime(path string, buf *Utimbuf) (err error) { func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_bind)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1896,7 +1896,7 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_connect)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1907,7 +1907,7 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmmap)), 6, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1917,7 +1917,7 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmunmap)), 2, uintptr(addr), uintptr(length), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1928,7 +1928,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsendfile)), 4, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) written = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1942,7 +1942,7 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_sendto)), 6, uintptr(s), 
uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1953,7 +1953,7 @@ func socket(domain int, typ int, proto int) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_socket)), 3, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1963,7 +1963,7 @@ func socket(domain int, typ int, proto int) (fd int, err error) { func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&proc__xnet_socketpair)), 4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1978,7 +1978,7 @@ func write(fd int, p []byte) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1988,7 +1988,7 @@ func write(fd int, p []byte) (n int, err error) { func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_getsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1998,7 +1998,7 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetpeername)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2008,7 +2008,7 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsetsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2023,7 +2023,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procrecvfrom)), 6, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2034,7 +2034,7 @@ func port_create() (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2045,7 +2045,7 @@ func port_associate(port int, source int, object uintptr, events int, user *byte r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_associate)), 5, uintptr(port), uintptr(source), uintptr(object), uintptr(events), uintptr(unsafe.Pointer(user)), 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2056,7 +2056,7 @@ func port_dissociate(port int, source int, object uintptr) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_dissociate)), 3, uintptr(port), uintptr(source), uintptr(object), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2067,7 +2067,7 @@ func 
port_get(port int, pe *portEvent, timeout *Timespec) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_get)), 3, uintptr(port), uintptr(unsafe.Pointer(pe)), uintptr(unsafe.Pointer(timeout)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2078,7 +2078,7 @@ func port_getn(port int, pe *portEvent, max uint32, nget *uint32, timeout *Times r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_getn)), 5, uintptr(port), uintptr(unsafe.Pointer(pe)), uintptr(max), uintptr(unsafe.Pointer(nget)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2088,7 +2088,7 @@ func port_getn(port int, pe *portEvent, max uint32, nget *uint32, timeout *Times func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procputmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2098,7 +2098,7 @@ func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) { func getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(unsafe.Pointer(flags)), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index c31681743..1d8fe1d4b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -40,17 +40,6 @@ func read(fd int, p []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c9c4ad031..9862853d3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -447,4 +447,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 12ff3417c..8901f0f4e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -369,4 +369,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index c3fb5e77a..6902c37ee 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -411,4 +411,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 358c847a4..a6d3dff81 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -314,4 +314,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 81c4849b1..b18f3f710 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -308,4 +308,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 202a57e90..0302e5e3d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -431,4 +431,5 @@ const ( SYS_PROCESS_MRELEASE = 4448 SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 + SYS_CACHESTAT = 4451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 1fbceb52d..6693ba4a0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -361,4 +361,5 @@ const ( SYS_PROCESS_MRELEASE = 5448 SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 + SYS_CACHESTAT = 5451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index b4ffb7a20..fd93f4987 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -361,4 +361,5 @@ const ( SYS_PROCESS_MRELEASE = 5448 SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 + SYS_CACHESTAT = 5451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 867985f9b..760ddcadc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -431,4 +431,5 @@ const ( SYS_PROCESS_MRELEASE = 4448 SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 + SYS_CACHESTAT = 4451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index a8cce69ed..cff2b2555 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -438,4 +438,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index d44c5b39d..a4b2405d0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -410,4 +410,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 4214dd9c0..aca54b4e3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -410,4 +410,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 
SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index ef285c567..9d1738d64 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -315,4 +315,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index e6ed7d637..022878dc8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -376,4 +376,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 92f628ef4..4100a761c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -389,4 +389,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 494493c78..18aa70b42 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1977,7 +1977,7 @@ const ( NFT_MSG_GETFLOWTABLE = 0x17 NFT_MSG_DELFLOWTABLE = 0x18 NFT_MSG_GETRULE_RESET = 0x19 - NFT_MSG_MAX = 0x21 + NFT_MSG_MAX = 0x22 NFTA_LIST_UNSPEC = 0x0 NFTA_LIST_ELEM = 0x1 NFTA_HOOK_UNSPEC = 0x0 @@ -4499,7 +4499,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x145 + NL80211_ATTR_MAX = 0x146 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -4869,7 +4869,7 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x99 + NL80211_CMD_MAX = 0x9a NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5503,7 +5503,7 @@ const ( NL80211_RATE_INFO_HE_RU_ALLOC_52 = 0x1 NL80211_RATE_INFO_HE_RU_ALLOC_996 = 0x5 NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 - NL80211_RATE_INFO_MAX = 0x16 + NL80211_RATE_INFO_MAX = 0x1d NL80211_RATE_INFO_MCS = 0x2 NL80211_RATE_INFO_SHORT_GI = 0x4 NL80211_RATE_INFO_VHT_MCS = 0x6 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 83c69c119..1b4c97c32 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -733,6 +733,10 @@ const ( RISCV_HWPROBE_KEY_IMA_EXT_0 = 0x4 RISCV_HWPROBE_IMA_FD = 0x1 RISCV_HWPROBE_IMA_C = 0x2 + RISCV_HWPROBE_IMA_V = 0x4 + RISCV_HWPROBE_EXT_ZBA = 0x8 + RISCV_HWPROBE_EXT_ZBB = 0x10 + RISCV_HWPROBE_EXT_ZBS = 0x20 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go index a52e0331d..9cabbb694 100644 --- a/vendor/golang.org/x/sys/windows/exec_windows.go +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -22,7 +22,7 @@ import ( // but only if there is space or 
tab inside s. func EscapeArg(s string) string { if len(s) == 0 { - return "\"\"" + return `""` } n := len(s) hasSpace := false @@ -35,7 +35,7 @@ func EscapeArg(s string) string { } } if hasSpace { - n += 2 + n += 2 // Reserve space for quotes. } if n == len(s) { return s @@ -82,20 +82,68 @@ func EscapeArg(s string) string { // in CreateProcess's CommandLine argument, CreateService/ChangeServiceConfig's BinaryPathName argument, // or any program that uses CommandLineToArgv. func ComposeCommandLine(args []string) string { - var commandLine string - for i := range args { - if i > 0 { - commandLine += " " + if len(args) == 0 { + return "" + } + + // Per https://learn.microsoft.com/en-us/windows/win32/api/shellapi/nf-shellapi-commandlinetoargvw: + // “This function accepts command lines that contain a program name; the + // program name can be enclosed in quotation marks or not.” + // + // Unfortunately, it provides no means of escaping interior quotation marks + // within that program name, and we have no way to report them here. + prog := args[0] + mustQuote := len(prog) == 0 + for i := 0; i < len(prog); i++ { + c := prog[i] + if c <= ' ' || (c == '"' && i == 0) { + // Force quotes for not only the ASCII space and tab as described in the + // MSDN article, but also ASCII control characters. + // The documentation for CommandLineToArgvW doesn't say what happens when + // the first argument is not a valid program name, but it empirically + // seems to drop unquoted control characters. + mustQuote = true + break + } + } + var commandLine []byte + if mustQuote { + commandLine = make([]byte, 0, len(prog)+2) + commandLine = append(commandLine, '"') + for i := 0; i < len(prog); i++ { + c := prog[i] + if c == '"' { + // This quote would interfere with our surrounding quotes. + // We have no way to report an error, so just strip out + // the offending character instead. + continue + } + commandLine = append(commandLine, c) } - commandLine += EscapeArg(args[i]) + commandLine = append(commandLine, '"') + } else { + if len(args) == 1 { + // args[0] is a valid command line representing itself. + // No need to allocate a new slice or string for it. + return prog + } + commandLine = []byte(prog) } - return commandLine + + for _, arg := range args[1:] { + commandLine = append(commandLine, ' ') + // TODO(bcmills): since we're already appending to a slice, it would be nice + // to avoid the intermediate allocations of EscapeArg. + // Perhaps we can factor out an appendEscapedArg function. + commandLine = append(commandLine, EscapeArg(arg)...) + } + return string(commandLine) } // DecomposeCommandLine breaks apart its argument command line into unescaped parts using CommandLineToArgv, // as gathered from GetCommandLine, QUERY_SERVICE_CONFIG's BinaryPathName argument, or elsewhere that // command lines are passed around. -// DecomposeCommandLine returns error if commandLine contains NUL. +// DecomposeCommandLine returns an error if commandLine contains NUL. 
func DecomposeCommandLine(commandLine string) ([]string, error) { if len(commandLine) == 0 { return []string{}, nil @@ -105,18 +153,35 @@ func DecomposeCommandLine(commandLine string) ([]string, error) { return nil, errorspkg.New("string with NUL passed to DecomposeCommandLine") } var argc int32 - argv, err := CommandLineToArgv(&utf16CommandLine[0], &argc) + argv, err := commandLineToArgv(&utf16CommandLine[0], &argc) if err != nil { return nil, err } defer LocalFree(Handle(unsafe.Pointer(argv))) + var args []string - for _, v := range (*argv)[:argc] { - args = append(args, UTF16ToString((*v)[:])) + for _, p := range unsafe.Slice(argv, argc) { + args = append(args, UTF16PtrToString(p)) } return args, nil } +// CommandLineToArgv parses a Unicode command line string and sets +// argc to the number of parsed arguments. +// +// The returned memory should be freed using a single call to LocalFree. +// +// Note that although the return type of CommandLineToArgv indicates 8192 +// entries of up to 8192 characters each, the actual count of parsed arguments +// may exceed 8192, and the documentation for CommandLineToArgvW does not mention +// any bound on the lengths of the individual argument strings. +// (See https://go.dev/issue/63236.) +func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { + argp, err := commandLineToArgv(cmd, argc) + argv = (*[8192]*[8192]uint16)(unsafe.Pointer(argp)) + return argv, err +} + func CloseOnExec(fd Handle) { SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0) } diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index d414ef13b..26be94a8a 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -7,8 +7,6 @@ package windows import ( "syscall" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) const ( @@ -1341,21 +1339,14 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() sdLen = min } - var src []byte - h := (*unsafeheader.Slice)(unsafe.Pointer(&src)) - h.Data = unsafe.Pointer(selfRelativeSD) - h.Len = sdLen - h.Cap = sdLen - + src := unsafe.Slice((*byte)(unsafe.Pointer(selfRelativeSD)), sdLen) + // SECURITY_DESCRIPTOR has pointers in it, which means checkptr expects for it to + // be aligned properly. When we're copying a Windows-allocated struct to a + // Go-allocated one, make sure that the Go allocation is aligned to the + // pointer size. 
const psize = int(unsafe.Sizeof(uintptr(0))) - - var dst []byte - h = (*unsafeheader.Slice)(unsafe.Pointer(&dst)) alloc := make([]uintptr, (sdLen+psize-1)/psize) - h.Data = (*unsafeheader.Slice)(unsafe.Pointer(&alloc)).Data - h.Len = sdLen - h.Cap = sdLen - + dst := unsafe.Slice((*byte)(unsafe.Pointer(&alloc[0])), sdLen) copy(dst, src) return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0])) } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 67bad0926..35cfc57ca 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -15,8 +15,6 @@ import ( "time" "unicode/utf16" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) type Handle uintptr @@ -240,7 +238,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW //sys GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) = kernel32.GetFileAttributesExW //sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW -//sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW +//sys commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW //sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] //sys LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) //sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) @@ -299,12 +297,15 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, event Handle, asynchronous bool) (regerrno error) = advapi32.RegNotifyChangeKeyValue //sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId //sys ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) = kernel32.ProcessIdToSessionId +//sys ClosePseudoConsole(console Handle) = kernel32.ClosePseudoConsole +//sys createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) = kernel32.CreatePseudoConsole //sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot //sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW //sys Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32NextW @@ -1667,12 +1668,8 @@ func NewNTUnicodeString(s string) (*NTUnicodeString, error) { // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. 
func (s *NTUnicodeString) Slice() []uint16 { - var slice []uint16 - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) - hdr.Data = unsafe.Pointer(s.Buffer) - hdr.Len = int(s.Length) - hdr.Cap = int(s.MaximumLength) - return slice + slice := unsafe.Slice(s.Buffer, s.MaximumLength) + return slice[:s.Length] } func (s *NTUnicodeString) String() string { @@ -1695,12 +1692,8 @@ func NewNTString(s string) (*NTString, error) { // Slice returns a byte slice that aliases the data in the NTString. func (s *NTString) Slice() []byte { - var slice []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) - hdr.Data = unsafe.Pointer(s.Buffer) - hdr.Len = int(s.Length) - hdr.Cap = int(s.MaximumLength) - return slice + slice := unsafe.Slice(s.Buffer, s.MaximumLength) + return slice[:s.Length] } func (s *NTString) String() string { @@ -1752,10 +1745,7 @@ func LoadResourceData(module, resInfo Handle) (data []byte, err error) { if err != nil { return } - h := (*unsafeheader.Slice)(unsafe.Pointer(&data)) - h.Data = unsafe.Pointer(ptr) - h.Len = int(size) - h.Cap = int(size) + data = unsafe.Slice((*byte)(unsafe.Pointer(ptr)), size) return } @@ -1826,3 +1816,17 @@ type PSAPI_WORKING_SET_EX_INFORMATION struct { // A PSAPI_WORKING_SET_EX_BLOCK union that indicates the attributes of the page at VirtualAddress. VirtualAttributes PSAPI_WORKING_SET_EX_BLOCK } + +// CreatePseudoConsole creates a windows pseudo console. +func CreatePseudoConsole(size Coord, in Handle, out Handle, flags uint32, pconsole *Handle) error { + // We need this wrapper to manually cast Coord to uint32. The autogenerated wrappers only + // accept arguments that can be casted to uintptr, and Coord can't. + return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), in, out, flags, pconsole) +} + +// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`. +func ResizePseudoConsole(pconsole Handle, size Coord) error { + // We need this wrapper to manually cast Coord to uint32. The autogenerated wrappers only + // accept arguments that can be casted to uintptr, and Coord can't. + return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size)))) +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 88e62a638..b88dc7c85 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -247,6 +247,7 @@ const ( PROC_THREAD_ATTRIBUTE_MITIGATION_POLICY = 0x00020007 PROC_THREAD_ATTRIBUTE_UMS_THREAD = 0x00030006 PROC_THREAD_ATTRIBUTE_PROTECTION_LEVEL = 0x0002000b + PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x00020016 ) const ( @@ -2139,6 +2140,12 @@ const ( ENABLE_LVB_GRID_WORLDWIDE = 0x10 ) +// Pseudo console related constants used for the flags parameter to +// CreatePseudoConsole. 
See: https://learn.microsoft.com/en-us/windows/console/createpseudoconsole +const ( + PSEUDOCONSOLE_INHERIT_CURSOR = 0x1 +) + type Coord struct { X int16 Y int16 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 5c385580f..8b1688de4 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -188,6 +188,7 @@ var ( procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") procCloseHandle = modkernel32.NewProc("CloseHandle") + procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") procCreateEventExW = modkernel32.NewProc("CreateEventExW") @@ -202,6 +203,7 @@ var ( procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") procCreatePipe = modkernel32.NewProc("CreatePipe") procCreateProcessW = modkernel32.NewProc("CreateProcessW") + procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole") procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") @@ -328,6 +330,7 @@ var ( procReleaseMutex = modkernel32.NewProc("ReleaseMutex") procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") procResetEvent = modkernel32.NewProc("ResetEvent") + procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") @@ -1633,6 +1636,11 @@ func CloseHandle(handle Handle) (err error) { return } +func ClosePseudoConsole(console Handle) { + syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0) + return +} + func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { @@ -1762,6 +1770,14 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA return } +func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) { + r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0) + if r0 != 0 { + hr = syscall.Errno(r0) + } + return +} + func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) if r1&0xff == 0 { @@ -2862,6 +2878,14 @@ func ResetEvent(event Handle) (err error) { return } +func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { + r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0) + if r0 != 0 { + hr = syscall.Errno(r0) + } + return +} + func ResumeThread(thread Handle) (ret uint32, err error) { r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) ret = uint32(r0) @@ -3820,9 +3844,9 @@ func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (er return } -func CommandLineToArgv(cmd *uint16, argc 
*int32) (argv *[8192]*[8192]uint16, err error) { +func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) - argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) + argv = (**uint16)(unsafe.Pointer(r0)) if argv == nil { err = errnoErr(e1) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index d738725ca..3674914f7 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -126,14 +126,17 @@ type rudimentaryErrorBackoff struct { // OnError will block if it is called more often than the embedded period time. // This will prevent overly tight hot error loops. func (r *rudimentaryErrorBackoff) OnError(error) { + now := time.Now() // start the timer before acquiring the lock r.lastErrorTimeLock.Lock() - defer r.lastErrorTimeLock.Unlock() - d := time.Since(r.lastErrorTime) - if d < r.minPeriod { - // If the time moves backwards for any reason, do nothing - time.Sleep(r.minPeriod - d) - } + d := now.Sub(r.lastErrorTime) r.lastErrorTime = time.Now() + r.lastErrorTimeLock.Unlock() + + // Do not sleep with the lock held because that causes all callers of HandleError to block. + // We only want the current goroutine to block. + // A negative or zero duration causes time.Sleep to return immediately. + // If the time moves backwards for any reason, do nothing. + time.Sleep(r.minPeriod - d) } // GetCaller returns the caller of the function that calls it. diff --git a/vendor/k8s.io/helm/pkg/chartutil/create.go b/vendor/k8s.io/helm/pkg/chartutil/create.go index 8bab79429..d8480f54f 100644 --- a/vendor/k8s.io/helm/pkg/chartutil/create.go +++ b/vendor/k8s.io/helm/pkg/chartutil/create.go @@ -74,7 +74,7 @@ serviceAccount: create: true # The name of the service account to use. # If not set and create is true, a name is generated using the fullname template - name: + name: "" podSecurityContext: {} # fsGroup: 2000 diff --git a/vendor/k8s.io/helm/pkg/chartutil/requirements.go b/vendor/k8s.io/helm/pkg/chartutil/requirements.go index 4c9713233..0bd16d2eb 100644 --- a/vendor/k8s.io/helm/pkg/chartutil/requirements.go +++ b/vendor/k8s.io/helm/pkg/chartutil/requirements.go @@ -17,7 +17,9 @@ package chartutil import ( "errors" + "fmt" "log" + "regexp" "strings" "time" @@ -219,6 +221,9 @@ func ProcessRequirementsTags(reqs *Requirements, cvals Values) { } +// Validate alias names against this regexp +var aliasRegexp = regexp.MustCompile("^[a-zA-Z0-9-_]+$") + func getAliasDependency(charts []*chart.Chart, aliasChart *Dependency) *chart.Chart { var chartFound chart.Chart for _, existingChart := range charts { @@ -237,6 +242,11 @@ func getAliasDependency(charts []*chart.Chart, aliasChart *Dependency) *chart.Ch chartFound = *existingChart newMetadata := *existingChart.Metadata if aliasChart.Alias != "" { + // Make sure Alias is well-formed + if !aliasRegexp.MatchString(aliasChart.Alias) { + fmt.Printf("Invalid alias in dependency %q. 
Skipping.", aliasChart.Name) + continue + } newMetadata.Name = aliasChart.Alias } chartFound.Metadata = &newMetadata @@ -286,6 +296,9 @@ func doProcessRequirementsEnabled(c *chart.Chart, v *chart.Config, path string) chartDependencies = append(chartDependencies, chartDependency) } if req.Alias != "" { + if !aliasRegexp.MatchString(req.Alias) { + return fmt.Errorf("illegal alias name in %q", req.Name) + } req.Name = req.Alias } } @@ -396,7 +409,7 @@ func processImportValues(c *chart.Chart) error { if err != nil { return err } - b := cvals.AsMap() + b := make(map[string]interface{}, 0) // import values from each dependency if specified in import-values for _, r := range reqs.Dependencies { // only process raw requirement that is found in chart's dependencies (enabled) @@ -417,42 +430,34 @@ func processImportValues(c *chart.Chart) error { if len(r.ImportValues) > 0 { var outiv []interface{} for _, riv := range r.ImportValues { + nm := make(map[string]string, 0) switch iv := riv.(type) { case map[string]interface{}: - nm := map[string]string{ - "child": iv["child"].(string), - "parent": iv["parent"].(string), - } - outiv = append(outiv, nm) - s := name + "." + nm["child"] - // get child table - vv, err := cvals.Table(s) - if err != nil { - log.Printf("Warning: ImportValues missing table: %v", err) - continue - } - // create value map from child to be merged into parent - vm := pathToMap(nm["parent"], vv.AsMap()) - b = coalesceTables(b, vm, c.Metadata.Name) + nm["child"] = iv["child"].(string) + nm["parent"] = iv["parent"].(string) case string: - nm := map[string]string{ - "child": "exports." + iv, - "parent": ".", - } - outiv = append(outiv, nm) - s := name + "." + nm["child"] - vm, err := cvals.Table(s) - if err != nil { - log.Printf("Warning: ImportValues missing table: %v", err) - continue - } - b = coalesceTables(b, vm.AsMap(), c.Metadata.Name) + nm["child"] = "exports." + iv + nm["parent"] = "." } + + outiv = append(outiv, nm) + s := name + "." + nm["child"] + // get child table + vv, err := cvals.Table(s) + if err != nil { + log.Printf("Warning: ImportValues missing table: %v", err) + continue + } + // create value map from child to be merged into parent + vm := pathToMap(nm["parent"], vv.AsMap()) + b = coalesceTables(b, vm, c.Metadata.Name) + } // set our formatted import values r.ImportValues = outiv } } + b = coalesceTables(b, cvals, c.Metadata.Name) y, err := yaml.Marshal(b) if err != nil { return err diff --git a/vendor/k8s.io/helm/pkg/engine/engine.go b/vendor/k8s.io/helm/pkg/engine/engine.go index b4b6475c9..8c7d36112 100644 --- a/vendor/k8s.io/helm/pkg/engine/engine.go +++ b/vendor/k8s.io/helm/pkg/engine/engine.go @@ -26,11 +26,14 @@ import ( "text/template" "github.com/Masterminds/sprig" + "github.com/pkg/errors" "k8s.io/helm/pkg/chartutil" "k8s.io/helm/pkg/proto/hapi/chart" ) +const recursionMaxNums = 1000 + // Engine is an implementation of 'cmd/tiller/environment'.Engine that uses Go templates. type Engine struct { // FuncMap contains the template functions that will be passed to each @@ -144,12 +147,23 @@ func (e *Engine) alterFuncMap(t *template.Template, referenceTpls map[string]ren funcMap[k] = v } + includedNames := make(map[string]int) + // Add the 'include' function here so we can close over t. 
funcMap["include"] = func(name string, data interface{}) (string, error) { buf := bytes.NewBuffer(nil) + if v, ok := includedNames[name]; ok { + if v > recursionMaxNums { + return "", errors.Wrapf(fmt.Errorf("unable to execute template"), "rendering template has a nested reference name: %s", name) + } + includedNames[name]++ + } else { + includedNames[name] = 1 + } if err := t.ExecuteTemplate(buf, name, data); err != nil { return "", err } + includedNames[name]-- return buf.String(), nil } diff --git a/vendor/k8s.io/helm/pkg/version/version.go b/vendor/k8s.io/helm/pkg/version/version.go index d58be0446..74accb402 100644 --- a/vendor/k8s.io/helm/pkg/version/version.go +++ b/vendor/k8s.io/helm/pkg/version/version.go @@ -26,7 +26,7 @@ var ( // Increment major number for new feature additions and behavioral changes. // Increment minor number for bug fixes and performance enhancements. // Increment patch number for critical fixes to existing releases. - Version = "v2.16" + Version = "v2.17" // BuildMetadata is extra build time data BuildMetadata = "unreleased" diff --git a/vendor/modules.txt b/vendor/modules.txt index 90ff514b3..0559552a1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# github.com/BurntSushi/toml v1.0.0 +# github.com/BurntSushi/toml v1.2.1 ## explicit; go 1.16 github.com/BurntSushi/toml github.com/BurntSushi/toml/internal @@ -27,13 +27,13 @@ github.com/bronze1man/yaml2json/y2jLib # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/cyphar/filepath-securejoin v0.2.2 -## explicit +# github.com/cyphar/filepath-securejoin v0.2.3 +## explicit; go 1.13 github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/emicklei/go-restful/v3 v3.10.1 +# github.com/emicklei/go-restful/v3 v3.11.0 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log @@ -57,10 +57,10 @@ github.com/fluent/fluent-operator/v2/pkg/utils # github.com/fsnotify/fsnotify v1.6.0 ## explicit; go 1.16 github.com/fsnotify/fsnotify -# github.com/gardener/etcd-druid v0.19.2 +# github.com/gardener/etcd-druid v0.20.1 ## explicit; go 1.20 github.com/gardener/etcd-druid/api/v1alpha1 -# github.com/gardener/gardener v1.81.0 +# github.com/gardener/gardener v1.84.0 ## explicit; go 1.21 github.com/gardener/gardener/.github github.com/gardener/gardener/.github/ISSUE_TEMPLATE @@ -132,8 +132,8 @@ github.com/gardener/gardener/third_party/controller-runtime/pkg/apiutil # github.com/gardener/hvpa-controller/api v0.5.0 ## explicit; go 1.15 github.com/gardener/hvpa-controller/api/v1alpha1 -# github.com/gardener/machine-controller-manager v0.48.1 -## explicit; go 1.19 +# github.com/gardener/machine-controller-manager v0.50.0 +## explicit; go 1.20 github.com/gardener/machine-controller-manager/pkg/apis/machine github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1 # github.com/ghodss/yaml v1.0.0 @@ -223,7 +223,7 @@ github.com/google/gnostic-models/extensions github.com/google/gnostic-models/jsonschema github.com/google/gnostic-models/openapiv2 github.com/google/gnostic-models/openapiv3 -# github.com/google/go-cmp v0.5.9 +# github.com/google/go-cmp v0.6.0 ## explicit; go 1.13 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff @@ -240,7 +240,7 @@ github.com/google/pprof/profile # github.com/google/uuid v1.3.0 ## explicit github.com/google/uuid -# github.com/hashicorp/errwrap v1.0.0 +# github.com/hashicorp/errwrap v1.1.0 ## 
explicit github.com/hashicorp/errwrap # github.com/hashicorp/go-multierror v1.1.1 @@ -255,6 +255,15 @@ github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap +# github.com/ironcore-dev/vgopath v0.1.3 +## explicit; go 1.20 +github.com/ironcore-dev/vgopath +github.com/ironcore-dev/vgopath/internal/cmd/version +github.com/ironcore-dev/vgopath/internal/cmd/vgopath +github.com/ironcore-dev/vgopath/internal/cmd/vgopath/exec +github.com/ironcore-dev/vgopath/internal/link +github.com/ironcore-dev/vgopath/internal/module +github.com/ironcore-dev/vgopath/internal/version # github.com/josharian/intern v1.0.0 ## explicit; go 1.5 github.com/josharian/intern @@ -297,7 +306,7 @@ github.com/modern-go/reflect2 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg -# github.com/onsi/ginkgo/v2 v2.11.0 +# github.com/onsi/ginkgo/v2 v2.13.0 ## explicit; go 1.18 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -319,7 +328,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.27.10 +# github.com/onsi/gomega v1.29.0 ## explicit; go 1.18 github.com/onsi/gomega github.com/onsi/gomega/format @@ -385,13 +394,12 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore -# golang.org/x/crypto v0.13.0 +# golang.org/x/crypto v0.14.0 ## explicit; go 1.17 golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 golang.org/x/crypto/curve25519 golang.org/x/crypto/curve25519/internal/field -golang.org/x/crypto/ed25519 golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/pbkdf2 @@ -407,7 +415,7 @@ golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.15.0 +# golang.org/x/net v0.17.0 ## explicit; go 1.17 golang.org/x/net/context golang.org/x/net/html @@ -423,15 +431,14 @@ golang.org/x/net/proxy ## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sys v0.12.0 +# golang.org/x/sys v0.13.0 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/execabs -golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.12.0 +# golang.org/x/term v0.13.0 ## explicit; go 1.17 golang.org/x/term # golang.org/x/text v0.13.0 @@ -559,7 +566,7 @@ istio.io/api/type/v1beta1 ## explicit; go 1.18 istio.io/client-go/pkg/apis/networking/v1alpha3 istio.io/client-go/pkg/apis/networking/v1beta1 -# k8s.io/api v0.28.2 +# k8s.io/api v0.28.3 ## explicit; go 1.20 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -615,13 +622,13 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.28.2 +# k8s.io/apiextensions-apiserver v0.28.3 ## explicit; go 1.20 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme -# k8s.io/apimachinery v0.28.2 +# k8s.io/apimachinery v0.28.3 ## explicit; go 1.20 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -682,7 +689,7 @@ 
k8s.io/apimachinery/third_party/forked/golang/reflect # k8s.io/autoscaler/vertical-pod-autoscaler v0.14.0 ## explicit; go 1.19 k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1 -# k8s.io/client-go v0.28.2 +# k8s.io/client-go v0.28.3 ## explicit; go 1.20 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -823,7 +830,7 @@ k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue -# k8s.io/code-generator v0.28.2 +# k8s.io/code-generator v0.28.3 ## explicit; go 1.20 k8s.io/code-generator k8s.io/code-generator/cmd/applyconfiguration-gen @@ -862,7 +869,7 @@ k8s.io/code-generator/cmd/set-gen k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/util k8s.io/code-generator/third_party/forked/golang/reflect -# k8s.io/component-base v0.28.2 +# k8s.io/component-base v0.28.3 ## explicit; go 1.20 k8s.io/component-base/config k8s.io/component-base/config/v1alpha1 @@ -879,7 +886,7 @@ k8s.io/gengo/generator k8s.io/gengo/namer k8s.io/gengo/parser k8s.io/gengo/types -# k8s.io/helm v2.16.1+incompatible +# k8s.io/helm v2.17.0+incompatible ## explicit k8s.io/helm/pkg/chartutil k8s.io/helm/pkg/engine @@ -903,7 +910,7 @@ k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity -# k8s.io/kube-aggregator v0.28.2 +# k8s.io/kube-aggregator v0.28.3 ## explicit; go 1.20 k8s.io/kube-aggregator/pkg/apis/apiregistration k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 @@ -928,10 +935,10 @@ k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/util/sets k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/kubelet v0.28.2 +# k8s.io/kubelet v0.28.3 ## explicit; go 1.20 k8s.io/kubelet/pkg/apis -# k8s.io/metrics v0.28.2 +# k8s.io/metrics v0.28.3 ## explicit; go 1.20 k8s.io/metrics/pkg/apis/metrics k8s.io/metrics/pkg/apis/metrics/v1beta1 @@ -946,7 +953,7 @@ k8s.io/utils/net k8s.io/utils/pointer k8s.io/utils/strings/slices k8s.io/utils/trace -# sigs.k8s.io/controller-runtime v0.16.2 +# sigs.k8s.io/controller-runtime v0.16.3 ## explicit; go 1.20 sigs.k8s.io/controller-runtime/pkg/cache sigs.k8s.io/controller-runtime/pkg/cache/internal @@ -985,7 +992,7 @@ sigs.k8s.io/controller-runtime/pkg/source sigs.k8s.io/controller-runtime/pkg/webhook sigs.k8s.io/controller-runtime/pkg/webhook/admission sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics -# sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20230707163321-8a64e5f3bd78 +# sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20231015215740-bf15e44028f9 ## explicit; go 1.20 sigs.k8s.io/controller-runtime/tools/setup-envtest sigs.k8s.io/controller-runtime/tools/setup-envtest/env diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index d8446e85b..5410e1cdd 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -22,8 +22,10 @@ import ( "net/http" "time" + "golang.org/x/exp/maps" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -121,6 +123,10 @@ type Informer interface { HasSynced() bool } +// AllNamespaces should be used as the map key to deliminate namespace settings +// 
that apply to all namespaces that themselves do not have explicit settings. +const AllNamespaces = metav1.NamespaceAll + // Options are the optional arguments for creating a new Cache object. type Options struct { // HTTPClient is the http client to use for the REST client @@ -172,6 +178,11 @@ type Options struct { // the namespaces in here will be watched and it will by used to default // ByObject.Namespaces for all objects if that is nil. // + // It is possible to have specific Config for just some namespaces + // but cache all namespaces by using the AllNamespaces const as the map key. + // This will then include all namespaces that do not have a more specific + // setting. + // // The options in the Config that are nil will be defaulted from // the respective Default* settings. DefaultNamespaces map[string]Config @@ -214,6 +225,11 @@ type ByObject struct { // Settings in the map value that are unset will be defaulted. // Use an empty value for the specific setting to prevent that. // + // It is possible to have specific Config for just some namespaces + // but cache all namespaces by using the AllNamespaces const as the map key. + // This will then include all namespaces that do not have a more specific + // setting. + // // A nil map allows to default this to the cache's DefaultNamespaces setting. // An empty map prevents this and means that all namespaces will be cached. // @@ -392,6 +408,9 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { for namespace, cfg := range opts.DefaultNamespaces { cfg = defaultConfig(cfg, optionDefaultsToConfig(&opts)) + if namespace == metav1.NamespaceAll { + cfg.FieldSelector = fields.AndSelectors(appendIfNotNil(namespaceAllSelector(maps.Keys(opts.DefaultNamespaces)), cfg.FieldSelector)...) + } opts.DefaultNamespaces[namespace] = cfg } @@ -418,6 +437,15 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { // 3. Default from the global defaults config = defaultConfig(config, optionDefaultsToConfig(&opts)) + if namespace == metav1.NamespaceAll { + config.FieldSelector = fields.AndSelectors( + appendIfNotNil( + namespaceAllSelector(maps.Keys(byObject.Namespaces)), + config.FieldSelector, + )..., + ) + } + byObject.Namespaces[namespace] = config } @@ -457,3 +485,21 @@ func defaultConfig(toDefault, defaultFrom Config) Config { return toDefault } + +func namespaceAllSelector(namespaces []string) fields.Selector { + selectors := make([]fields.Selector, 0, len(namespaces)-1) + for _, namespace := range namespaces { + if namespace != metav1.NamespaceAll { + selectors = append(selectors, fields.OneTermNotEqualSelector("metadata.namespace", namespace)) + } + } + + return fields.AndSelectors(selectors...) 
+} + +func appendIfNotNil[T comparable](a, b T) []T { + if b != *new(T) { + return []T{a, b} + } + return []T{a} +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go index 5b20195d7..87c31a7c0 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go @@ -23,6 +23,7 @@ import ( corev1 "k8s.io/api/core/v1" apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" toolscache "k8s.io/client-go/tools/cache" @@ -210,6 +211,9 @@ func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj cache, ok := c.namespaceToCache[key.Namespace] if !ok { + if global, hasGlobal := c.namespaceToCache[metav1.NamespaceAll]; hasGlobal { + return global.Get(ctx, key, obj, opts...) + } return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", key) } return cache.Get(ctx, key, obj, opts...) diff --git a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/README.md b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/README.md index 40379c9b8..1bdeebbc5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/README.md +++ b/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest/README.md @@ -4,7 +4,7 @@ This is a small tool that manages binaries for envtest. It can be used to download new binaries, list currently installed and available ones, and clean up versions. -To use it, just go-install it on 1.16+ (it's a separate, self-contained +To use it, just go-install it on 1.19+ (it's a separate, self-contained module): ```shell @@ -45,7 +45,7 @@ setup-envtest sideload 1.16.2 < downloaded-envtest.tar.gz ## Where does it put all those binaries? By default, binaries are stored in a subdirectory of an OS-specific data -directory, as per the OS's conventions. +directory, as per the OS's conventions. On Linux, this is `$XDG_DATA_HOME`; on Windows, `%LocalAppData`; and on OSX, `~/Library/Application Support`. 
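As a side note on the controller-runtime bump vendored above (v0.16.2 to v0.16.3): the cache changes introduce the `AllNamespaces` map key so that a cluster-wide default configuration can coexist with per-namespace overrides in `DefaultNamespaces` and `ByObject.Namespaces`. The sketch below is not part of this patch series; it only illustrates, under the assumption of a plain controller-runtime consumer, how such a configuration could look. The function names, the `kube-system` override, and the `managed: "true"` label selector are hypothetical; `cache.AllNamespaces`, `DefaultNamespaces`, and `ByObject` come from the vendored diff itself.

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// newManager builds a manager whose cache watches every namespace by default.
// Secrets in "kube-system" are additionally restricted by a label selector,
// while all other namespaces fall under the cache.AllNamespaces key.
func newManager() (manager.Manager, error) {
	return ctrl.NewManager(ctrl.GetConfigOrDie(), manager.Options{
		Cache: cache.Options{
			DefaultNamespaces: map[string]cache.Config{
				cache.AllNamespaces: {},
			},
			ByObject: map[client.Object]cache.ByObject{
				&corev1.Secret{}: {
					Namespaces: map[string]cache.Config{
						cache.AllNamespaces: {},
						"kube-system": {
							LabelSelector: labels.SelectorFromSet(labels.Set{"managed": "true"}),
						},
					},
				},
			},
		},
	})
}

func main() {
	mgr, err := newManager()
	if err != nil {
		panic(err)
	}
	_ = mgr // a real controller would register reconcilers and call mgr.Start(ctrl.SetupSignalHandler())
}
```

With this layout, the defaulting logic shown in the diff (`defaultOpts` together with the new `namespaceAllSelector` helper) ANDs a field selector onto the `AllNamespaces` entry that excludes the explicitly listed namespaces, so each namespace is served by exactly one informer configuration.
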
From 627168d8794936186795419814ecaa63d12046fb Mon Sep 17 00:00:00 2001 From: Rafael Franzke Date: Wed, 11 Oct 2023 16:20:20 +0200 Subject: [PATCH 2/8] Rework directory restructure In the future, the `pkg/controller/operatingsystemconfig/generator` package can be deleted entirely --- .../generator/gardenlinux/additional_files.go | 8 +++-- .../generator/gardenlinux/cgroups.go | 14 +++++---- .../generator/generator.go | 15 ++------- .../generator/generator_suite_test.go | 0 .../generator/generator_test.go | 9 +++--- .../templates/cloud-init.gardenlinux.template | 0 .../generator/testfiles/cloud-init | 0 .../generator/testfiles/containerd-bootstrap | 0 .../generator/testfiles/containerd-reconcile | 0 .../generator/testfiles/docker-bootstrap | 0 .../generator/testfiles/docker-reconcile | 0 .../generator/testfiles/embed.go | 4 ++- .../testfiles/memoryone-containerd-bootstrap | 0 .../memoryone-containerd-bootstrap-defaults | 0 .../testfiles/memoryone-docker-bootstrap | 0 pkg/{generator => }/gardenlinux/constants.go | 12 ++++--- .../scripts/containerd_cgroup_driver.sh | 0 .../gardenlinux/scripts/g_functions.sh | 0 .../scripts/kubelet_cgroup_driver.sh | 0 .../contants.go => memoryone/constants.go} | 2 +- pkg/{generator => }/memoryone/values.go | 31 ++++++++++++------- 21 files changed, 51 insertions(+), 44 deletions(-) rename pkg/{ => controller/operatingsystemconfig}/generator/gardenlinux/additional_files.go (83%) rename pkg/{ => controller/operatingsystemconfig}/generator/gardenlinux/cgroups.go (79%) rename pkg/{ => controller/operatingsystemconfig}/generator/generator.go (86%) rename pkg/{ => controller/operatingsystemconfig}/generator/generator_suite_test.go (100%) rename pkg/{ => controller/operatingsystemconfig}/generator/generator_test.go (96%) rename pkg/{ => controller/operatingsystemconfig}/generator/templates/cloud-init.gardenlinux.template (100%) rename pkg/{ => controller/operatingsystemconfig}/generator/testfiles/cloud-init (100%) rename pkg/{ => controller/operatingsystemconfig}/generator/testfiles/containerd-bootstrap (100%) rename pkg/{ => controller/operatingsystemconfig}/generator/testfiles/containerd-reconcile (100%) rename pkg/{ => controller/operatingsystemconfig}/generator/testfiles/docker-bootstrap (100%) rename pkg/{ => controller/operatingsystemconfig}/generator/testfiles/docker-reconcile (100%) rename pkg/{ => controller/operatingsystemconfig}/generator/testfiles/embed.go (97%) rename pkg/{ => controller/operatingsystemconfig}/generator/testfiles/memoryone-containerd-bootstrap (100%) rename pkg/{ => controller/operatingsystemconfig}/generator/testfiles/memoryone-containerd-bootstrap-defaults (100%) rename pkg/{ => controller/operatingsystemconfig}/generator/testfiles/memoryone-docker-bootstrap (100%) rename pkg/{generator => }/gardenlinux/constants.go (84%) rename pkg/{generator => }/gardenlinux/scripts/containerd_cgroup_driver.sh (100%) rename pkg/{generator => }/gardenlinux/scripts/g_functions.sh (100%) rename pkg/{generator => }/gardenlinux/scripts/kubelet_cgroup_driver.sh (100%) rename pkg/{generator/memoryone/contants.go => memoryone/constants.go} (90%) rename pkg/{generator => }/memoryone/values.go (68%) diff --git a/pkg/generator/gardenlinux/additional_files.go b/pkg/controller/operatingsystemconfig/generator/gardenlinux/additional_files.go similarity index 83% rename from pkg/generator/gardenlinux/additional_files.go rename to pkg/controller/operatingsystemconfig/generator/gardenlinux/additional_files.go index 180bdc818..f3edab486 100644 --- 
a/pkg/generator/gardenlinux/additional_files.go +++ b/pkg/controller/operatingsystemconfig/generator/gardenlinux/additional_files.go @@ -18,6 +18,8 @@ import ( "path/filepath" "github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/generator" + + "github.com/gardener/gardener-extension-os-gardenlinux/pkg/gardenlinux" ) var ( @@ -31,15 +33,15 @@ func GetAdditionalScripts() ([]*generator.File, error) { files := []*generator.File{} for _, f := range fileList { - scriptContent, err := templates.ReadFile(filepath.Join("scripts", f)) + scriptContent, err := gardenlinux.Templates.ReadFile(filepath.Join("scripts", f)) if err != nil { return nil, err } additionalScript := &generator.File{ - Path: filepath.Join(scriptLocation, f), + Path: filepath.Join(gardenlinux.ScriptLocation, f), Content: scriptContent, - Permissions: &scriptPermissions, + Permissions: &gardenlinux.ScriptPermissions, } files = append(files, additionalScript) diff --git a/pkg/generator/gardenlinux/cgroups.go b/pkg/controller/operatingsystemconfig/generator/gardenlinux/cgroups.go similarity index 79% rename from pkg/generator/gardenlinux/cgroups.go rename to pkg/controller/operatingsystemconfig/generator/gardenlinux/cgroups.go index 1c3cce02d..8db3a42d4 100644 --- a/pkg/generator/gardenlinux/cgroups.go +++ b/pkg/controller/operatingsystemconfig/generator/gardenlinux/cgroups.go @@ -18,6 +18,8 @@ import ( "path/filepath" "github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/generator" + + "github.com/gardener/gardener-extension-os-gardenlinux/pkg/gardenlinux" ) var ( @@ -33,14 +35,14 @@ ExecStartPre=/opt/gardener/bin/kubelet_cgroup_driver.sh ) func ContainerdCgroupDriver() ([]*generator.File, []*generator.Unit, error) { - containerdConfigureScriptContent, err := templates.ReadFile(filepath.Join("scripts", "containerd_cgroup_driver.sh")) + containerdConfigureScriptContent, err := gardenlinux.Templates.ReadFile(filepath.Join("scripts", "containerd_cgroup_driver.sh")) if err != nil { return nil, nil, err } containerdConfigureScript := &generator.File{ - Path: filepath.Join(scriptLocation, "containerd_cgroup_driver.sh"), + Path: filepath.Join(gardenlinux.ScriptLocation, "containerd_cgroup_driver.sh"), Content: containerdConfigureScriptContent, - Permissions: &scriptPermissions, + Permissions: &gardenlinux.ScriptPermissions, } containerdDropin := &generator.Unit{ Name: "containerd.service", @@ -56,14 +58,14 @@ func ContainerdCgroupDriver() ([]*generator.File, []*generator.Unit, error) { } func KubeletCgroupDriver() ([]*generator.File, []*generator.Unit, error) { - kubeletConfigureScriptContent, err := templates.ReadFile(filepath.Join("scripts", "kubelet_cgroup_driver.sh")) + kubeletConfigureScriptContent, err := gardenlinux.Templates.ReadFile(filepath.Join("scripts", "kubelet_cgroup_driver.sh")) if err != nil { return nil, nil, err } kubeletConfigureScript := &generator.File{ - Path: filepath.Join(scriptLocation, "kubelet_cgroup_driver.sh"), + Path: filepath.Join(gardenlinux.ScriptLocation, "kubelet_cgroup_driver.sh"), Content: kubeletConfigureScriptContent, - Permissions: &scriptPermissions, + Permissions: &gardenlinux.ScriptPermissions, } kubeletDropin := &generator.Unit{ Name: "kubelet.service", diff --git a/pkg/generator/generator.go b/pkg/controller/operatingsystemconfig/generator/generator.go similarity index 86% rename from pkg/generator/generator.go rename to pkg/controller/operatingsystemconfig/generator/generator.go index 00c29de96..99372c792 100644 --- 
a/pkg/generator/generator.go +++ b/pkg/controller/operatingsystemconfig/generator/generator.go @@ -24,8 +24,8 @@ import ( "github.com/go-logr/logr" runtimeutils "k8s.io/apimachinery/pkg/util/runtime" - "github.com/gardener/gardener-extension-os-gardenlinux/pkg/generator/gardenlinux" - "github.com/gardener/gardener-extension-os-gardenlinux/pkg/generator/memoryone" + "github.com/gardener/gardener-extension-os-gardenlinux/pkg/controller/operatingsystemconfig/generator/gardenlinux" + "github.com/gardener/gardener-extension-os-gardenlinux/pkg/memoryone" ) var ( @@ -52,7 +52,6 @@ func init() { cloudInitGenerator = &GardenLinuxCloudInitGenerator{ cloudInitGenerator: ostemplate.NewCloudInitGenerator(cloudInitTemplate, ostemplate.DefaultUnitsPath, cmd, additionalValues), } - } // additionalValues provides additional values to the cloud-init template @@ -68,18 +67,10 @@ func additionalValues(osc *extensionsv1alpha1.OperatingSystemConfig) (map[string return values, nil } -// isSupportedOscType checks if the OperatingSystemConfig's type is one of those for which this extension is responsbile -func isSupportedOscType(osc *generator.OperatingSystemConfig) bool { - return osc.Object.Spec.Type == gardenlinux.OSTypeGardenLinux || - osc.Object.Spec.Type == memoryone.OSTypeMemoryOneGardenLinux -} - // Generate generates a Garden Linux specific cloud-init script from the given OperatingSystemConfig. func (g *GardenLinuxCloudInitGenerator) Generate(logger logr.Logger, osc *generator.OperatingSystemConfig) ([]byte, *string, error) { - // we are only setting this up if the worker pool is configured with containerd - if isSupportedOscType(osc) && - osc.Object.Spec.Purpose == extensionsv1alpha1.OperatingSystemConfigPurposeReconcile && + if osc.Object.Spec.Purpose == extensionsv1alpha1.OperatingSystemConfigPurposeReconcile && osc.CRI != nil && osc.CRI.Name == extensionsv1alpha1.CRINameContainerD { // add additional scripts that are provided in the embedded fs diff --git a/pkg/generator/generator_suite_test.go b/pkg/controller/operatingsystemconfig/generator/generator_suite_test.go similarity index 100% rename from pkg/generator/generator_suite_test.go rename to pkg/controller/operatingsystemconfig/generator/generator_suite_test.go diff --git a/pkg/generator/generator_test.go b/pkg/controller/operatingsystemconfig/generator/generator_test.go similarity index 96% rename from pkg/generator/generator_test.go rename to pkg/controller/operatingsystemconfig/generator/generator_test.go index e278088e7..1134cc20b 100644 --- a/pkg/generator/generator_test.go +++ b/pkg/controller/operatingsystemconfig/generator/generator_test.go @@ -25,10 +25,9 @@ import ( "k8s.io/utils/pointer" "github.com/gardener/gardener-extension-os-gardenlinux/pkg/apis/memoryonegardenlinux/v1alpha1" - gardenlinux_generator "github.com/gardener/gardener-extension-os-gardenlinux/pkg/generator" - "github.com/gardener/gardener-extension-os-gardenlinux/pkg/generator/gardenlinux" - "github.com/gardener/gardener-extension-os-gardenlinux/pkg/generator/memoryone" - "github.com/gardener/gardener-extension-os-gardenlinux/pkg/generator/testfiles" + gardenlinux_generator "github.com/gardener/gardener-extension-os-gardenlinux/pkg/controller/operatingsystemconfig/generator" + "github.com/gardener/gardener-extension-os-gardenlinux/pkg/controller/operatingsystemconfig/generator/testfiles" + "github.com/gardener/gardener-extension-os-gardenlinux/pkg/memoryone" ) type byteSlice []byte @@ -103,7 +102,7 @@ dataKey: token`) Spec: extensionsv1alpha1.OperatingSystemConfigSpec{ 
Purpose: extensionsv1alpha1.OperatingSystemConfigPurposeProvision, DefaultSpec: extensionsv1alpha1.DefaultSpec{ - Type: gardenlinux.OSTypeGardenLinux, + Type: "gardenlinux", }, }, }, diff --git a/pkg/generator/templates/cloud-init.gardenlinux.template b/pkg/controller/operatingsystemconfig/generator/templates/cloud-init.gardenlinux.template similarity index 100% rename from pkg/generator/templates/cloud-init.gardenlinux.template rename to pkg/controller/operatingsystemconfig/generator/templates/cloud-init.gardenlinux.template diff --git a/pkg/generator/testfiles/cloud-init b/pkg/controller/operatingsystemconfig/generator/testfiles/cloud-init similarity index 100% rename from pkg/generator/testfiles/cloud-init rename to pkg/controller/operatingsystemconfig/generator/testfiles/cloud-init diff --git a/pkg/generator/testfiles/containerd-bootstrap b/pkg/controller/operatingsystemconfig/generator/testfiles/containerd-bootstrap similarity index 100% rename from pkg/generator/testfiles/containerd-bootstrap rename to pkg/controller/operatingsystemconfig/generator/testfiles/containerd-bootstrap diff --git a/pkg/generator/testfiles/containerd-reconcile b/pkg/controller/operatingsystemconfig/generator/testfiles/containerd-reconcile similarity index 100% rename from pkg/generator/testfiles/containerd-reconcile rename to pkg/controller/operatingsystemconfig/generator/testfiles/containerd-reconcile diff --git a/pkg/generator/testfiles/docker-bootstrap b/pkg/controller/operatingsystemconfig/generator/testfiles/docker-bootstrap similarity index 100% rename from pkg/generator/testfiles/docker-bootstrap rename to pkg/controller/operatingsystemconfig/generator/testfiles/docker-bootstrap diff --git a/pkg/generator/testfiles/docker-reconcile b/pkg/controller/operatingsystemconfig/generator/testfiles/docker-reconcile similarity index 100% rename from pkg/generator/testfiles/docker-reconcile rename to pkg/controller/operatingsystemconfig/generator/testfiles/docker-reconcile diff --git a/pkg/generator/testfiles/embed.go b/pkg/controller/operatingsystemconfig/generator/testfiles/embed.go similarity index 97% rename from pkg/generator/testfiles/embed.go rename to pkg/controller/operatingsystemconfig/generator/testfiles/embed.go index a8763b27a..2ae8775f3 100644 --- a/pkg/generator/testfiles/embed.go +++ b/pkg/controller/operatingsystemconfig/generator/testfiles/embed.go @@ -14,7 +14,9 @@ package testfiles -import "embed" +import ( + "embed" +) // Files contains the contents of the testfiles directory // diff --git a/pkg/generator/testfiles/memoryone-containerd-bootstrap b/pkg/controller/operatingsystemconfig/generator/testfiles/memoryone-containerd-bootstrap similarity index 100% rename from pkg/generator/testfiles/memoryone-containerd-bootstrap rename to pkg/controller/operatingsystemconfig/generator/testfiles/memoryone-containerd-bootstrap diff --git a/pkg/generator/testfiles/memoryone-containerd-bootstrap-defaults b/pkg/controller/operatingsystemconfig/generator/testfiles/memoryone-containerd-bootstrap-defaults similarity index 100% rename from pkg/generator/testfiles/memoryone-containerd-bootstrap-defaults rename to pkg/controller/operatingsystemconfig/generator/testfiles/memoryone-containerd-bootstrap-defaults diff --git a/pkg/generator/testfiles/memoryone-docker-bootstrap b/pkg/controller/operatingsystemconfig/generator/testfiles/memoryone-docker-bootstrap similarity index 100% rename from pkg/generator/testfiles/memoryone-docker-bootstrap rename to 
pkg/controller/operatingsystemconfig/generator/testfiles/memoryone-docker-bootstrap diff --git a/pkg/generator/gardenlinux/constants.go b/pkg/gardenlinux/constants.go similarity index 84% rename from pkg/generator/gardenlinux/constants.go rename to pkg/gardenlinux/constants.go index 4a658e490..038a8a1e8 100644 --- a/pkg/generator/gardenlinux/constants.go +++ b/pkg/gardenlinux/constants.go @@ -14,18 +14,20 @@ package gardenlinux -import "embed" +import ( + "embed" +) var ( //go:embed scripts/* - templates embed.FS + Templates embed.FS - scriptPermissions = int32(0755) + ScriptPermissions = int32(0755) ) const ( - // scriptLocation is the location that Gardener configuration scripts end up on Garden Linux - scriptLocation = "/opt/gardener/bin" + // ScriptLocation is the location that Gardener configuration scripts end up on Garden Linux + ScriptLocation = "/opt/gardener/bin" // OSTypeGardenLinux is a constant for the Garden Linux extension OS type. OSTypeGardenLinux = "gardenlinux" diff --git a/pkg/generator/gardenlinux/scripts/containerd_cgroup_driver.sh b/pkg/gardenlinux/scripts/containerd_cgroup_driver.sh similarity index 100% rename from pkg/generator/gardenlinux/scripts/containerd_cgroup_driver.sh rename to pkg/gardenlinux/scripts/containerd_cgroup_driver.sh diff --git a/pkg/generator/gardenlinux/scripts/g_functions.sh b/pkg/gardenlinux/scripts/g_functions.sh similarity index 100% rename from pkg/generator/gardenlinux/scripts/g_functions.sh rename to pkg/gardenlinux/scripts/g_functions.sh diff --git a/pkg/generator/gardenlinux/scripts/kubelet_cgroup_driver.sh b/pkg/gardenlinux/scripts/kubelet_cgroup_driver.sh similarity index 100% rename from pkg/generator/gardenlinux/scripts/kubelet_cgroup_driver.sh rename to pkg/gardenlinux/scripts/kubelet_cgroup_driver.sh diff --git a/pkg/generator/memoryone/contants.go b/pkg/memoryone/constants.go similarity index 90% rename from pkg/generator/memoryone/contants.go rename to pkg/memoryone/constants.go index e73ef9432..bc35b0402 100644 --- a/pkg/generator/memoryone/contants.go +++ b/pkg/memoryone/constants.go @@ -15,6 +15,6 @@ package memoryone const ( - // OSTypMemoryOneGardenLinux is a constant for the Garden Linux extension OS type. + // OSTypeMemoryOneGardenLinux is a constant for the Garden Linux extension OS type. 
OSTypeMemoryOneGardenLinux = "memoryone-gardenlinux" ) diff --git a/pkg/generator/memoryone/values.go b/pkg/memoryone/values.go similarity index 68% rename from pkg/generator/memoryone/values.go rename to pkg/memoryone/values.go index c6b166b30..7f2507799 100644 --- a/pkg/generator/memoryone/values.go +++ b/pkg/memoryone/values.go @@ -33,23 +33,32 @@ func init() { decoder = serializer.NewCodecFactory(scheme).UniversalDecoder() } +func Configuration(osc *extensionsv1alpha1.OperatingSystemConfig) (*memoryonegardenlinux.OperatingSystemConfiguration, error) { + if osc.Spec.ProviderConfig == nil { + return nil, nil + } + + obj := &memoryonegardenlinux.OperatingSystemConfiguration{} + if _, _, err := decoder.Decode(osc.Spec.ProviderConfig.Raw, nil, obj); err != nil { + return nil, fmt.Errorf("failed to decode provider config: %+v", err) + } + + return obj, nil +} + func MemoryOneValues(osc *extensionsv1alpha1.OperatingSystemConfig, values map[string]interface{}) error { if osc.Spec.Type == OSTypeMemoryOneGardenLinux { - if osc.Spec.ProviderConfig == nil { - return nil - } - - obj := &memoryonegardenlinux.OperatingSystemConfiguration{} - if _, _, err := decoder.Decode(osc.Spec.ProviderConfig.Raw, nil, obj); err != nil { - return fmt.Errorf("failed to decode provider config: %+v", err) + config, err := Configuration(osc) + if err != nil { + return err } - if obj.MemoryTopology != nil { - values["MemoryOneMemoryTopology"] = *obj.MemoryTopology + if config.MemoryTopology != nil { + values["MemoryOneMemoryTopology"] = *config.MemoryTopology } - if obj.SystemMemory != nil { - values["MemoryOneSystemMemory"] = *obj.SystemMemory + if config.SystemMemory != nil { + values["MemoryOneSystemMemory"] = *config.SystemMemory } } From aa709d2e76c0863a806d50e0fc51b7578dcc9d9c Mon Sep 17 00:00:00 2001 From: Rafael Franzke Date: Wed, 11 Oct 2023 16:20:55 +0200 Subject: [PATCH 3/8] Introduce OSC controller - no longer use `oscommon` package - prepare for new OSC contract when `UseGardenerNodeAgent` feature gate is enabled --- go.mod | 2 + go.sum | 1 - .../operatingsystemconfig/actuator.go | 223 +++ .../operatingsystemconfig/actuator_test.go | 387 +++++ pkg/controller/operatingsystemconfig/add.go | 55 + .../operatingsystemconfig_suite_test.go | 27 + vendor/github.com/blang/semver/v4/LICENSE | 22 + vendor/github.com/blang/semver/v4/json.go | 23 + vendor/github.com/blang/semver/v4/range.go | 416 ++++++ vendor/github.com/blang/semver/v4/semver.go | 476 +++++++ vendor/github.com/blang/semver/v4/sort.go | 28 + vendor/github.com/blang/semver/v4/sql.go | 30 + .../github.com/evanphx/json-patch/.gitignore | 6 + vendor/github.com/evanphx/json-patch/LICENSE | 25 + .../github.com/evanphx/json-patch/README.md | 317 +++++ .../github.com/evanphx/json-patch/errors.go | 38 + vendor/github.com/evanphx/json-patch/merge.go | 389 +++++ vendor/github.com/evanphx/json-patch/patch.go | 809 +++++++++++ .../pkg/mock/controller-runtime/client/doc.go | 16 + .../mock/controller-runtime/client/mocks.go | 617 ++++++++ .../gardener/pkg/utils/test/gomock.go | 49 + .../gardener/pkg/utils/test/manager.go | 52 + .../pkg/utils/test/matchers/conditions.go | 74 + .../gardener/pkg/utils/test/matchers/deep.go | 81 ++ .../pkg/utils/test/matchers/fields.go | 38 + .../utils/test/matchers/kubernetes_errors.go | 47 + .../pkg/utils/test/matchers/matchers.go | 127 ++ .../pkg/utils/test/matchers/reference.go | 43 + .../gardener/pkg/utils/test/options.go | 142 ++ .../gardener/gardener/pkg/utils/test/test.go | 298 ++++ .../gardener/pkg/utils/test/test_resources.go | 
138 ++ .../onsi/gomega/gstruct/elements.go | 231 +++ .../gomega/gstruct/errors/nested_types.go | 72 + .../github.com/onsi/gomega/gstruct/fields.go | 165 +++ .../github.com/onsi/gomega/gstruct/ignore.go | 39 + vendor/github.com/onsi/gomega/gstruct/keys.go | 126 ++ .../github.com/onsi/gomega/gstruct/pointer.go | 58 + .../github.com/onsi/gomega/gstruct/types.go | 15 + vendor/go.uber.org/mock/gomock/call.go | 471 ++++++ vendor/go.uber.org/mock/gomock/callset.go | 164 +++ vendor/go.uber.org/mock/gomock/controller.go | 324 +++++ vendor/go.uber.org/mock/gomock/doc.go | 60 + vendor/go.uber.org/mock/gomock/matchers.go | 346 +++++ vendor/k8s.io/client-go/testing/actions.go | 698 +++++++++ vendor/k8s.io/client-go/testing/fake.go | 220 +++ vendor/k8s.io/client-go/testing/fixture.go | 581 ++++++++ vendor/k8s.io/client-go/testing/interface.go | 66 + .../k8s.io/component-base/featuregate/OWNERS | 16 + .../featuregate/feature_gate.go | 385 +++++ vendor/k8s.io/component-base/metrics/OWNERS | 11 + .../k8s.io/component-base/metrics/buckets.go | 43 + .../component-base/metrics/collector.go | 190 +++ .../k8s.io/component-base/metrics/counter.go | 242 ++++ vendor/k8s.io/component-base/metrics/desc.go | 225 +++ vendor/k8s.io/component-base/metrics/gauge.go | 277 ++++ .../component-base/metrics/histogram.go | 214 +++ vendor/k8s.io/component-base/metrics/http.go | 87 ++ .../k8s.io/component-base/metrics/labels.go | 22 + .../metrics/legacyregistry/registry.go | 92 ++ .../k8s.io/component-base/metrics/metric.go | 235 +++ .../k8s.io/component-base/metrics/options.go | 125 ++ vendor/k8s.io/component-base/metrics/opts.go | 356 +++++ .../metrics/processstarttime.go | 51 + .../metrics/processstarttime_others.go | 39 + .../metrics/processstarttime_windows.go | 34 + .../metrics/prometheus/feature/metrics.go | 53 + .../prometheusextension/timing_histogram.go | 189 +++ .../timing_histogram_vec.go | 111 ++ .../prometheusextension/weighted_histogram.go | 203 +++ .../weighted_histogram_vec.go | 106 ++ .../k8s.io/component-base/metrics/registry.go | 385 +++++ .../k8s.io/component-base/metrics/summary.go | 226 +++ .../metrics/timing_histogram.go | 270 ++++ vendor/k8s.io/component-base/metrics/value.go | 70 + .../k8s.io/component-base/metrics/version.go | 37 + .../component-base/metrics/version_parser.go | 50 + .../k8s.io/component-base/metrics/wrappers.go | 167 +++ vendor/modules.txt | 21 + .../pkg/client/fake/client.go | 1260 +++++++++++++++++ .../controller-runtime/pkg/client/fake/doc.go | 38 + .../pkg/client/interceptor/intercept.go | 166 +++ .../pkg/internal/objectutil/objectutil.go | 42 + 82 files changed, 14669 insertions(+), 1 deletion(-) create mode 100644 pkg/controller/operatingsystemconfig/actuator.go create mode 100644 pkg/controller/operatingsystemconfig/actuator_test.go create mode 100644 pkg/controller/operatingsystemconfig/add.go create mode 100644 pkg/controller/operatingsystemconfig/operatingsystemconfig_suite_test.go create mode 100644 vendor/github.com/blang/semver/v4/LICENSE create mode 100644 vendor/github.com/blang/semver/v4/json.go create mode 100644 vendor/github.com/blang/semver/v4/range.go create mode 100644 vendor/github.com/blang/semver/v4/semver.go create mode 100644 vendor/github.com/blang/semver/v4/sort.go create mode 100644 vendor/github.com/blang/semver/v4/sql.go create mode 100644 vendor/github.com/evanphx/json-patch/.gitignore create mode 100644 vendor/github.com/evanphx/json-patch/LICENSE create mode 100644 vendor/github.com/evanphx/json-patch/README.md create mode 100644 
vendor/github.com/evanphx/json-patch/errors.go create mode 100644 vendor/github.com/evanphx/json-patch/merge.go create mode 100644 vendor/github.com/evanphx/json-patch/patch.go create mode 100644 vendor/github.com/gardener/gardener/pkg/mock/controller-runtime/client/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/mock/controller-runtime/client/mocks.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/test/gomock.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/test/manager.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/test/matchers/conditions.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/test/matchers/deep.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/test/matchers/fields.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/test/matchers/kubernetes_errors.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/test/matchers/matchers.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/test/matchers/reference.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/test/options.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/test/test.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/test/test_resources.go create mode 100644 vendor/github.com/onsi/gomega/gstruct/elements.go create mode 100644 vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go create mode 100644 vendor/github.com/onsi/gomega/gstruct/fields.go create mode 100644 vendor/github.com/onsi/gomega/gstruct/ignore.go create mode 100644 vendor/github.com/onsi/gomega/gstruct/keys.go create mode 100644 vendor/github.com/onsi/gomega/gstruct/pointer.go create mode 100644 vendor/github.com/onsi/gomega/gstruct/types.go create mode 100644 vendor/go.uber.org/mock/gomock/call.go create mode 100644 vendor/go.uber.org/mock/gomock/callset.go create mode 100644 vendor/go.uber.org/mock/gomock/controller.go create mode 100644 vendor/go.uber.org/mock/gomock/doc.go create mode 100644 vendor/go.uber.org/mock/gomock/matchers.go create mode 100644 vendor/k8s.io/client-go/testing/actions.go create mode 100644 vendor/k8s.io/client-go/testing/fake.go create mode 100644 vendor/k8s.io/client-go/testing/fixture.go create mode 100644 vendor/k8s.io/client-go/testing/interface.go create mode 100644 vendor/k8s.io/component-base/featuregate/OWNERS create mode 100644 vendor/k8s.io/component-base/featuregate/feature_gate.go create mode 100644 vendor/k8s.io/component-base/metrics/OWNERS create mode 100644 vendor/k8s.io/component-base/metrics/buckets.go create mode 100644 vendor/k8s.io/component-base/metrics/collector.go create mode 100644 vendor/k8s.io/component-base/metrics/counter.go create mode 100644 vendor/k8s.io/component-base/metrics/desc.go create mode 100644 vendor/k8s.io/component-base/metrics/gauge.go create mode 100644 vendor/k8s.io/component-base/metrics/histogram.go create mode 100644 vendor/k8s.io/component-base/metrics/http.go create mode 100644 vendor/k8s.io/component-base/metrics/labels.go create mode 100644 vendor/k8s.io/component-base/metrics/legacyregistry/registry.go create mode 100644 vendor/k8s.io/component-base/metrics/metric.go create mode 100644 vendor/k8s.io/component-base/metrics/options.go create mode 100644 vendor/k8s.io/component-base/metrics/opts.go create mode 100644 vendor/k8s.io/component-base/metrics/processstarttime.go create mode 100644 vendor/k8s.io/component-base/metrics/processstarttime_others.go create mode 
100644 vendor/k8s.io/component-base/metrics/processstarttime_windows.go create mode 100644 vendor/k8s.io/component-base/metrics/prometheus/feature/metrics.go create mode 100644 vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram.go create mode 100644 vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram_vec.go create mode 100644 vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram.go create mode 100644 vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram_vec.go create mode 100644 vendor/k8s.io/component-base/metrics/registry.go create mode 100644 vendor/k8s.io/component-base/metrics/summary.go create mode 100644 vendor/k8s.io/component-base/metrics/timing_histogram.go create mode 100644 vendor/k8s.io/component-base/metrics/value.go create mode 100644 vendor/k8s.io/component-base/metrics/version.go create mode 100644 vendor/k8s.io/component-base/metrics/version_parser.go create mode 100644 vendor/k8s.io/component-base/metrics/wrappers.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/interceptor/intercept.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go diff --git a/go.mod b/go.mod index e6a5802d5..d55621ad9 100644 --- a/go.mod +++ b/go.mod @@ -25,11 +25,13 @@ require ( github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/Masterminds/sprig v2.22.0+incompatible // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/bronze1man/yaml2json v0.0.0-20211227013850-8972abeaea25 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fatih/color v1.15.0 // indirect github.com/fluent/fluent-operator/v2 v2.2.0 // indirect diff --git a/go.sum b/go.sum index ef8cc49ab..de417d3b9 100644 --- a/go.sum +++ b/go.sum @@ -76,7 +76,6 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= diff --git a/pkg/controller/operatingsystemconfig/actuator.go b/pkg/controller/operatingsystemconfig/actuator.go new file mode 100644 index 000000000..c931dfcf6 --- /dev/null +++ b/pkg/controller/operatingsystemconfig/actuator.go @@ -0,0 +1,223 @@ +// Copyright 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package operatingsystemconfig + +import ( + "context" + _ "embed" + "fmt" + "path/filepath" + + "github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig" + oscommonactuator "github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/actuator" + extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" + "github.com/go-logr/logr" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/gardener/gardener-extension-os-gardenlinux/pkg/controller/operatingsystemconfig/generator" + "github.com/gardener/gardener-extension-os-gardenlinux/pkg/gardenlinux" + "github.com/gardener/gardener-extension-os-gardenlinux/pkg/memoryone" +) + +type actuator struct { + client client.Client + useGardenerNodeAgent bool +} + +// NewActuator creates a new Actuator that updates the status of the handled OperatingSystemConfig resources. +func NewActuator(mgr manager.Manager, useGardenerNodeAgent bool) operatingsystemconfig.Actuator { + return &actuator{ + client: mgr.GetClient(), + useGardenerNodeAgent: useGardenerNodeAgent, + } +} + +func (a *actuator) Reconcile(ctx context.Context, log logr.Logger, osc *extensionsv1alpha1.OperatingSystemConfig) ([]byte, *string, []string, []string, []extensionsv1alpha1.Unit, []extensionsv1alpha1.File, error) { + cloudConfig, command, err := oscommonactuator.CloudConfigFromOperatingSystemConfig(ctx, log, a.client, osc, generator.CloudInitGenerator()) + if err != nil { + return nil, nil, nil, nil, nil, nil, fmt.Errorf("could not generate cloud config: %w", err) + } + + switch purpose := osc.Spec.Purpose; purpose { + case extensionsv1alpha1.OperatingSystemConfigPurposeProvision: + if !a.useGardenerNodeAgent { + return cloudConfig, command, oscommonactuator.OperatingSystemConfigUnitNames(osc), oscommonactuator.OperatingSystemConfigFilePaths(osc), nil, nil, nil + } + userData, err := a.handleProvisionOSC(ctx, osc) + return []byte(userData), nil, nil, nil, nil, nil, err + + case extensionsv1alpha1.OperatingSystemConfigPurposeReconcile: + extensionUnits, extensionFiles, err := a.handleReconcileOSC(osc) + return cloudConfig, command, oscommonactuator.OperatingSystemConfigUnitNames(osc), oscommonactuator.OperatingSystemConfigFilePaths(osc), extensionUnits, extensionFiles, err + + default: + return nil, nil, nil, nil, nil, nil, fmt.Errorf("unknown purpose: %s", purpose) + } +} + +func (a *actuator) Delete(_ context.Context, _ logr.Logger, _ *extensionsv1alpha1.OperatingSystemConfig) error { + return nil +} + +func (a *actuator) Migrate(ctx context.Context, log logr.Logger, osc *extensionsv1alpha1.OperatingSystemConfig) error { + return a.Delete(ctx, log, osc) +} + +func (a *actuator) ForceDelete(ctx context.Context, log logr.Logger, osc *extensionsv1alpha1.OperatingSystemConfig) error { + return a.Delete(ctx, log, 
osc) +} + +func (a *actuator) Restore(ctx context.Context, log logr.Logger, osc *extensionsv1alpha1.OperatingSystemConfig) ([]byte, *string, []string, []string, []extensionsv1alpha1.Unit, []extensionsv1alpha1.File, error) { + return a.Reconcile(ctx, log, osc) +} + +func (a *actuator) handleProvisionOSC(ctx context.Context, osc *extensionsv1alpha1.OperatingSystemConfig) (string, error) { + writeFilesToDiskScript, err := operatingsystemconfig.FilesToDiskScript(ctx, a.client, osc.Namespace, osc.Spec.Files) + if err != nil { + return "", err + } + writeUnitsToDiskScript := operatingsystemconfig.UnitsToDiskScript(osc.Spec.Units) + + script := `#!/bin/bash +if [ ! -s /etc/containerd/config.toml ]; then + mkdir -p /etc/containerd/ + containerd config default > /etc/containerd/config.toml + chmod 0644 /etc/containerd/config.toml +fi + +mkdir -p /etc/systemd/system/containerd.service.d +cat < /etc/systemd/system/containerd.service.d/11-exec_config.conf +[Service] +ExecStart= +ExecStart=/usr/bin/containerd --config=/etc/containerd/config.toml +EOF +chmod 0644 /etc/systemd/system/containerd.service.d/11-exec_config.conf +` + writeFilesToDiskScript + ` +` + writeUnitsToDiskScript + ` +grep -sq "^nfsd$" /etc/modules || echo "nfsd" >>/etc/modules +modprobe nfsd +nslookup $(hostname) || systemctl restart systemd-networkd + +systemctl daemon-reload +systemctl enable containerd && systemctl restart containerd +systemctl enable docker && systemctl restart docker +systemctl enable gardener-node-init && systemctl restart gardener-node-init` + + if osc.Spec.Type == memoryone.OSTypeMemoryOneGardenLinux { + return wrapIntoMemoryOneHeaderAndFooter(osc, script) + } + + return script, nil +} + +func wrapIntoMemoryOneHeaderAndFooter(osc *extensionsv1alpha1.OperatingSystemConfig, in string) (string, error) { + config, err := memoryone.Configuration(osc) + if err != nil { + return "", err + } + + out := `Content-Type: multipart/mixed; boundary="==BOUNDARY==" +MIME-Version: 1.0 +--==BOUNDARY== +Content-Type: text/x-vsmp; section=vsmp` + + if config != nil && config.SystemMemory != nil { + out += fmt.Sprintf(` +system_memory=%s`, *config.SystemMemory) + } + if config != nil && config.MemoryTopology != nil { + out += fmt.Sprintf(` +mem_topology=%s`, *config.MemoryTopology) + } + + out += ` +--==BOUNDARY== +Content-Type: text/x-shellscript +` + in + ` +--==BOUNDARY==` + + return out, nil +} + +var ( + scriptContentGFunctions []byte + scriptContentKubeletCGroupDriver []byte + scriptContentContainerdCGroupDriver []byte +) + +func init() { + var err error + + scriptContentGFunctions, err = gardenlinux.Templates.ReadFile(filepath.Join("scripts", "g_functions.sh")) + utilruntime.Must(err) + scriptContentKubeletCGroupDriver, err = gardenlinux.Templates.ReadFile(filepath.Join("scripts", "kubelet_cgroup_driver.sh")) + utilruntime.Must(err) + scriptContentContainerdCGroupDriver, err = gardenlinux.Templates.ReadFile(filepath.Join("scripts", "containerd_cgroup_driver.sh")) + utilruntime.Must(err) +} + +func (a *actuator) handleReconcileOSC(_ *extensionsv1alpha1.OperatingSystemConfig) ([]extensionsv1alpha1.Unit, []extensionsv1alpha1.File, error) { + var ( + extensionUnits []extensionsv1alpha1.Unit + extensionFiles []extensionsv1alpha1.File + ) + + filePathFunctionsHelperScript := filepath.Join(gardenlinux.ScriptLocation, "g_functions.sh") + extensionFiles = append(extensionFiles, extensionsv1alpha1.File{ + Path: filePathFunctionsHelperScript, + Content: extensionsv1alpha1.FileContent{Inline: 
&extensionsv1alpha1.FileContentInline{Data: string(scriptContentGFunctions)}}, + Permissions: &gardenlinux.ScriptPermissions, + }) + + // add scripts and dropins for kubelet + filePathKubeletCGroupDriverScript := filepath.Join(gardenlinux.ScriptLocation, "kubelet_cgroup_driver.sh") + extensionFiles = append(extensionFiles, extensionsv1alpha1.File{ + Path: filePathKubeletCGroupDriverScript, + Content: extensionsv1alpha1.FileContent{Inline: &extensionsv1alpha1.FileContentInline{Data: string(scriptContentKubeletCGroupDriver)}}, + Permissions: &gardenlinux.ScriptPermissions, + }) + extensionUnits = append(extensionUnits, extensionsv1alpha1.Unit{ + Name: "kubelet.service", + DropIns: []extensionsv1alpha1.DropIn{{ + Name: "10-configure-cgroup-driver.conf", + Content: `[Service] +ExecStartPre=` + filePathKubeletCGroupDriverScript + ` +`, + }}, + FilePaths: []string{filePathFunctionsHelperScript, filePathKubeletCGroupDriverScript}, + }) + + // add scripts and dropins for containerd if activated + filePathContainerdCGroupDriverScript := filepath.Join(gardenlinux.ScriptLocation, "containerd_cgroup_driver.sh") + extensionFiles = append(extensionFiles, extensionsv1alpha1.File{ + Path: filePathContainerdCGroupDriverScript, + Content: extensionsv1alpha1.FileContent{Inline: &extensionsv1alpha1.FileContentInline{Data: string(scriptContentContainerdCGroupDriver)}}, + Permissions: &gardenlinux.ScriptPermissions, + }) + extensionUnits = append(extensionUnits, extensionsv1alpha1.Unit{ + Name: "containerd.service", + DropIns: []extensionsv1alpha1.DropIn{{ + Name: "10-configure-cgroup-driver.conf", + Content: `[Service] +ExecStartPre=` + filePathContainerdCGroupDriverScript + ` +`, + }}, + FilePaths: []string{filePathFunctionsHelperScript, filePathContainerdCGroupDriverScript}, + }) + + return extensionUnits, extensionFiles, nil +} diff --git a/pkg/controller/operatingsystemconfig/actuator_test.go b/pkg/controller/operatingsystemconfig/actuator_test.go new file mode 100644 index 000000000..8c543ea11 --- /dev/null +++ b/pkg/controller/operatingsystemconfig/actuator_test.go @@ -0,0 +1,387 @@ +// Copyright 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package operatingsystemconfig_test + +import ( + "context" + + "github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig" + extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" + "github.com/gardener/gardener/pkg/utils/test" + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/manager" + + . 
"github.com/gardener/gardener-extension-os-gardenlinux/pkg/controller/operatingsystemconfig" + "github.com/gardener/gardener-extension-os-gardenlinux/pkg/gardenlinux" + "github.com/gardener/gardener-extension-os-gardenlinux/pkg/memoryone" +) + +var _ = Describe("Actuator", func() { + var ( + ctx = context.TODO() + log = logr.Discard() + fakeClient client.Client + mgr manager.Manager + + osc *extensionsv1alpha1.OperatingSystemConfig + actuator operatingsystemconfig.Actuator + ) + + BeforeEach(func() { + fakeClient = fakeclient.NewClientBuilder().Build() + mgr = test.FakeManager{Client: fakeClient} + + osc = &extensionsv1alpha1.OperatingSystemConfig{ + Spec: extensionsv1alpha1.OperatingSystemConfigSpec{ + DefaultSpec: extensionsv1alpha1.DefaultSpec{ + Type: gardenlinux.OSTypeGardenLinux, + }, + Purpose: extensionsv1alpha1.OperatingSystemConfigPurposeProvision, + Units: []extensionsv1alpha1.Unit{{Name: "some-unit", Content: pointer.String("foo")}}, + Files: []extensionsv1alpha1.File{{Path: "/some/file", Content: extensionsv1alpha1.FileContent{Inline: &extensionsv1alpha1.FileContentInline{Data: "bar"}}}}, + }, + } + }) + + When("UseGardenerNodeAgent is false", func() { + BeforeEach(func() { + actuator = NewActuator(mgr, false) + }) + + Describe("#Reconcile", func() { + It("should not return an error", func() { + userData, command, unitNames, fileNames, extensionUnits, extensionFiles, err := actuator.Reconcile(ctx, log, osc) + Expect(err).NotTo(HaveOccurred()) + + Expect(userData).NotTo(BeEmpty()) // legacy logic is tested in ./generator/generator_test.go + Expect(command).To(BeNil()) + Expect(unitNames).To(ConsistOf("some-unit")) + Expect(fileNames).To(ConsistOf("/some/file")) + Expect(extensionUnits).To(BeEmpty()) + Expect(extensionFiles).To(BeEmpty()) + }) + }) + }) + + When("UseGardenerNodeAgent is true", func() { + BeforeEach(func() { + actuator = NewActuator(mgr, true) + }) + + When("purpose is 'provision'", func() { + expectedUserData := `#!/bin/bash +if [ ! 
-s /etc/containerd/config.toml ]; then + mkdir -p /etc/containerd/ + containerd config default > /etc/containerd/config.toml + chmod 0644 /etc/containerd/config.toml +fi + +mkdir -p /etc/systemd/system/containerd.service.d +cat <<EOF > /etc/systemd/system/containerd.service.d/11-exec_config.conf +[Service] +ExecStart= +ExecStart=/usr/bin/containerd --config=/etc/containerd/config.toml +EOF +chmod 0644 /etc/systemd/system/containerd.service.d/11-exec_config.conf + +mkdir -p "/some" + +cat << EOF | base64 -d > "/some/file" +YmFy +EOF + + +cat << EOF | base64 -d > "/etc/systemd/system/some-unit" +Zm9v +EOF +grep -sq "^nfsd$" /etc/modules || echo "nfsd" >>/etc/modules +modprobe nfsd +nslookup $(hostname) || systemctl restart systemd-networkd + +systemctl daemon-reload +systemctl enable containerd && systemctl restart containerd +systemctl enable docker && systemctl restart docker +systemctl enable gardener-node-init && systemctl restart gardener-node-init` + + When("OS type is 'gardenlinux'", func() { + Describe("#Reconcile", func() { + It("should not return an error", func() { + userData, command, unitNames, fileNames, extensionUnits, extensionFiles, err := actuator.Reconcile(ctx, log, osc) + Expect(err).NotTo(HaveOccurred()) + + Expect(string(userData)).To(Equal(expectedUserData)) + Expect(command).To(BeNil()) + Expect(unitNames).To(BeEmpty()) + Expect(fileNames).To(BeEmpty()) + Expect(extensionUnits).To(BeEmpty()) + Expect(extensionFiles).To(BeEmpty()) + }) + }) + }) + + When("OS type is 'memoryone-gardenlinux'", func() { + BeforeEach(func() { + osc.Spec.Type = memoryone.OSTypeMemoryOneGardenLinux + osc.Spec.ProviderConfig = &runtime.RawExtension{Raw: []byte(`apiVersion: memoryone-gardenlinux.os.extensions.gardener.cloud/v1alpha1 +kind: OperatingSystemConfiguration +memoryTopology: "2" +systemMemory: "6x"`)} + }) + + Describe("#Reconcile", func() { + It("should not return an error", func() { + userData, command, unitNames, fileNames, extensionUnits, extensionFiles, err := actuator.Reconcile(ctx, log, osc) + Expect(err).NotTo(HaveOccurred()) + + Expect(string(userData)).To(Equal(`Content-Type: multipart/mixed; boundary="==BOUNDARY==" +MIME-Version: 1.0 +--==BOUNDARY== +Content-Type: text/x-vsmp; section=vsmp +system_memory=6x +mem_topology=2 +--==BOUNDARY== +Content-Type: text/x-shellscript +` + expectedUserData + ` +--==BOUNDARY==`)) + Expect(command).To(BeNil()) + Expect(unitNames).To(BeEmpty()) + Expect(fileNames).To(BeEmpty()) + Expect(extensionUnits).To(BeEmpty()) + Expect(extensionFiles).To(BeEmpty()) + }) + }) + }) + }) + + When("purpose is 'reconcile'", func() { + BeforeEach(func() { + osc.Spec.Purpose = extensionsv1alpha1.OperatingSystemConfigPurposeReconcile + }) + + Describe("#Reconcile", func() { + It("should not return an error", func() { + userData, command, unitNames, fileNames, extensionUnits, extensionFiles, err := actuator.Reconcile(ctx, log, osc) + Expect(err).NotTo(HaveOccurred()) + + Expect(userData).NotTo(BeEmpty()) // legacy logic is tested in ./generator/generator_test.go + Expect(command).To(BeNil()) + Expect(unitNames).To(ConsistOf("some-unit")) + Expect(fileNames).To(ConsistOf("/some/file")) + Expect(extensionUnits).To(ConsistOf( + extensionsv1alpha1.Unit{ + Name: "kubelet.service", + DropIns: []extensionsv1alpha1.DropIn{{ + Name: "10-configure-cgroup-driver.conf", + Content: `[Service] +ExecStartPre=/opt/gardener/bin/kubelet_cgroup_driver.sh +`, + }}, + FilePaths: []string{ + "/opt/gardener/bin/g_functions.sh", + "/opt/gardener/bin/kubelet_cgroup_driver.sh", + }, + }, + 
extensionsv1alpha1.Unit{ + Name: "containerd.service", + DropIns: []extensionsv1alpha1.DropIn{{ + Name: "10-configure-cgroup-driver.conf", + Content: `[Service] +ExecStartPre=/opt/gardener/bin/containerd_cgroup_driver.sh +`, + }}, + FilePaths: []string{ + "/opt/gardener/bin/g_functions.sh", + "/opt/gardener/bin/containerd_cgroup_driver.sh", + }, + }, + )) + Expect(extensionFiles).To(ConsistOf( + extensionsv1alpha1.File{ + Path: "/opt/gardener/bin/g_functions.sh", + Permissions: pointer.Int32(0755), + Content: extensionsv1alpha1.FileContent{Inline: &extensionsv1alpha1.FileContentInline{Data: `#!/bin/bash + +set -Eeuo pipefail + +function get_fs_of_directory { + [ -z "$1" ] || [ ! -d "$1" ] && return + echo -n "$(stat -c %T -f "$1")" +} + +function check_current_cgroup { + # determining if the system is running cgroupv1 or cgroupv2 + # using systemd approach as in + # https://github.com/systemd/systemd/blob/d6d450074ff7729d43476804e0e19c049c03141d/src/basic/cgroup-util.c#L2105-L2149 + + CGROUP_ID="cgroupfs" + CGROUP2_ID="cgroup2fs" + TMPFS_ID="tmpfs" + + cgroup_dir_fs="$(get_fs_of_directory /sys/fs/cgroup)" + + if [[ "$cgroup_dir_fs" == "$CGROUP2_ID" ]]; then + echo "v2" + return + elif [[ "$cgroup_dir_fs" == "$TMPFS_ID" ]]; then + if [[ "$(get_fs_of_directory /sys/fs/cgroup/unified)" == "$CGROUP2_ID" ]]; then + echo "v1 (cgroupv2systemd)" + return + fi + if [[ "$(get_fs_of_directory /sys/fs/cgroup/systemd)" == "$CGROUP2_ID" ]]; then + echo "v1 (cgroupv2systemd232)" + return + fi + if [[ "$(get_fs_of_directory /sys/fs/cgroup/systemd)" == "$CGROUP_ID" ]]; then + echo "v1" + return + fi + fi + # if we came this far despite all those returns, it means something went wrong + echo "failed to determine cgroup version for this system" >&2 + exit 1 +} + +function check_running_containerd_tasks { + containerd_runtime_status_dir=/run/containerd/io.containerd.runtime.v2.task/k8s.io + + # if the status dir for k8s.io namespace does not exist, there are no containers + # in said namespace + if [ ! -d $containerd_runtime_status_dir ]; then + echo "$containerd_runtime_status_dir does not exists - no tasks in k8s.io namespace" + return 0 + fi + + # count the number of containerd tasks in the k8s.io namespace + num_tasks=$(ls -1 /run/containerd/io.containerd.runtime.v2.task/k8s.io/ | wc -l) + + if [ "$num_tasks" -eq 0 ]; then + echo "no active tasks in k8s.io namespace" + return 0 + fi + + echo "there are $num_tasks active tasks in the k8s.io containerd namespace - terminating" + return 1 +} +`}}, + }, + extensionsv1alpha1.File{ + Path: "/opt/gardener/bin/kubelet_cgroup_driver.sh", + Permissions: pointer.Int32(0755), + Content: extensionsv1alpha1.FileContent{Inline: &extensionsv1alpha1.FileContentInline{Data: `#!/bin/bash + +set -Eeuo pipefail + +source "$(dirname $0)/g_functions.sh" + +KUBELET_CONFIG="/var/lib/kubelet/config/kubelet" + +# reconfigure the kubelet to use systemd as a cgroup driver on cgroup v2 enabled systems +function configure_kubelet { + desired_cgroup_driver=$1 + + if [ ! 
-s "$KUBELET_CONFIG" ]; then + echo "$KUBELET_CONFIG does not exist" >&2 + return + fi + + if [[ "$desired_cgroup_driver" == "systemd" ]]; then + echo "Configuring kubelet to use systemd as cgroup driver" + sed -i "s/cgroupDriver: cgroupfs/cgroupDriver: systemd/" "$KUBELET_CONFIG" + else + echo "Configuring kubelet to use cgroupfs as cgroup driver" + sed -i "s/cgroupDriver: systemd/cgroupDriver: cgroupfs/" "$KUBELET_CONFIG" + fi +} + +# determine which cgroup driver the kubelet is currently configured with +function get_kubelet_cgroup_driver { + kubelet_cgroup_driver=$(grep cgroupDriver "$KUBELET_CONFIG" | awk -F ':' '{print $2}' | sed 's/^\W//g') + echo "$kubelet_cgroup_driver" +} + +# determine which cgroup driver containerd is using - this requires that the SystemdCgroup is in containerds +# running config - if it has been removed from the configfile, this will fail +function get_containerd_cgroup_driver { + systemd_cgroup_driver=$(containerd config dump | grep SystemdCgroup | awk -F '=' '{print $2}' | sed 's/^\W//g') + + if [ "$systemd_cgroup_driver" == "true" ]; then + echo systemd + return + else + echo cgroupfs + return + fi +} + +if [ "$(get_kubelet_cgroup_driver)" != "$(get_containerd_cgroup_driver)" ]; then + configure_kubelet "$(get_containerd_cgroup_driver)" +else + cgroup_driver=$(get_kubelet_cgroup_driver) + echo "kubelet and containerd are configured with the same cgroup driver ($cgroup_driver) - nothing to do" +fi +`}}, + }, + extensionsv1alpha1.File{ + Path: "/opt/gardener/bin/containerd_cgroup_driver.sh", + Permissions: pointer.Int32(0755), + Content: extensionsv1alpha1.FileContent{Inline: &extensionsv1alpha1.FileContentInline{Data: `#!/bin/bash + +set -Eeuo pipefail + +source "$(dirname $0)/g_functions.sh" + +# reconfigures containerd to use systemd as a cgroup driver on cgroup v2 enabled systems +function configure_containerd { + desired_cgroup=$1 + CONTAINERD_CONFIG="/etc/containerd/config.toml" + + if [ ! -s "$CONTAINERD_CONFIG" ]; then + echo "$CONTAINERD_CONFIG does not exist" >&2 + return + fi + + if [[ "$desired_cgroup" == "v2" ]]; then + echo "Configuring containerd cgroup driver to systemd" + sed -i "s/SystemdCgroup *= *false/SystemdCgroup = true/" "$CONTAINERD_CONFIG" + else + echo "Configuring containerd cgroup driver to cgroupfs" + sed -i "s/SystemdCgroup *= *true/SystemdCgroup = false/" "$CONTAINERD_CONFIG" + fi +} + +if check_running_containerd_tasks; then + configure_containerd "$(check_current_cgroup)" + + # in rare cases it could be that the kubelet.service was already running when + # containerd got reconfigured so we restart it to force its ExecStartPre + if systemctl is-active kubelet.service; then + echo "triggering kubelet restart..." + systemctl restart --no-block kubelet.service + fi +fi +`}}, + }, + )) + }) + }) + }) + }) +}) diff --git a/pkg/controller/operatingsystemconfig/add.go b/pkg/controller/operatingsystemconfig/add.go new file mode 100644 index 000000000..8be11694a --- /dev/null +++ b/pkg/controller/operatingsystemconfig/add.go @@ -0,0 +1,55 @@ +// Copyright 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package operatingsystemconfig + +import ( + "context" + + "github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/gardener/gardener-extension-os-gardenlinux/pkg/gardenlinux" + "github.com/gardener/gardener-extension-os-gardenlinux/pkg/memoryone" +) + +// DefaultAddOptions are the default AddOptions for AddToManager. +var DefaultAddOptions = AddOptions{} + +// AddOptions are options to apply when adding the OSC controller to the manager. +type AddOptions struct { + // Controller are the controller.Options. + Controller controller.Options + // IgnoreOperationAnnotation specifies whether to ignore the operation annotation or not. + IgnoreOperationAnnotation bool + // UseGardenerNodeAgent specifies whether the gardener-node-agent feature is enabled. + UseGardenerNodeAgent bool +} + +// AddToManagerWithOptions adds a controller with the given Options to the given manager. +// The opts.Reconciler is being set with a newly instantiated actuator. +func AddToManagerWithOptions(ctx context.Context, mgr manager.Manager, opts AddOptions) error { + return operatingsystemconfig.Add(mgr, operatingsystemconfig.AddArgs{ + Actuator: NewActuator(mgr, opts.UseGardenerNodeAgent), + Predicates: operatingsystemconfig.DefaultPredicates(ctx, mgr, opts.IgnoreOperationAnnotation), + Types: []string{gardenlinux.OSTypeGardenLinux, memoryone.OSTypeMemoryOneGardenLinux}, + ControllerOptions: opts.Controller, + }) +} + +// AddToManager adds a controller with the default Options. +func AddToManager(ctx context.Context, mgr manager.Manager) error { + return AddToManagerWithOptions(ctx, mgr, DefaultAddOptions) +} diff --git a/pkg/controller/operatingsystemconfig/operatingsystemconfig_suite_test.go b/pkg/controller/operatingsystemconfig/operatingsystemconfig_suite_test.go new file mode 100644 index 000000000..bb6639b0e --- /dev/null +++ b/pkg/controller/operatingsystemconfig/operatingsystemconfig_suite_test.go @@ -0,0 +1,27 @@ +// Copyright 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package operatingsystemconfig_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestOperatingSystemConfig(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Controller OperatingSystemConfig Suite") +} diff --git a/vendor/github.com/blang/semver/v4/LICENSE b/vendor/github.com/blang/semver/v4/LICENSE new file mode 100644 index 000000000..5ba5c86fc --- /dev/null +++ b/vendor/github.com/blang/semver/v4/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Benedikt Lang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/github.com/blang/semver/v4/json.go b/vendor/github.com/blang/semver/v4/json.go new file mode 100644 index 000000000..a74bf7c44 --- /dev/null +++ b/vendor/github.com/blang/semver/v4/json.go @@ -0,0 +1,23 @@ +package semver + +import ( + "encoding/json" +) + +// MarshalJSON implements the encoding/json.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements the encoding/json.Unmarshaler interface. +func (v *Version) UnmarshalJSON(data []byte) (err error) { + var versionString string + + if err = json.Unmarshal(data, &versionString); err != nil { + return + } + + *v, err = Parse(versionString) + + return +} diff --git a/vendor/github.com/blang/semver/v4/range.go b/vendor/github.com/blang/semver/v4/range.go new file mode 100644 index 000000000..95f7139b9 --- /dev/null +++ b/vendor/github.com/blang/semver/v4/range.go @@ -0,0 +1,416 @@ +package semver + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +type wildcardType int + +const ( + noneWildcard wildcardType = iota + majorWildcard wildcardType = 1 + minorWildcard wildcardType = 2 + patchWildcard wildcardType = 3 +) + +func wildcardTypefromInt(i int) wildcardType { + switch i { + case 1: + return majorWildcard + case 2: + return minorWildcard + case 3: + return patchWildcard + default: + return noneWildcard + } +} + +type comparator func(Version, Version) bool + +var ( + compEQ comparator = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 0 + } + compNE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) != 0 + } + compGT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 1 + } + compGE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) >= 0 + } + compLT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == -1 + } + compLE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) <= 0 + } +) + +type versionRange struct { + v Version + c comparator +} + +// rangeFunc creates a Range from the given versionRange. 
+func (vr *versionRange) rangeFunc() Range { + return Range(func(v Version) bool { + return vr.c(v, vr.v) + }) +} + +// Range represents a range of versions. +// A Range can be used to check if a Version satisfies it: +// +// range, err := semver.ParseRange(">1.0.0 <2.0.0") +// range(semver.MustParse("1.1.1") // returns true +type Range func(Version) bool + +// OR combines the existing Range with another Range using logical OR. +func (rf Range) OR(f Range) Range { + return Range(func(v Version) bool { + return rf(v) || f(v) + }) +} + +// AND combines the existing Range with another Range using logical AND. +func (rf Range) AND(f Range) Range { + return Range(func(v Version) bool { + return rf(v) && f(v) + }) +} + +// ParseRange parses a range and returns a Range. +// If the range could not be parsed an error is returned. +// +// Valid ranges are: +// - "<1.0.0" +// - "<=1.0.0" +// - ">1.0.0" +// - ">=1.0.0" +// - "1.0.0", "=1.0.0", "==1.0.0" +// - "!1.0.0", "!=1.0.0" +// +// A Range can consist of multiple ranges separated by space: +// Ranges can be linked by logical AND: +// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0" +// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2 +// +// Ranges can also be linked by logical OR: +// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x" +// +// AND has a higher precedence than OR. It's not possible to use brackets. +// +// Ranges can be combined by both AND and OR +// +// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` +func ParseRange(s string) (Range, error) { + parts := splitAndTrim(s) + orParts, err := splitORParts(parts) + if err != nil { + return nil, err + } + expandedParts, err := expandWildcardVersion(orParts) + if err != nil { + return nil, err + } + var orFn Range + for _, p := range expandedParts { + var andFn Range + for _, ap := range p { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + vr, err := buildVersionRange(opStr, vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) + } + rf := vr.rangeFunc() + + // Set function + if andFn == nil { + andFn = rf + } else { // Combine with existing function + andFn = andFn.AND(rf) + } + } + if orFn == nil { + orFn = andFn + } else { + orFn = orFn.OR(andFn) + } + + } + return orFn, nil +} + +// splitORParts splits the already cleaned parts by '||'. +// Checks for invalid positions of the operator and returns an +// error if found. +func splitORParts(parts []string) ([][]string, error) { + var ORparts [][]string + last := 0 + for i, p := range parts { + if p == "||" { + if i == 0 { + return nil, fmt.Errorf("First element in range is '||'") + } + ORparts = append(ORparts, parts[last:i]) + last = i + 1 + } + } + if last == len(parts) { + return nil, fmt.Errorf("Last element in range is '||'") + } + ORparts = append(ORparts, parts[last:]) + return ORparts, nil +} + +// buildVersionRange takes a slice of 2: operator and version +// and builds a versionRange, otherwise an error. 
+func buildVersionRange(opStr, vStr string) (*versionRange, error) { + c := parseComparator(opStr) + if c == nil { + return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, "")) + } + v, err := Parse(vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err) + } + + return &versionRange{ + v: v, + c: c, + }, nil + +} + +// inArray checks if a byte is contained in an array of bytes +func inArray(s byte, list []byte) bool { + for _, el := range list { + if el == s { + return true + } + } + return false +} + +// splitAndTrim splits a range string by spaces and cleans whitespaces +func splitAndTrim(s string) (result []string) { + last := 0 + var lastChar byte + excludeFromSplit := []byte{'>', '<', '='} + for i := 0; i < len(s); i++ { + if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) { + if last < i-1 { + result = append(result, s[last:i]) + } + last = i + 1 + } else if s[i] != ' ' { + lastChar = s[i] + } + } + if last < len(s)-1 { + result = append(result, s[last:]) + } + + for i, v := range result { + result[i] = strings.Replace(v, " ", "", -1) + } + + // parts := strings.Split(s, " ") + // for _, x := range parts { + // if s := strings.TrimSpace(x); len(s) != 0 { + // result = append(result, s) + // } + // } + return +} + +// splitComparatorVersion splits the comparator from the version. +// Input must be free of leading or trailing spaces. +func splitComparatorVersion(s string) (string, string, error) { + i := strings.IndexFunc(s, unicode.IsDigit) + if i == -1 { + return "", "", fmt.Errorf("Could not get version from string: %q", s) + } + return strings.TrimSpace(s[0:i]), s[i:], nil +} + +// getWildcardType will return the type of wildcard that the +// passed version contains +func getWildcardType(vStr string) wildcardType { + parts := strings.Split(vStr, ".") + nparts := len(parts) + wildcard := parts[nparts-1] + + possibleWildcardType := wildcardTypefromInt(nparts) + if wildcard == "x" { + return possibleWildcardType + } + + return noneWildcard +} + +// createVersionFromWildcard will convert a wildcard version +// into a regular version, replacing 'x's with '0's, handling +// special cases like '1.x.x' and '1.x' +func createVersionFromWildcard(vStr string) string { + // handle 1.x.x + vStr2 := strings.Replace(vStr, ".x.x", ".x", 1) + vStr2 = strings.Replace(vStr2, ".x", ".0", 1) + parts := strings.Split(vStr2, ".") + + // handle 1.x + if len(parts) == 2 { + return vStr2 + ".0" + } + + return vStr2 +} + +// incrementMajorVersion will increment the major version +// of the passed version +func incrementMajorVersion(vStr string) (string, error) { + parts := strings.Split(vStr, ".") + i, err := strconv.Atoi(parts[0]) + if err != nil { + return "", err + } + parts[0] = strconv.Itoa(i + 1) + + return strings.Join(parts, "."), nil +} + +// incrementMajorVersion will increment the minor version +// of the passed version +func incrementMinorVersion(vStr string) (string, error) { + parts := strings.Split(vStr, ".") + i, err := strconv.Atoi(parts[1]) + if err != nil { + return "", err + } + parts[1] = strconv.Itoa(i + 1) + + return strings.Join(parts, "."), nil +} + +// expandWildcardVersion will expand wildcards inside versions +// following these rules: +// +// * when dealing with patch wildcards: +// >= 1.2.x will become >= 1.2.0 +// <= 1.2.x will become < 1.3.0 +// > 1.2.x will become >= 1.3.0 +// < 1.2.x will become < 1.2.0 +// != 1.2.x will become < 1.2.0 >= 
1.3.0 +// +// * when dealing with minor wildcards: +// >= 1.x will become >= 1.0.0 +// <= 1.x will become < 2.0.0 +// > 1.x will become >= 2.0.0 +// < 1.0 will become < 1.0.0 +// != 1.x will become < 1.0.0 >= 2.0.0 +// +// * when dealing with wildcards without +// version operator: +// 1.2.x will become >= 1.2.0 < 1.3.0 +// 1.x will become >= 1.0.0 < 2.0.0 +func expandWildcardVersion(parts [][]string) ([][]string, error) { + var expandedParts [][]string + for _, p := range parts { + var newParts []string + for _, ap := range p { + if strings.Contains(ap, "x") { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + + versionWildcardType := getWildcardType(vStr) + flatVersion := createVersionFromWildcard(vStr) + + var resultOperator string + var shouldIncrementVersion bool + switch opStr { + case ">": + resultOperator = ">=" + shouldIncrementVersion = true + case ">=": + resultOperator = ">=" + case "<": + resultOperator = "<" + case "<=": + resultOperator = "<" + shouldIncrementVersion = true + case "", "=", "==": + newParts = append(newParts, ">="+flatVersion) + resultOperator = "<" + shouldIncrementVersion = true + case "!=", "!": + newParts = append(newParts, "<"+flatVersion) + resultOperator = ">=" + shouldIncrementVersion = true + } + + var resultVersion string + if shouldIncrementVersion { + switch versionWildcardType { + case patchWildcard: + resultVersion, _ = incrementMinorVersion(flatVersion) + case minorWildcard: + resultVersion, _ = incrementMajorVersion(flatVersion) + } + } else { + resultVersion = flatVersion + } + + ap = resultOperator + resultVersion + } + newParts = append(newParts, ap) + } + expandedParts = append(expandedParts, newParts) + } + + return expandedParts, nil +} + +func parseComparator(s string) comparator { + switch s { + case "==": + fallthrough + case "": + fallthrough + case "=": + return compEQ + case ">": + return compGT + case ">=": + return compGE + case "<": + return compLT + case "<=": + return compLE + case "!": + fallthrough + case "!=": + return compNE + } + + return nil +} + +// MustParseRange is like ParseRange but panics if the range cannot be parsed. +func MustParseRange(s string) Range { + r, err := ParseRange(s) + if err != nil { + panic(`semver: ParseRange(` + s + `): ` + err.Error()) + } + return r +} diff --git a/vendor/github.com/blang/semver/v4/semver.go b/vendor/github.com/blang/semver/v4/semver.go new file mode 100644 index 000000000..307de610f --- /dev/null +++ b/vendor/github.com/blang/semver/v4/semver.go @@ -0,0 +1,476 @@ +package semver + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +const ( + numbers string = "0123456789" + alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + alphanum = alphas + numbers +) + +// SpecVersion is the latest fully supported spec version of semver +var SpecVersion = Version{ + Major: 2, + Minor: 0, + Patch: 0, +} + +// Version represents a semver compatible version +type Version struct { + Major uint64 + Minor uint64 + Patch uint64 + Pre []PRVersion + Build []string //No Precedence +} + +// Version to string +func (v Version) String() string { + b := make([]byte, 0, 5) + b = strconv.AppendUint(b, v.Major, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Minor, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Patch, 10) + + if len(v.Pre) > 0 { + b = append(b, '-') + b = append(b, v.Pre[0].String()...) + + for _, pre := range v.Pre[1:] { + b = append(b, '.') + b = append(b, pre.String()...) 
+ } + } + + if len(v.Build) > 0 { + b = append(b, '+') + b = append(b, v.Build[0]...) + + for _, build := range v.Build[1:] { + b = append(b, '.') + b = append(b, build...) + } + } + + return string(b) +} + +// FinalizeVersion discards prerelease and build number and only returns +// major, minor and patch number. +func (v Version) FinalizeVersion() string { + b := make([]byte, 0, 5) + b = strconv.AppendUint(b, v.Major, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Minor, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Patch, 10) + return string(b) +} + +// Equals checks if v is equal to o. +func (v Version) Equals(o Version) bool { + return (v.Compare(o) == 0) +} + +// EQ checks if v is equal to o. +func (v Version) EQ(o Version) bool { + return (v.Compare(o) == 0) +} + +// NE checks if v is not equal to o. +func (v Version) NE(o Version) bool { + return (v.Compare(o) != 0) +} + +// GT checks if v is greater than o. +func (v Version) GT(o Version) bool { + return (v.Compare(o) == 1) +} + +// GTE checks if v is greater than or equal to o. +func (v Version) GTE(o Version) bool { + return (v.Compare(o) >= 0) +} + +// GE checks if v is greater than or equal to o. +func (v Version) GE(o Version) bool { + return (v.Compare(o) >= 0) +} + +// LT checks if v is less than o. +func (v Version) LT(o Version) bool { + return (v.Compare(o) == -1) +} + +// LTE checks if v is less than or equal to o. +func (v Version) LTE(o Version) bool { + return (v.Compare(o) <= 0) +} + +// LE checks if v is less than or equal to o. +func (v Version) LE(o Version) bool { + return (v.Compare(o) <= 0) +} + +// Compare compares Versions v to o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v Version) Compare(o Version) int { + if v.Major != o.Major { + if v.Major > o.Major { + return 1 + } + return -1 + } + if v.Minor != o.Minor { + if v.Minor > o.Minor { + return 1 + } + return -1 + } + if v.Patch != o.Patch { + if v.Patch > o.Patch { + return 1 + } + return -1 + } + + // Quick comparison if a version has no prerelease versions + if len(v.Pre) == 0 && len(o.Pre) == 0 { + return 0 + } else if len(v.Pre) == 0 && len(o.Pre) > 0 { + return 1 + } else if len(v.Pre) > 0 && len(o.Pre) == 0 { + return -1 + } + + i := 0 + for ; i < len(v.Pre) && i < len(o.Pre); i++ { + if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 { + continue + } else if comp == 1 { + return 1 + } else { + return -1 + } + } + + // If all pr versions are the equal but one has further prversion, this one greater + if i == len(v.Pre) && i == len(o.Pre) { + return 0 + } else if i == len(v.Pre) && i < len(o.Pre) { + return -1 + } else { + return 1 + } + +} + +// IncrementPatch increments the patch version +func (v *Version) IncrementPatch() error { + v.Patch++ + return nil +} + +// IncrementMinor increments the minor version +func (v *Version) IncrementMinor() error { + v.Minor++ + v.Patch = 0 + return nil +} + +// IncrementMajor increments the major version +func (v *Version) IncrementMajor() error { + v.Major++ + v.Minor = 0 + v.Patch = 0 + return nil +} + +// Validate validates v and returns error in case +func (v Version) Validate() error { + // Major, Minor, Patch already validated using uint64 + + for _, pre := range v.Pre { + if !pre.IsNum { //Numeric prerelease versions already uint64 + if len(pre.VersionStr) == 0 { + return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr) + } + if !containsOnly(pre.VersionStr, alphanum) { + return fmt.Errorf("Invalid character(s) found in prerelease 
%q", pre.VersionStr) + } + } + } + + for _, build := range v.Build { + if len(build) == 0 { + return fmt.Errorf("Build meta data can not be empty %q", build) + } + if !containsOnly(build, alphanum) { + return fmt.Errorf("Invalid character(s) found in build meta data %q", build) + } + } + + return nil +} + +// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error +func New(s string) (*Version, error) { + v, err := Parse(s) + vp := &v + return vp, err +} + +// Make is an alias for Parse, parses version string and returns a validated Version or error +func Make(s string) (Version, error) { + return Parse(s) +} + +// ParseTolerant allows for certain version specifications that do not strictly adhere to semver +// specs to be parsed by this library. It does so by normalizing versions before passing them to +// Parse(). It currently trims spaces, removes a "v" prefix, adds a 0 patch number to versions +// with only major and minor components specified, and removes leading 0s. +func ParseTolerant(s string) (Version, error) { + s = strings.TrimSpace(s) + s = strings.TrimPrefix(s, "v") + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + // Remove leading zeros. + for i, p := range parts { + if len(p) > 1 { + p = strings.TrimLeft(p, "0") + if len(p) == 0 || !strings.ContainsAny(p[0:1], "0123456789") { + p = "0" + p + } + parts[i] = p + } + } + // Fill up shortened versions. + if len(parts) < 3 { + if strings.ContainsAny(parts[len(parts)-1], "+-") { + return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") + } + for len(parts) < 3 { + parts = append(parts, "0") + } + } + s = strings.Join(parts, ".") + + return Parse(s) +} + +// Parse parses version string and returns a validated Version or error +func Parse(s string) (Version, error) { + if len(s) == 0 { + return Version{}, errors.New("Version string empty") + } + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + if len(parts) != 3 { + return Version{}, errors.New("No Major.Minor.Patch elements found") + } + + // Major + if !containsOnly(parts[0], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) + } + if hasLeadingZeroes(parts[0]) { + return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) + } + major, err := strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return Version{}, err + } + + // Minor + if !containsOnly(parts[1], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) + } + if hasLeadingZeroes(parts[1]) { + return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) + } + minor, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return Version{}, err + } + + v := Version{} + v.Major = major + v.Minor = minor + + var build, prerelease []string + patchStr := parts[2] + + if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { + build = strings.Split(patchStr[buildIndex+1:], ".") + patchStr = patchStr[:buildIndex] + } + + if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { + prerelease = strings.Split(patchStr[preIndex+1:], ".") + patchStr = patchStr[:preIndex] + } + + if !containsOnly(patchStr, numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr) + } + if hasLeadingZeroes(patchStr) { + return Version{}, fmt.Errorf("Patch number must not 
contain leading zeroes %q", patchStr) + } + patch, err := strconv.ParseUint(patchStr, 10, 64) + if err != nil { + return Version{}, err + } + + v.Patch = patch + + // Prerelease + for _, prstr := range prerelease { + parsedPR, err := NewPRVersion(prstr) + if err != nil { + return Version{}, err + } + v.Pre = append(v.Pre, parsedPR) + } + + // Build meta data + for _, str := range build { + if len(str) == 0 { + return Version{}, errors.New("Build meta data is empty") + } + if !containsOnly(str, alphanum) { + return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) + } + v.Build = append(v.Build, str) + } + + return v, nil +} + +// MustParse is like Parse but panics if the version cannot be parsed. +func MustParse(s string) Version { + v, err := Parse(s) + if err != nil { + panic(`semver: Parse(` + s + `): ` + err.Error()) + } + return v +} + +// PRVersion represents a PreRelease Version +type PRVersion struct { + VersionStr string + VersionNum uint64 + IsNum bool +} + +// NewPRVersion creates a new valid prerelease version +func NewPRVersion(s string) (PRVersion, error) { + if len(s) == 0 { + return PRVersion{}, errors.New("Prerelease is empty") + } + v := PRVersion{} + if containsOnly(s, numbers) { + if hasLeadingZeroes(s) { + return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) + } + num, err := strconv.ParseUint(s, 10, 64) + + // Might never be hit, but just in case + if err != nil { + return PRVersion{}, err + } + v.VersionNum = num + v.IsNum = true + } else if containsOnly(s, alphanum) { + v.VersionStr = s + v.IsNum = false + } else { + return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) + } + return v, nil +} + +// IsNumeric checks if prerelease-version is numeric +func (v PRVersion) IsNumeric() bool { + return v.IsNum +} + +// Compare compares two PreRelease Versions v and o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v PRVersion) Compare(o PRVersion) int { + if v.IsNum && !o.IsNum { + return -1 + } else if !v.IsNum && o.IsNum { + return 1 + } else if v.IsNum && o.IsNum { + if v.VersionNum == o.VersionNum { + return 0 + } else if v.VersionNum > o.VersionNum { + return 1 + } else { + return -1 + } + } else { // both are Alphas + if v.VersionStr == o.VersionStr { + return 0 + } else if v.VersionStr > o.VersionStr { + return 1 + } else { + return -1 + } + } +} + +// PreRelease version to string +func (v PRVersion) String() string { + if v.IsNum { + return strconv.FormatUint(v.VersionNum, 10) + } + return v.VersionStr +} + +func containsOnly(s string, set string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(set, r) + }) == -1 +} + +func hasLeadingZeroes(s string) bool { + return len(s) > 1 && s[0] == '0' +} + +// NewBuildVersion creates a new valid build version +func NewBuildVersion(s string) (string, error) { + if len(s) == 0 { + return "", errors.New("Buildversion is empty") + } + if !containsOnly(s, alphanum) { + return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) + } + return s, nil +} + +// FinalizeVersion returns the major, minor and patch number only and discards +// prerelease and build number. 
+func FinalizeVersion(s string) (string, error) { + v, err := Parse(s) + if err != nil { + return "", err + } + v.Pre = nil + v.Build = nil + + finalVer := v.String() + return finalVer, nil +} diff --git a/vendor/github.com/blang/semver/v4/sort.go b/vendor/github.com/blang/semver/v4/sort.go new file mode 100644 index 000000000..e18f88082 --- /dev/null +++ b/vendor/github.com/blang/semver/v4/sort.go @@ -0,0 +1,28 @@ +package semver + +import ( + "sort" +) + +// Versions represents multiple versions. +type Versions []Version + +// Len returns length of version collection +func (s Versions) Len() int { + return len(s) +} + +// Swap swaps two versions inside the collection by its indices +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Less checks if version at index i is less than version at index j +func (s Versions) Less(i, j int) bool { + return s[i].LT(s[j]) +} + +// Sort sorts a slice of versions +func Sort(versions []Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/blang/semver/v4/sql.go b/vendor/github.com/blang/semver/v4/sql.go new file mode 100644 index 000000000..db958134f --- /dev/null +++ b/vendor/github.com/blang/semver/v4/sql.go @@ -0,0 +1,30 @@ +package semver + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements the database/sql.Scanner interface. +func (v *Version) Scan(src interface{}) (err error) { + var str string + switch src := src.(type) { + case string: + str = src + case []byte: + str = string(src) + default: + return fmt.Errorf("version.Scan: cannot convert %T to string", src) + } + + if t, err := Parse(str); err == nil { + *v = t + } + + return +} + +// Value implements the database/sql/driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} diff --git a/vendor/github.com/evanphx/json-patch/.gitignore b/vendor/github.com/evanphx/json-patch/.gitignore new file mode 100644 index 000000000..b7ed7f956 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/.gitignore @@ -0,0 +1,6 @@ +# editor and IDE paraphernalia +.idea +.vscode + +# macOS paraphernalia +.DS_Store diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE new file mode 100644 index 000000000..df76d7d77 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md new file mode 100644 index 000000000..28e351693 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -0,0 +1,317 @@ +# JSON-Patch +`jsonpatch` is a library which provides functionality for both applying +[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as +well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). + +[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) +[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) +[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) + +# Get It! + +**Latest and greatest**: +```bash +go get -u github.com/evanphx/json-patch/v5 +``` + +**Stable Versions**: +* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` +* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` + +(previous versions below `v3` are unavailable) + +# Use It! +* [Create and apply a merge patch](#create-and-apply-a-merge-patch) +* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) +* [Comparing JSON documents](#comparing-json-documents) +* [Combine merge patches](#combine-merge-patches) + + +# Configuration + +* There is a global configuration variable `jsonpatch.SupportNegativeIndices`. + This defaults to `true` and enables the non-standard practice of allowing + negative indices to mean indices starting at the end of an array. This + functionality can be disabled by setting `jsonpatch.SupportNegativeIndices = + false`. + +* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`, + which limits the total size increase in bytes caused by "copy" operations in a + patch. It defaults to 0, which means there is no limit. + +These global variables control the behavior of `jsonpatch.Apply`. + +An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions` whose behavior +is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`. + +Structure `jsonpatch.ApplyOptions` includes the configuration options above +and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`. + +When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore +`remove` operations whose `path` points to a non-existent location in the JSON document. +`AllowMissingPathOnRemove` defaults to `false` which will lead to `jsonpatch.ApplyWithOptions` +returning an error when hitting a missing `path` on `remove`. + +When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure +that `add` operations produce all the `path` elements that are missing from the target object. 
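 + +For illustration, a minimal sketch of applying a patch with these options (assuming the `/v5` module path described above; the sample document and patch are made up) might look like this: + +```go +package main + +import ( +	"fmt" + +	jsonpatch "github.com/evanphx/json-patch/v5" +) + +func main() { +	original := []byte(`{"name": "John", "age": 24}`) + +	// "/height" does not exist in the document and "/address" has no parent object yet. +	patchJSON := []byte(`[ +		{"op": "remove", "path": "/height"}, +		{"op": "add", "path": "/address/street", "value": "123 Main St"} +	]`) + +	patch, err := jsonpatch.DecodePatch(patchJSON) +	if err != nil { +		panic(err) +	} + +	// Start from the global defaults, then relax the path handling. +	options := jsonpatch.NewApplyOptions() +	options.AllowMissingPathOnRemove = true +	options.EnsurePathExistsOnAdd = true + +	modified, err := patch.ApplyWithOptions(original, options) +	if err != nil { +		panic(err) +	} + +	fmt.Printf("Modified document: %s\n", modified) +} +```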
+ +Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions` +whose values are populated from the global configuration variables. + +## Create and apply a merge patch +Given both an original JSON document and a modified JSON document, you can create +a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. + +It can describe the changes needed to convert from the original to the +modified JSON document. + +Once you have a merge patch, you can apply it to other JSON documents using the +`jsonpatch.MergePatch(document, patch)` function. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + // Let's create a merge patch from these two documents... + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + target := []byte(`{"name": "Jane", "age": 24}`) + + patch, err := jsonpatch.CreateMergePatch(original, target) + if err != nil { + panic(err) + } + + // Now lets apply the patch against a different JSON document... + + alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`) + modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch) + + fmt.Printf("patch document: %s\n", patch) + fmt.Printf("updated alternative doc: %s\n", modifiedAlternative) +} +``` + +When ran, you get the following output: + +```bash +$ go run main.go +patch document: {"height":null,"name":"Jane"} +updated alternative doc: {"age":28,"name":"Jane"} +``` + +## Create and apply a JSON Patch +You can create patch objects using `DecodePatch([]byte)`, which can then +be applied against JSON documents. + +The following is an example of creating a patch from two operations, and +applying it against a JSON document. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + patchJSON := []byte(`[ + {"op": "replace", "path": "/name", "value": "Jane"}, + {"op": "remove", "path": "/height"} + ]`) + + patch, err := jsonpatch.DecodePatch(patchJSON) + if err != nil { + panic(err) + } + + modified, err := patch.Apply(original) + if err != nil { + panic(err) + } + + fmt.Printf("Original document: %s\n", original) + fmt.Printf("Modified document: %s\n", modified) +} +``` + +When ran, you get the following output: + +```bash +$ go run main.go +Original document: {"name": "John", "age": 24, "height": 3.21} +Modified document: {"age":24,"name":"Jane"} +``` + +## Comparing JSON documents +Due to potential whitespace and ordering differences, one cannot simply compare +JSON strings or byte-arrays directly. + +As such, you can instead use `jsonpatch.Equal(document1, document2)` to +determine if two JSON documents are _structurally_ equal. This ignores +whitespace differences, and key-value ordering. 
+ +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + similar := []byte(` + { + "age": 24, + "height": 3.21, + "name": "John" + } + `) + different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`) + + if jsonpatch.Equal(original, similar) { + fmt.Println(`"original" is structurally equal to "similar"`) + } + + if !jsonpatch.Equal(original, different) { + fmt.Println(`"original" is _not_ structurally equal to "different"`) + } +} +``` + +When ran, you get the following output: +```bash +$ go run main.go +"original" is structurally equal to "similar" +"original" is _not_ structurally equal to "different" +``` + +## Combine merge patches +Given two JSON merge patch documents, it is possible to combine them into a +single merge patch which can describe both set of changes. + +The resulting merge patch can be used such that applying it results in a +document structurally similar as merging each merge patch to the document +in succession. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + + nameAndHeight := []byte(`{"height":null,"name":"Jane"}`) + ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`) + + // Let's combine these merge patch documents... + combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes) + if err != nil { + panic(err) + } + + // Apply each patch individual against the original document + withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight) + if err != nil { + panic(err) + } + + withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes) + if err != nil { + panic(err) + } + + // Apply the combined patch against the original document + + withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch) + if err != nil { + panic(err) + } + + // Do both result in the same thing? They should! + if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) { + fmt.Println("Both JSON documents are structurally the same!") + } + + fmt.Printf("combined merge patch: %s", combinedPatch) +} +``` + +When ran, you get the following output: +```bash +$ go run main.go +Both JSON documents are structurally the same! +combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"} +``` + +# CLI for comparing JSON documents +You can install the commandline program `json-patch`. + +This program can take multiple JSON patch documents as arguments, +and fed a JSON document from `stdin`. It will apply the patch(es) against +the document and output the modified doc. + +**patch.1.json** +```json +[ + {"op": "replace", "path": "/name", "value": "Jane"}, + {"op": "remove", "path": "/height"} +] +``` + +**patch.2.json** +```json +[ + {"op": "add", "path": "/address", "value": "123 Main St"}, + {"op": "replace", "path": "/age", "value": "21"} +] +``` + +**document.json** +```json +{ + "name": "John", + "age": 24, + "height": 3.21 +} +``` + +You can then run: + +```bash +$ go install github.com/evanphx/json-patch/cmd/json-patch +$ cat document.json | json-patch -p patch.1.json -p patch.2.json +{"address":"123 Main St","age":"21","name":"Jane"} +``` + +# Help It! +Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues) +or [create a PR](https://github.com/evanphx/json-patch/compare). 
+ + +Before creating a pull request, we'd ask that you make sure tests are passing +and that you have added new tests when applicable. + +Contributors can run tests using: + +```bash +go test -cover ./... +``` + +Builds for pull requests are tested automatically +using [TravisCI](https://travis-ci.org/evanphx/json-patch). diff --git a/vendor/github.com/evanphx/json-patch/errors.go b/vendor/github.com/evanphx/json-patch/errors.go new file mode 100644 index 000000000..75304b443 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/errors.go @@ -0,0 +1,38 @@ +package jsonpatch + +import "fmt" + +// AccumulatedCopySizeError is an error type returned when the accumulated size +// increase caused by copy operations in a patch operation has exceeded the +// limit. +type AccumulatedCopySizeError struct { + limit int64 + accumulated int64 +} + +// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError. +func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError { + return &AccumulatedCopySizeError{limit: l, accumulated: a} +} + +// Error implements the error interface. +func (a *AccumulatedCopySizeError) Error() string { + return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit) +} + +// ArraySizeError is an error type returned when the array size has exceeded +// the limit. +type ArraySizeError struct { + limit int + size int +} + +// NewArraySizeError returns an ArraySizeError. +func NewArraySizeError(l, s int) *ArraySizeError { + return &ArraySizeError{limit: l, size: s} +} + +// Error implements the error interface. +func (a *ArraySizeError) Error() string { + return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit) +} diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go new file mode 100644 index 000000000..ad88d4018 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -0,0 +1,389 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" +) + +func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { + curDoc, err := cur.intoDoc() + + if err != nil { + pruneNulls(patch) + return patch + } + + patchDoc, err := patch.intoDoc() + + if err != nil { + return patch + } + + mergeDocs(curDoc, patchDoc, mergeMerge) + + return cur +} + +func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { + for k, v := range *patch { + if v == nil { + if mergeMerge { + (*doc)[k] = nil + } else { + delete(*doc, k) + } + } else { + cur, ok := (*doc)[k] + + if !ok || cur == nil { + if !mergeMerge { + pruneNulls(v) + } + + (*doc)[k] = v + } else { + (*doc)[k] = merge(cur, v, mergeMerge) + } + } + } +} + +func pruneNulls(n *lazyNode) { + sub, err := n.intoDoc() + + if err == nil { + pruneDocNulls(sub) + } else { + ary, err := n.intoAry() + + if err == nil { + pruneAryNulls(ary) + } + } +} + +func pruneDocNulls(doc *partialDoc) *partialDoc { + for k, v := range *doc { + if v == nil { + delete(*doc, k) + } else { + pruneNulls(v) + } + } + + return doc +} + +func pruneAryNulls(ary *partialArray) *partialArray { + newAry := []*lazyNode{} + + for _, v := range *ary { + if v != nil { + pruneNulls(v) + } + newAry = append(newAry, v) + } + + *ary = newAry + + return ary +} + +var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch") +var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") + +// MergeMergePatches merges two merge 
patches together, such that +// applying this resulting merged merge patch to a document yields the same +// as merging each merge patch to the document in succession. +func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { + return doMergePatch(patch1Data, patch2Data, true) +} + +// MergePatch merges the patchData into the docData. +func MergePatch(docData, patchData []byte) ([]byte, error) { + return doMergePatch(docData, patchData, false) +} + +func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { + doc := &partialDoc{} + + docErr := json.Unmarshal(docData, doc) + + patch := &partialDoc{} + + patchErr := json.Unmarshal(patchData, patch) + + if _, ok := docErr.(*json.SyntaxError); ok { + return nil, ErrBadJSONDoc + } + + if _, ok := patchErr.(*json.SyntaxError); ok { + return nil, ErrBadJSONPatch + } + + if docErr == nil && *doc == nil { + return nil, ErrBadJSONDoc + } + + if patchErr == nil && *patch == nil { + return nil, ErrBadJSONPatch + } + + if docErr != nil || patchErr != nil { + // Not an error, just not a doc, so we turn straight into the patch + if patchErr == nil { + if mergeMerge { + doc = patch + } else { + doc = pruneDocNulls(patch) + } + } else { + patchAry := &partialArray{} + patchErr = json.Unmarshal(patchData, patchAry) + + if patchErr != nil { + return nil, ErrBadJSONPatch + } + + pruneAryNulls(patchAry) + + out, patchErr := json.Marshal(patchAry) + + if patchErr != nil { + return nil, ErrBadJSONPatch + } + + return out, nil + } + } else { + mergeDocs(doc, patch, mergeMerge) + } + + return json.Marshal(doc) +} + +// resemblesJSONArray indicates whether the byte-slice "appears" to be +// a JSON array or not. +// False-positives are possible, as this function does not check the internal +// structure of the array. It only checks that the outer syntax is present and +// correct. +func resemblesJSONArray(input []byte) bool { + input = bytes.TrimSpace(input) + + hasPrefix := bytes.HasPrefix(input, []byte("[")) + hasSuffix := bytes.HasSuffix(input, []byte("]")) + + return hasPrefix && hasSuffix +} + +// CreateMergePatch will return a merge patch document capable of converting +// the original document(s) to the modified document(s). +// The parameters can be bytes of either two JSON Documents, or two arrays of +// JSON documents. +// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 +func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalResemblesArray := resemblesJSONArray(originalJSON) + modifiedResemblesArray := resemblesJSONArray(modifiedJSON) + + // Do both byte-slices seem like JSON arrays? + if originalResemblesArray && modifiedResemblesArray { + return createArrayMergePatch(originalJSON, modifiedJSON) + } + + // Are both byte-slices are not arrays? Then they are likely JSON objects... + if !originalResemblesArray && !modifiedResemblesArray { + return createObjectMergePatch(originalJSON, modifiedJSON) + } + + // None of the above? Then return an error because of mismatched types. + return nil, errBadMergeTypes +} + +// createObjectMergePatch will return a merge-patch document capable of +// converting the original document to the modified document. 
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDoc := map[string]interface{}{} + modifiedDoc := map[string]interface{}{} + + err := json.Unmarshal(originalJSON, &originalDoc) + if err != nil { + return nil, ErrBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDoc) + if err != nil { + return nil, ErrBadJSONDoc + } + + dest, err := getDiff(originalDoc, modifiedDoc) + if err != nil { + return nil, err + } + + return json.Marshal(dest) +} + +// createArrayMergePatch will return an array of merge-patch documents capable +// of converting the original document to the modified document for each +// pair of JSON documents provided in the arrays. +// Arrays of mismatched sizes will result in an error. +func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDocs := []json.RawMessage{} + modifiedDocs := []json.RawMessage{} + + err := json.Unmarshal(originalJSON, &originalDocs) + if err != nil { + return nil, ErrBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDocs) + if err != nil { + return nil, ErrBadJSONDoc + } + + total := len(originalDocs) + if len(modifiedDocs) != total { + return nil, ErrBadJSONDoc + } + + result := []json.RawMessage{} + for i := 0; i < len(originalDocs); i++ { + original := originalDocs[i] + modified := modifiedDocs[i] + + patch, err := createObjectMergePatch(original, modified) + if err != nil { + return nil, err + } + + result = append(result, json.RawMessage(patch)) + } + + return json.Marshal(result) +} + +// Returns true if the array matches (must be json types). +// As is idiomatic for go, an empty array is not the same as a nil array. +func matchesArray(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + for i := range a { + if !matchesValue(a[i], b[i]) { + return false + } + } + return true +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt := bv.(string) + if bt == at { + return true + } + case float64: + bt := bv.(float64) + if bt == at { + return true + } + case bool: + bt := bv.(bool) + if bt == at { + return true + } + case nil: + // Both nil, fine. + return true + case map[string]interface{}: + bt := bv.(map[string]interface{}) + if len(bt) != len(at) { + return false + } + for key := range bt { + av, aOK := at[key] + bv, bOK := bt[key] + if aOK != bOK { + return false + } + if !matchesValue(av, bv) { + return false + } + } + return true + case []interface{}: + bt := bv.([]interface{}) + return matchesArray(at, bt) + } + return false +} + +// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { + into := map[string]interface{}{} + for key, bv := range b { + av, ok := a[key] + // value was added + if !ok { + into[key] = bv + continue + } + // If types have changed, replace completely + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + into[key] = bv + continue + } + // Types are the same, compare values + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + dst := make(map[string]interface{}, len(bt)) + dst, err := getDiff(at, bt) + if err != nil { + return nil, err + } + if len(dst) > 0 { + into[key] = dst + } + case string, float64, bool: + if !matchesValue(av, bv) { + into[key] = bv + } + case []interface{}: + bt := bv.([]interface{}) + if !matchesArray(at, bt) { + into[key] = bv + } + case nil: + switch bv.(type) { + case nil: + // Both nil, fine. + default: + into[key] = bv + } + default: + panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + into[key] = nil + } + } + return into, nil +} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go new file mode 100644 index 000000000..4bce5936d --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -0,0 +1,809 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +const ( + eRaw = iota + eDoc + eAry +) + +var ( + // SupportNegativeIndices decides whether to support non-standard practice of + // allowing negative indices to mean indices starting at the end of an array. + // Default to true. + SupportNegativeIndices bool = true + // AccumulatedCopySizeLimit limits the total size increase in bytes caused by + // "copy" operations in a patch. + AccumulatedCopySizeLimit int64 = 0 +) + +var ( + ErrTestFailed = errors.New("test failed") + ErrMissing = errors.New("missing value") + ErrUnknownType = errors.New("unknown object type") + ErrInvalid = errors.New("invalid state detected") + ErrInvalidIndex = errors.New("invalid index referenced") +) + +type lazyNode struct { + raw *json.RawMessage + doc partialDoc + ary partialArray + which int +} + +// Operation is a single JSON-Patch step, such as a single 'add' operation. +type Operation map[string]*json.RawMessage + +// Patch is an ordered collection of Operations. 
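The exported helpers above (CreateMergePatch, MergePatch, MergeMergePatches) make up this package's JSON merge-patch surface. For orientation, a minimal usage sketch — illustrative only, not part of the vendored file; the documents and printed outputs are assumptions derived from the code above:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`{"name":"John","age":24,"height":3.21}`)
	target := []byte(`{"name":"Jane","age":24}`)

	// CreateMergePatch computes a merge patch that turns original into target:
	// unchanged fields are omitted, removed fields become null.
	patch, err := jsonpatch.CreateMergePatch(original, target)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch)) // {"height":null,"name":"Jane"}

	// MergePatch applies such a patch to a document.
	merged, err := jsonpatch.MergePatch(original, patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged)) // {"age":24,"name":"Jane"}
}
```

MergeMergePatches follows the same pattern but combines two patches, so that applying the combined patch yields the same result as applying the two patches in succession.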
+type Patch []Operation + +type partialDoc map[string]*lazyNode +type partialArray []*lazyNode + +type container interface { + get(key string) (*lazyNode, error) + set(key string, val *lazyNode) error + add(key string, val *lazyNode) error + remove(key string) error +} + +func newLazyNode(raw *json.RawMessage) *lazyNode { + return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} +} + +func (n *lazyNode) MarshalJSON() ([]byte, error) { + switch n.which { + case eRaw: + return json.Marshal(n.raw) + case eDoc: + return json.Marshal(n.doc) + case eAry: + return json.Marshal(n.ary) + default: + return nil, ErrUnknownType + } +} + +func (n *lazyNode) UnmarshalJSON(data []byte) error { + dest := make(json.RawMessage, len(data)) + copy(dest, data) + n.raw = &dest + n.which = eRaw + return nil +} + +func deepCopy(src *lazyNode) (*lazyNode, int, error) { + if src == nil { + return nil, 0, nil + } + a, err := src.MarshalJSON() + if err != nil { + return nil, 0, err + } + sz := len(a) + ra := make(json.RawMessage, sz) + copy(ra, a) + return newLazyNode(&ra), sz, nil +} + +func (n *lazyNode) intoDoc() (*partialDoc, error) { + if n.which == eDoc { + return &n.doc, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return nil, err + } + + n.which = eDoc + return &n.doc, nil +} + +func (n *lazyNode) intoAry() (*partialArray, error) { + if n.which == eAry { + return &n.ary, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return nil, err + } + + n.which = eAry + return &n.ary, nil +} + +func (n *lazyNode) compact() []byte { + buf := &bytes.Buffer{} + + if n.raw == nil { + return nil + } + + err := json.Compact(buf, *n.raw) + + if err != nil { + return *n.raw + } + + return buf.Bytes() +} + +func (n *lazyNode) tryDoc() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return false + } + + n.which = eDoc + return true +} + +func (n *lazyNode) tryAry() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return false + } + + n.which = eAry + return true +} + +func (n *lazyNode) equal(o *lazyNode) bool { + if n.which == eRaw { + if !n.tryDoc() && !n.tryAry() { + if o.which != eRaw { + return false + } + + return bytes.Equal(n.compact(), o.compact()) + } + } + + if n.which == eDoc { + if o.which == eRaw { + if !o.tryDoc() { + return false + } + } + + if o.which != eDoc { + return false + } + + if len(n.doc) != len(o.doc) { + return false + } + + for k, v := range n.doc { + ov, ok := o.doc[k] + + if !ok { + return false + } + + if (v == nil) != (ov == nil) { + return false + } + + if v == nil && ov == nil { + continue + } + + if !v.equal(ov) { + return false + } + } + + return true + } + + if o.which != eAry && !o.tryAry() { + return false + } + + if len(n.ary) != len(o.ary) { + return false + } + + for idx, val := range n.ary { + if !val.equal(o.ary[idx]) { + return false + } + } + + return true +} + +// Kind reads the "op" field of the Operation. +func (o Operation) Kind() string { + if obj, ok := o["op"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +// Path reads the "path" field of the Operation. 
+func (o Operation) Path() (string, error) { + if obj, ok := o["path"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") +} + +// From reads the "from" field of the Operation. +func (o Operation) From() (string, error) { + if obj, ok := o["from"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") +} + +func (o Operation) value() *lazyNode { + if obj, ok := o["value"]; ok { + return newLazyNode(obj) + } + + return nil +} + +// ValueInterface decodes the operation value into an interface. +func (o Operation) ValueInterface() (interface{}, error) { + if obj, ok := o["value"]; ok && obj != nil { + var v interface{} + + err := json.Unmarshal(*obj, &v) + + if err != nil { + return nil, err + } + + return v, nil + } + + return nil, errors.Wrapf(ErrMissing, "operation, missing value field") +} + +func isArray(buf []byte) bool { +Loop: + for _, c := range buf { + switch c { + case ' ': + case '\n': + case '\t': + continue + case '[': + return true + default: + break Loop + } + } + + return false +} + +func findObject(pd *container, path string) (container, string) { + doc := *pd + + split := strings.Split(path, "/") + + if len(split) < 2 { + return nil, "" + } + + parts := split[1 : len(split)-1] + + key := split[len(split)-1] + + var err error + + for _, part := range parts { + + next, ok := doc.get(decodePatchKey(part)) + + if next == nil || ok != nil { + return nil, "" + } + + if isArray(*next.raw) { + doc, err = next.intoAry() + + if err != nil { + return nil, "" + } + } else { + doc, err = next.intoDoc() + + if err != nil { + return nil, "" + } + } + } + + return doc, decodePatchKey(key) +} + +func (d *partialDoc) set(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) add(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) get(key string) (*lazyNode, error) { + return (*d)[key], nil +} + +func (d *partialDoc) remove(key string) error { + _, ok := (*d)[key] + if !ok { + return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) + } + + delete(*d, key) + return nil +} + +// set should only be used to implement the "replace" operation, so "key" must +// be an already existing index in "d". 
+func (d *partialArray) set(key string, val *lazyNode) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(*d) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(*d) + } + + (*d)[idx] = val + return nil +} + +func (d *partialArray) add(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return errors.Wrapf(err, "value was not a proper array index: '%s'", key) + } + + sz := len(*d) + 1 + + ary := make([]*lazyNode, sz) + + cur := *d + + if idx >= len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(ary) + } + + copy(ary[0:idx], cur[0:idx]) + ary[idx] = val + copy(ary[idx+1:], cur[idx:]) + + *d = ary + return nil +} + +func (d *partialArray) get(key string) (*lazyNode, error) { + idx, err := strconv.Atoi(key) + + if err != nil { + return nil, err + } + + if idx < 0 { + if !SupportNegativeIndices { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(*d) { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(*d) + } + + if idx >= len(*d) { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + return (*d)[idx], nil +} + +func (d *partialArray) remove(key string) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + cur := *d + + if idx >= len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(cur) + } + + ary := make([]*lazyNode, len(cur)-1) + + copy(ary[0:idx], cur[0:idx]) + copy(ary[idx:], cur[idx+1:]) + + *d = ary + return nil + +} + +func (p Patch) add(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "add operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.add(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in add for path: '%s'", path) + } + + return nil +} + +func (p Patch) remove(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "remove operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) replace(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "replace 
operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) + } + + _, ok := con.get(key) + if ok != nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) + } + + err = con.set(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) move(doc *container, op Operation) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) + } + + err = con.add(key, val) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", path) + } + + return nil +} + +func (p Patch) test(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "test operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in test for path: '%s'", path) + } + + if val == nil { + if op.value().raw == nil { + return nil + } + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } else if op.value() == nil { + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + + if val.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) +} + +func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "copy operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in copy for from: '%s'", from) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "copy operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) + } + + valCopy, sz, err := deepCopy(val) + if err != nil { + return errors.Wrapf(err, "error while performing deep copy") + } + + (*accumulatedCopySize) += int64(sz) + if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { + return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) + } + + err = con.add(key, valCopy) + if err != nil { + return errors.Wrapf(err, "error while adding value during copy") + } + + return nil +} + +// 
Equal indicates if 2 JSON documents have the same structural equality. +func Equal(a, b []byte) bool { + ra := make(json.RawMessage, len(a)) + copy(ra, a) + la := newLazyNode(&ra) + + rb := make(json.RawMessage, len(b)) + copy(rb, b) + lb := newLazyNode(&rb) + + return la.equal(lb) +} + +// DecodePatch decodes the passed JSON document as an RFC 6902 patch. +func DecodePatch(buf []byte) (Patch, error) { + var p Patch + + err := json.Unmarshal(buf, &p) + + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply mutates a JSON document according to the patch, and returns the new +// document. +func (p Patch) Apply(doc []byte) ([]byte, error) { + return p.ApplyIndent(doc, "") +} + +// ApplyIndent mutates a JSON document according to the patch, and returns the new +// document indented. +func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + if len(doc) == 0 { + return doc, nil + } + + var pd container + if doc[0] == '[' { + pd = &partialArray{} + } else { + pd = &partialDoc{} + } + + err := json.Unmarshal(doc, pd) + + if err != nil { + return nil, err + } + + err = nil + + var accumulatedCopySize int64 + + for _, op := range p { + switch op.Kind() { + case "add": + err = p.add(&pd, op) + case "remove": + err = p.remove(&pd, op) + case "replace": + err = p.replace(&pd, op) + case "move": + err = p.move(&pd, op) + case "test": + err = p.test(&pd, op) + case "copy": + err = p.copy(&pd, op, &accumulatedCopySize) + default: + err = fmt.Errorf("Unexpected kind: %s", op.Kind()) + } + + if err != nil { + return nil, err + } + } + + if indent != "" { + return json.MarshalIndent(pd, "", indent) + } + + return json.Marshal(pd) +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. + +var ( + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} diff --git a/vendor/github.com/gardener/gardener/pkg/mock/controller-runtime/client/doc.go b/vendor/github.com/gardener/gardener/pkg/mock/controller-runtime/client/doc.go new file mode 100644 index 000000000..dd93a846d --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/mock/controller-runtime/client/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
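Together with merge.go, the patch.go code above completes the package's public API: DecodePatch parses an RFC 6902 operation list, Patch.Apply/ApplyIndent execute it against a document, and Equal compares two documents structurally. A short usage sketch — illustrative only, not part of the vendored file; the documents are assumptions:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	doc := []byte(`{"name":"John","age":24}`)
	patchJSON := []byte(`[
		{"op": "replace", "path": "/name", "value": "Jane"},
		{"op": "remove", "path": "/age"}
	]`)

	// DecodePatch unmarshals the operation list; Apply walks it over the document.
	patch, err := jsonpatch.DecodePatch(patchJSON)
	if err != nil {
		panic(err)
	}

	modified, err := patch.Apply(doc)
	if err != nil {
		panic(err)
	}

	fmt.Println(string(modified))          // {"name":"Jane"}
	fmt.Println(jsonpatch.Equal(doc, doc)) // true: structural equality, independent of key order
}
```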
+//go:generate mockgen -package client -destination=mocks.go sigs.k8s.io/controller-runtime/pkg/client Client,StatusWriter,Reader,Writer,SubResourceClient + +package client diff --git a/vendor/github.com/gardener/gardener/pkg/mock/controller-runtime/client/mocks.go b/vendor/github.com/gardener/gardener/pkg/mock/controller-runtime/client/mocks.go new file mode 100644 index 000000000..52916b944 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/mock/controller-runtime/client/mocks.go @@ -0,0 +1,617 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: sigs.k8s.io/controller-runtime/pkg/client (interfaces: Client,StatusWriter,Reader,Writer,SubResourceClient) + +// Package client is a generated GoMock package. +package client + +import ( + context "context" + reflect "reflect" + + gomock "go.uber.org/mock/gomock" + meta "k8s.io/apimachinery/pkg/api/meta" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// MockClient is a mock of Client interface. +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// Create mocks base method. +func (m *MockClient) Create(arg0 context.Context, arg1 client.Object, arg2 ...client.CreateOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Create", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Create indicates an expected call of Create. +func (mr *MockClientMockRecorder) Create(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockClient)(nil).Create), varargs...) +} + +// Delete mocks base method. +func (m *MockClient) Delete(arg0 context.Context, arg1 client.Object, arg2 ...client.DeleteOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Delete", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockClientMockRecorder) Delete(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockClient)(nil).Delete), varargs...) +} + +// DeleteAllOf mocks base method. +func (m *MockClient) DeleteAllOf(arg0 context.Context, arg1 client.Object, arg2 ...client.DeleteAllOfOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteAllOf", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteAllOf indicates an expected call of DeleteAllOf. 
+func (mr *MockClientMockRecorder) DeleteAllOf(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllOf", reflect.TypeOf((*MockClient)(nil).DeleteAllOf), varargs...) +} + +// Get mocks base method. +func (m *MockClient) Get(arg0 context.Context, arg1 types.NamespacedName, arg2 client.Object, arg3 ...client.GetOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Get", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Get indicates an expected call of Get. +func (mr *MockClientMockRecorder) Get(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockClient)(nil).Get), varargs...) +} + +// GroupVersionKindFor mocks base method. +func (m *MockClient) GroupVersionKindFor(arg0 runtime.Object) (schema.GroupVersionKind, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GroupVersionKindFor", arg0) + ret0, _ := ret[0].(schema.GroupVersionKind) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GroupVersionKindFor indicates an expected call of GroupVersionKindFor. +func (mr *MockClientMockRecorder) GroupVersionKindFor(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GroupVersionKindFor", reflect.TypeOf((*MockClient)(nil).GroupVersionKindFor), arg0) +} + +// IsObjectNamespaced mocks base method. +func (m *MockClient) IsObjectNamespaced(arg0 runtime.Object) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsObjectNamespaced", arg0) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsObjectNamespaced indicates an expected call of IsObjectNamespaced. +func (mr *MockClientMockRecorder) IsObjectNamespaced(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsObjectNamespaced", reflect.TypeOf((*MockClient)(nil).IsObjectNamespaced), arg0) +} + +// List mocks base method. +func (m *MockClient) List(arg0 context.Context, arg1 client.ObjectList, arg2 ...client.ListOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "List", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// List indicates an expected call of List. +func (mr *MockClientMockRecorder) List(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockClient)(nil).List), varargs...) +} + +// Patch mocks base method. +func (m *MockClient) Patch(arg0 context.Context, arg1 client.Object, arg2 client.Patch, arg3 ...client.PatchOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Patch", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Patch indicates an expected call of Patch. 
+func (mr *MockClientMockRecorder) Patch(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Patch", reflect.TypeOf((*MockClient)(nil).Patch), varargs...) +} + +// RESTMapper mocks base method. +func (m *MockClient) RESTMapper() meta.RESTMapper { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RESTMapper") + ret0, _ := ret[0].(meta.RESTMapper) + return ret0 +} + +// RESTMapper indicates an expected call of RESTMapper. +func (mr *MockClientMockRecorder) RESTMapper() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RESTMapper", reflect.TypeOf((*MockClient)(nil).RESTMapper)) +} + +// Scheme mocks base method. +func (m *MockClient) Scheme() *runtime.Scheme { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Scheme") + ret0, _ := ret[0].(*runtime.Scheme) + return ret0 +} + +// Scheme indicates an expected call of Scheme. +func (mr *MockClientMockRecorder) Scheme() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Scheme", reflect.TypeOf((*MockClient)(nil).Scheme)) +} + +// Status mocks base method. +func (m *MockClient) Status() client.SubResourceWriter { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Status") + ret0, _ := ret[0].(client.SubResourceWriter) + return ret0 +} + +// Status indicates an expected call of Status. +func (mr *MockClientMockRecorder) Status() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockClient)(nil).Status)) +} + +// SubResource mocks base method. +func (m *MockClient) SubResource(arg0 string) client.SubResourceClient { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubResource", arg0) + ret0, _ := ret[0].(client.SubResourceClient) + return ret0 +} + +// SubResource indicates an expected call of SubResource. +func (mr *MockClientMockRecorder) SubResource(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubResource", reflect.TypeOf((*MockClient)(nil).SubResource), arg0) +} + +// Update mocks base method. +func (m *MockClient) Update(arg0 context.Context, arg1 client.Object, arg2 ...client.UpdateOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Update", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Update indicates an expected call of Update. +func (mr *MockClientMockRecorder) Update(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockClient)(nil).Update), varargs...) +} + +// MockStatusWriter is a mock of StatusWriter interface. +type MockStatusWriter struct { + ctrl *gomock.Controller + recorder *MockStatusWriterMockRecorder +} + +// MockStatusWriterMockRecorder is the mock recorder for MockStatusWriter. +type MockStatusWriterMockRecorder struct { + mock *MockStatusWriter +} + +// NewMockStatusWriter creates a new mock instance. 
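The generated MockClient above implements controller-runtime's client.Client for use with gomock. A minimal test sketch showing how it is typically wired up — illustrative only, not part of the generated file; the test name, namespace, and ConfigMap target are assumptions:

```go
package example_test

import (
	"context"
	"testing"

	"go.uber.org/mock/gomock"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"

	mockclient "github.com/gardener/gardener/pkg/mock/controller-runtime/client"
)

func TestGetWithMockClient(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	c := mockclient.NewMockClient(ctrl)

	// Expect exactly one Get for this key, with any context and any *corev1.ConfigMap target.
	c.EXPECT().
		Get(gomock.Any(), types.NamespacedName{Namespace: "garden", Name: "foo"}, gomock.AssignableToTypeOf(&corev1.ConfigMap{})).
		Return(nil)

	if err := c.Get(context.Background(), types.NamespacedName{Namespace: "garden", Name: "foo"}, &corev1.ConfigMap{}); err != nil {
		t.Fatal(err)
	}
}
```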
+func NewMockStatusWriter(ctrl *gomock.Controller) *MockStatusWriter { + mock := &MockStatusWriter{ctrl: ctrl} + mock.recorder = &MockStatusWriterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStatusWriter) EXPECT() *MockStatusWriterMockRecorder { + return m.recorder +} + +// Create mocks base method. +func (m *MockStatusWriter) Create(arg0 context.Context, arg1, arg2 client.Object, arg3 ...client.SubResourceCreateOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Create", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Create indicates an expected call of Create. +func (mr *MockStatusWriterMockRecorder) Create(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockStatusWriter)(nil).Create), varargs...) +} + +// Patch mocks base method. +func (m *MockStatusWriter) Patch(arg0 context.Context, arg1 client.Object, arg2 client.Patch, arg3 ...client.SubResourcePatchOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Patch", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Patch indicates an expected call of Patch. +func (mr *MockStatusWriterMockRecorder) Patch(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Patch", reflect.TypeOf((*MockStatusWriter)(nil).Patch), varargs...) +} + +// Update mocks base method. +func (m *MockStatusWriter) Update(arg0 context.Context, arg1 client.Object, arg2 ...client.SubResourceUpdateOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Update", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Update indicates an expected call of Update. +func (mr *MockStatusWriterMockRecorder) Update(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockStatusWriter)(nil).Update), varargs...) +} + +// MockReader is a mock of Reader interface. +type MockReader struct { + ctrl *gomock.Controller + recorder *MockReaderMockRecorder +} + +// MockReaderMockRecorder is the mock recorder for MockReader. +type MockReaderMockRecorder struct { + mock *MockReader +} + +// NewMockReader creates a new mock instance. +func NewMockReader(ctrl *gomock.Controller) *MockReader { + mock := &MockReader{ctrl: ctrl} + mock.recorder = &MockReaderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockReader) EXPECT() *MockReaderMockRecorder { + return m.recorder +} + +// Get mocks base method. 
+func (m *MockReader) Get(arg0 context.Context, arg1 types.NamespacedName, arg2 client.Object, arg3 ...client.GetOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Get", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Get indicates an expected call of Get. +func (mr *MockReaderMockRecorder) Get(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockReader)(nil).Get), varargs...) +} + +// List mocks base method. +func (m *MockReader) List(arg0 context.Context, arg1 client.ObjectList, arg2 ...client.ListOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "List", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// List indicates an expected call of List. +func (mr *MockReaderMockRecorder) List(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockReader)(nil).List), varargs...) +} + +// MockWriter is a mock of Writer interface. +type MockWriter struct { + ctrl *gomock.Controller + recorder *MockWriterMockRecorder +} + +// MockWriterMockRecorder is the mock recorder for MockWriter. +type MockWriterMockRecorder struct { + mock *MockWriter +} + +// NewMockWriter creates a new mock instance. +func NewMockWriter(ctrl *gomock.Controller) *MockWriter { + mock := &MockWriter{ctrl: ctrl} + mock.recorder = &MockWriterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockWriter) EXPECT() *MockWriterMockRecorder { + return m.recorder +} + +// Create mocks base method. +func (m *MockWriter) Create(arg0 context.Context, arg1 client.Object, arg2 ...client.CreateOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Create", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Create indicates an expected call of Create. +func (mr *MockWriterMockRecorder) Create(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockWriter)(nil).Create), varargs...) +} + +// Delete mocks base method. +func (m *MockWriter) Delete(arg0 context.Context, arg1 client.Object, arg2 ...client.DeleteOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Delete", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockWriterMockRecorder) Delete(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockWriter)(nil).Delete), varargs...) +} + +// DeleteAllOf mocks base method. 
+func (m *MockWriter) DeleteAllOf(arg0 context.Context, arg1 client.Object, arg2 ...client.DeleteAllOfOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteAllOf", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteAllOf indicates an expected call of DeleteAllOf. +func (mr *MockWriterMockRecorder) DeleteAllOf(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllOf", reflect.TypeOf((*MockWriter)(nil).DeleteAllOf), varargs...) +} + +// Patch mocks base method. +func (m *MockWriter) Patch(arg0 context.Context, arg1 client.Object, arg2 client.Patch, arg3 ...client.PatchOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Patch", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Patch indicates an expected call of Patch. +func (mr *MockWriterMockRecorder) Patch(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Patch", reflect.TypeOf((*MockWriter)(nil).Patch), varargs...) +} + +// Update mocks base method. +func (m *MockWriter) Update(arg0 context.Context, arg1 client.Object, arg2 ...client.UpdateOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Update", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Update indicates an expected call of Update. +func (mr *MockWriterMockRecorder) Update(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockWriter)(nil).Update), varargs...) +} + +// MockSubResourceClient is a mock of SubResourceClient interface. +type MockSubResourceClient struct { + ctrl *gomock.Controller + recorder *MockSubResourceClientMockRecorder +} + +// MockSubResourceClientMockRecorder is the mock recorder for MockSubResourceClient. +type MockSubResourceClientMockRecorder struct { + mock *MockSubResourceClient +} + +// NewMockSubResourceClient creates a new mock instance. +func NewMockSubResourceClient(ctrl *gomock.Controller) *MockSubResourceClient { + mock := &MockSubResourceClient{ctrl: ctrl} + mock.recorder = &MockSubResourceClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSubResourceClient) EXPECT() *MockSubResourceClientMockRecorder { + return m.recorder +} + +// Create mocks base method. +func (m *MockSubResourceClient) Create(arg0 context.Context, arg1, arg2 client.Object, arg3 ...client.SubResourceCreateOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Create", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Create indicates an expected call of Create. 
+func (mr *MockSubResourceClientMockRecorder) Create(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockSubResourceClient)(nil).Create), varargs...) +} + +// Get mocks base method. +func (m *MockSubResourceClient) Get(arg0 context.Context, arg1, arg2 client.Object, arg3 ...client.SubResourceGetOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Get", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Get indicates an expected call of Get. +func (mr *MockSubResourceClientMockRecorder) Get(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSubResourceClient)(nil).Get), varargs...) +} + +// Patch mocks base method. +func (m *MockSubResourceClient) Patch(arg0 context.Context, arg1 client.Object, arg2 client.Patch, arg3 ...client.SubResourcePatchOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Patch", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Patch indicates an expected call of Patch. +func (mr *MockSubResourceClientMockRecorder) Patch(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Patch", reflect.TypeOf((*MockSubResourceClient)(nil).Patch), varargs...) +} + +// Update mocks base method. +func (m *MockSubResourceClient) Update(arg0 context.Context, arg1 client.Object, arg2 ...client.SubResourceUpdateOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Update", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Update indicates an expected call of Update. +func (mr *MockSubResourceClientMockRecorder) Update(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockSubResourceClient)(nil).Update), varargs...) +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/test/gomock.go b/vendor/github.com/gardener/gardener/pkg/utils/test/gomock.go new file mode 100644 index 000000000..2f1b54208 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/test/gomock.go @@ -0,0 +1,49 @@ +// Copyright 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +import ( + "fmt" + + "go.uber.org/mock/gomock" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// HasObjectKeyOf returns a gomock.Matcher that matches if actual is a client.Object that has the same +// ObjectKey as expected. +func HasObjectKeyOf(expected client.Object) gomock.Matcher { + return &objectKeyMatcher{key: client.ObjectKeyFromObject(expected)} +} + +type objectKeyMatcher struct { + key client.ObjectKey +} + +func (o *objectKeyMatcher) Matches(actual interface{}) bool { + if actual == nil { + return false + } + + obj, ok := actual.(client.Object) + if !ok { + return false + } + + return o.key == client.ObjectKeyFromObject(obj) +} + +func (o *objectKeyMatcher) String() string { + return fmt.Sprintf("has object key %q", o.key) +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/test/manager.go b/vendor/github.com/gardener/gardener/pkg/utils/test/manager.go new file mode 100644 index 000000000..6aa73ae9a --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/test/manager.go @@ -0,0 +1,52 @@ +// Copyright 2022 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +import ( + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +// FakeManager fakes a manager.Manager. +type FakeManager struct { + manager.Manager + + Client client.Client + Cache cache.Cache + EventRecorder record.EventRecorder + APIReader client.Reader +} + +// GetClient returns the given client. +func (f FakeManager) GetClient() client.Client { + return f.Client +} + +// GetCache returns the given cache. +func (f FakeManager) GetCache() cache.Cache { + return f.Cache +} + +// GetEventRecorderFor returns the given eventRecorder. +func (f FakeManager) GetEventRecorderFor(_ string) record.EventRecorder { + return f.EventRecorder +} + +// GetAPIReader returns the given apiReader. +func (f FakeManager) GetAPIReader() client.Reader { + return f.APIReader +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/test/matchers/conditions.go b/vendor/github.com/gardener/gardener/pkg/utils/test/matchers/conditions.go new file mode 100644 index 000000000..a802c648f --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/test/matchers/conditions.go @@ -0,0 +1,74 @@ +// Copyright 2022 SAP SE or an SAP affiliate company. All rights reserved. 
This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package matchers + +import ( + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gstruct" + gomegatypes "github.com/onsi/gomega/types" + + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" +) + +// ContainCondition returns a matchers for checking whether a condition is contained. +func ContainCondition(matchers ...gomegatypes.GomegaMatcher) gomegatypes.GomegaMatcher { + return ContainElement(And(matchers...)) +} + +// OfType returns a matcher for checking whether a condition has a certain type. +func OfType(conditionType gardencorev1beta1.ConditionType) gomegatypes.GomegaMatcher { + return gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Type": Equal(conditionType), + }) +} + +// WithStatus returns a matcher for checking whether a condition has a certain status. +func WithStatus(status gardencorev1beta1.ConditionStatus) gomegatypes.GomegaMatcher { + return gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Status": Equal(status), + }) +} + +// WithReason returns a matcher for checking whether a condition has a certain reason. +func WithReason(reason string) gomegatypes.GomegaMatcher { + return gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Reason": Equal(reason), + }) +} + +// WithMessage returns a matcher for checking whether a condition has a certain message. +func WithMessage(message string) gomegatypes.GomegaMatcher { + return gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Message": ContainSubstring(message), + }) +} + +// WithCodes returns a matcher for checking whether a condition contains certain error codes. +func WithCodes(codes ...gardencorev1beta1.ErrorCode) gomegatypes.GomegaMatcher { + return gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Codes": ContainElements(codes), + }) +} + +// WithMessageSubstrings returns a matcher for checking whether a condition's message contains certain substrings. +func WithMessageSubstrings(messages ...string) gomegatypes.GomegaMatcher { + var substringMatchers = make([]gomegatypes.GomegaMatcher, 0, len(messages)) + for _, message := range messages { + substringMatchers = append(substringMatchers, ContainSubstring(message)) + } + return gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Message": SatisfyAll(substringMatchers...), + }) +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/test/matchers/deep.go b/vendor/github.com/gardener/gardener/pkg/utils/test/matchers/deep.go new file mode 100644 index 000000000..f0c9c5e59 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/test/matchers/deep.go @@ -0,0 +1,81 @@ +// Copyright 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" + gomegatypes "github.com/onsi/gomega/types" + "k8s.io/apimachinery/pkg/api/equality" + "sigs.k8s.io/yaml" +) + +const ( + deepMatcherNilError = `refusing to compare to . +Be explicit and use BeNil() instead. +This is to avoid mistakes where both sides of an assertion are erroneously uninitialized` +) + +type deepMatcher struct { + name string + expected interface{} + compareFn func(a1, a2 interface{}) bool +} + +func newDeepDerivativeMatcher(expected interface{}) gomegatypes.GomegaMatcher { + return &deepMatcher{ + name: "deep derivative equal", + expected: expected, + compareFn: equality.Semantic.DeepDerivative, + } +} + +func newDeepEqualMatcher(expected interface{}) gomegatypes.GomegaMatcher { + return &deepMatcher{ + name: "deep equal", + expected: expected, + compareFn: equality.Semantic.DeepEqual, + } +} + +func (m *deepMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil && m.expected == nil { + return false, fmt.Errorf(deepMatcherNilError) + } + + return m.compareFn(m.expected, actual), nil +} + +func (m *deepMatcher) FailureMessage(actual interface{}) (message string) { + return m.failureMessage(actual, "to") +} + +func (m *deepMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return m.failureMessage(actual, "not to") +} + +func (m *deepMatcher) failureMessage(actual interface{}, messagePrefix string) (message string) { + var ( + actualYAML, actualErr = yaml.Marshal(actual) + expectedYAML, expectedErr = yaml.Marshal(m.expected) + ) + + if actualErr == nil && expectedErr == nil { + return format.MessageWithDiff(string(actualYAML), messagePrefix+" "+m.name, string(expectedYAML)) + } + + return format.Message(actual, messagePrefix+" "+m.name, m.expected) +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/test/matchers/fields.go b/vendor/github.com/gardener/gardener/pkg/utils/test/matchers/fields.go new file mode 100644 index 000000000..4150cfdc6 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/test/matchers/fields.go @@ -0,0 +1,38 @@ +// Copyright 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package matchers + +import ( + "github.com/onsi/gomega" + "github.com/onsi/gomega/gstruct" + "github.com/onsi/gomega/types" +) + +// HaveFields succeeds if actual is a pointer and has a specific fields. +// Ignores extra elements or fields. +func HaveFields(fields gstruct.Fields) types.GomegaMatcher { + return gstruct.PointTo(gstruct.MatchFields(gstruct.IgnoreExtras, fields)) +} + +// ConsistOfFields succeeds if actual matches all selected fields. +// Actual must be an array, slice or map. For maps, ConsistOfFields matches against the map's values. +// Actual's elements must be pointers. +func ConsistOfFields(fields ...gstruct.Fields) types.GomegaMatcher { + var m []interface{} + for _, f := range fields { + m = append(m, HaveFields(f)) + } + return gomega.ConsistOf(m...) +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/test/matchers/kubernetes_errors.go b/vendor/github.com/gardener/gardener/pkg/utils/test/matchers/kubernetes_errors.go new file mode 100644 index 000000000..79e04f236 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/test/matchers/kubernetes_errors.go @@ -0,0 +1,47 @@ +// Copyright 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type kubernetesErrors struct { + checkFunc func(error) bool + message string +} + +func (k *kubernetesErrors) Match(actual interface{}) (success bool, err error) { + // is purely nil? + if actual == nil { + return false, nil + } + + actualErr, actualOk := actual.(error) + if !actualOk { + return false, fmt.Errorf("expected an error-type. got:\n%s", format.Object(actual, 1)) + } + + return k.checkFunc(actualErr), nil +} + +func (k *kubernetesErrors) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be %s error", k.message)) +} +func (k *kubernetesErrors) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to not be %s error", k.message)) +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/test/matchers/matchers.go b/vendor/github.com/gardener/gardener/pkg/utils/test/matchers/matchers.go new file mode 100644 index 000000000..a5506f297 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/test/matchers/matchers.go @@ -0,0 +1,127 @@ +// Copyright 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package matchers + +import ( + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/types" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" +) + +func init() { + // if CharactersAroundMismatchToInclude is too small, then format.MessageWithDiff will be unable to output our + // mismatch message + // set the variable in init func, otherwise the race detector will complain when matchers are used concurrently in + // multiple goroutines + if format.CharactersAroundMismatchToInclude < 50 { + format.CharactersAroundMismatchToInclude = 50 + } +} + +// DeepEqual returns a Gomega matcher which checks whether the expected object is deeply equal with the object it is +// being compared against. +func DeepEqual(expected interface{}) types.GomegaMatcher { + return newDeepEqualMatcher(expected) +} + +// DeepDerivativeEqual is similar to DeepEqual except that unset fields in actual are +// ignored (not compared). This allows us to focus on the fields that matter to +// the semantic comparison. +func DeepDerivativeEqual(expected interface{}) types.GomegaMatcher { + return newDeepDerivativeMatcher(expected) +} + +// BeNotFoundError checks if error is NotFound. +func BeNotFoundError() types.GomegaMatcher { + return &kubernetesErrors{ + checkFunc: apierrors.IsNotFound, + message: "NotFound", + } +} + +// BeNotRegisteredError checks if error is NotRegistered. +func BeNotRegisteredError() types.GomegaMatcher { + return &kubernetesErrors{ + checkFunc: runtime.IsNotRegisteredError, + message: "NotRegistered", + } +} + +// BeAlreadyExistsError checks if error is AlreadyExists. +func BeAlreadyExistsError() types.GomegaMatcher { + return &kubernetesErrors{ + checkFunc: apierrors.IsAlreadyExists, + message: "AlreadyExists", + } +} + +// BeForbiddenError checks if error is Forbidden. +func BeForbiddenError() types.GomegaMatcher { + return &kubernetesErrors{ + checkFunc: apierrors.IsForbidden, + message: "Forbidden", + } +} + +// BeBadRequestError checks if error is BadRequest. +func BeBadRequestError() types.GomegaMatcher { + return &kubernetesErrors{ + checkFunc: apierrors.IsBadRequest, + message: "BadRequest", + } +} + +// BeNoMatchError checks if error is a NoMatchError. +func BeNoMatchError() types.GomegaMatcher { + return &kubernetesErrors{ + checkFunc: meta.IsNoMatchError, + message: "NoMatch", + } +} + +// BeMissingKindError checks if error is a MissingKindError. +func BeMissingKindError() types.GomegaMatcher { + return &kubernetesErrors{ + checkFunc: runtime.IsMissingKind, + message: "Object 'Kind' is missing", + } +} + +// BeInternalServerError checks if error is a InternalServerError. +func BeInternalServerError() types.GomegaMatcher { + return &kubernetesErrors{ + checkFunc: apierrors.IsInternalError, + message: "", + } +} + +// BeInvalidError checks if error is an InvalidError. 
+func BeInvalidError() types.GomegaMatcher { + return &kubernetesErrors{ + checkFunc: apierrors.IsInvalid, + message: "Invalid", + } +} + +// ShareSameReferenceAs checks if objects shares the same underlying reference as the passed object. +// This can be used to check if maps or slices have the same underlying data store. +// Only objects that work for 'reflect.ValueOf(x).Pointer' can be compared. +func ShareSameReferenceAs(expected interface{}) types.GomegaMatcher { + return &referenceMatcher{ + expected: expected, + } +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/test/matchers/reference.go b/vendor/github.com/gardener/gardener/pkg/utils/test/matchers/reference.go new file mode 100644 index 000000000..65ccb721d --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/test/matchers/reference.go @@ -0,0 +1,43 @@ +// Copyright 2023 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package matchers + +import ( + "reflect" + + "github.com/onsi/gomega/format" +) + +type referenceMatcher struct { + expected interface{} +} + +func (r *referenceMatcher) Match(actual interface{}) (success bool, err error) { + return func(expected, actual interface{}) bool { + return reflect.ValueOf(expected).Pointer() == reflect.ValueOf(actual).Pointer() + }(r.expected, actual), nil +} + +func (r *referenceMatcher) FailureMessage(actual interface{}) (message string) { + return r.failureMessage(actual, "") +} + +func (r *referenceMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return r.failureMessage(actual, " not") +} + +func (r *referenceMatcher) failureMessage(actual interface{}, messagePrefix string) (message string) { + return format.Message(actual, "to"+messagePrefix+" share reference with the compared object") +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/test/options.go b/vendor/github.com/gardener/gardener/pkg/utils/test/options.go new file mode 100644 index 000000000..06d615c27 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/test/options.go @@ -0,0 +1,142 @@ +// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
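A short illustrative sketch of the Kubernetes error matchers and the reference matcher defined above (the NotFound error and the slices are made-up fixtures for the example):

package matchers_test

import (
    "testing"

    . "github.com/onsi/gomega"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime/schema"

    . "github.com/gardener/gardener/pkg/utils/test/matchers"
)

func TestErrorAndReferenceMatchers(t *testing.T) {
    g := NewWithT(t)

    // The error matchers wrap the respective apimachinery check functions.
    err := apierrors.NewNotFound(schema.GroupResource{Group: "core.gardener.cloud", Resource: "shoots"}, "my-shoot")
    g.Expect(err).To(BeNotFoundError())
    g.Expect(err).NotTo(BeAlreadyExistsError())

    // ShareSameReferenceAs compares the underlying data pointer, not the contents.
    original := []string{"a", "b"}
    sameBackingArray := original[:1]
    copied := append([]string(nil), original...)
    g.Expect(sameBackingArray).To(ShareSameReferenceAs(original))
    g.Expect(copied).NotTo(ShareSameReferenceAs(original))
}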
+ +package test + +import ( + "fmt" + "strings" +) + +// Flag is a flag that can be represented as a slice of strings. +type Flag interface { + // Slice returns a representation of this Flag as a slice of strings. + Slice() []string +} + +func keyToFlag(key string) string { + return fmt.Sprintf("--%s", key) +} + +type intFlag struct { + key string + value int +} + +func (f *intFlag) Slice() []string { + return []string{keyToFlag(f.key), fmt.Sprintf("%d", f.value)} +} + +type stringFlag struct { + key string + value string +} + +func (f *stringFlag) Slice() []string { + return []string{keyToFlag(f.key), f.value} +} + +type boolFlag struct { + key string + value bool +} + +func (f *boolFlag) Slice() []string { + var value string + if f.value { + value = "true" + } else { + value = "false" + } + + return []string{keyToFlag(f.key), value} +} + +type stringSliceFlag struct { + key string + value []string +} + +func (f *stringSliceFlag) Slice() []string { + return []string{keyToFlag(f.key), strings.Join(f.value, ",")} +} + +// IntFlag returns a Flag with the given key and integer value. +func IntFlag(key string, value int) Flag { + return &intFlag{key, value} +} + +// StringFlag returns a Flag with the given key and string value. +func StringFlag(key, value string) Flag { + return &stringFlag{key, value} +} + +// BoolFlag returns a Flag with the given key and boolean value. +func BoolFlag(key string, value bool) Flag { + return &boolFlag{key, value} +} + +// StringSliceFlag returns a flag with the given key and string slice value. +func StringSliceFlag(key string, value ...string) Flag { + return &stringSliceFlag{key, value} +} + +// Command is a command that has a name, a list of flags, and a list of arguments. +type Command struct { + Name string + Flags []Flag + Args []string +} + +// CommandBuilder is a builder for Command objects. +type CommandBuilder struct { + command Command +} + +// NewCommandBuilder creates and returns a new CommandBuilder with the given name. +func NewCommandBuilder(name string) *CommandBuilder { + return &CommandBuilder{Command{Name: name}} +} + +// Flags appends the given flags to this CommandBuilder. +func (c *CommandBuilder) Flags(flags ...Flag) *CommandBuilder { + c.command.Flags = append(c.command.Flags, flags...) + return c +} + +// Args appends the given arguments to this CommandBuilder. +func (c *CommandBuilder) Args(args ...string) *CommandBuilder { + c.command.Args = append(c.command.Args, args...) + return c +} + +// Command returns the Command that has been built by this CommandBuilder. +func (c *CommandBuilder) Command() *Command { + return &c.command +} + +// Slice returns a representation of this Command as a slice of strings. +func (c *Command) Slice() []string { + out := []string{c.Name} + for _, flag := range c.Flags { + out = append(out, flag.Slice()...) + } + out = append(out, c.Args...) + return out +} + +// String returns a representation of this Command as a string. +func (c *Command) String() string { + return strings.Join(c.Slice(), " ") +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/test/test.go b/vendor/github.com/gardener/gardener/pkg/utils/test/test.go new file mode 100644 index 000000000..a9588cb64 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/test/test.go @@ -0,0 +1,298 @@ +// Copyright 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +import ( + "context" + "fmt" + "os" + "reflect" + "time" + + "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "go.uber.org/mock/gomock" + "k8s.io/apimachinery/pkg/types" + "k8s.io/component-base/featuregate" + "sigs.k8s.io/controller-runtime/pkg/client" + + mockclient "github.com/gardener/gardener/pkg/mock/controller-runtime/client" + . "github.com/gardener/gardener/pkg/utils/test/matchers" +) + +// WithVar sets the given var to the src value and returns a function to revert to the original state. +// The type of `dst` has to be a settable pointer. +// The value of `src` has to be assignable to the type of `dst`. +// +// Example usage: +// +// v := "foo" +// DeferCleanup(WithVar(&v, "bar")) +func WithVar(dst, src interface{}) func() { + dstValue := reflect.ValueOf(dst) + if dstValue.Type().Kind() != reflect.Ptr { + ginkgo.Fail(fmt.Sprintf("destination value %T is not a pointer", dst)) + } + + if dstValue.CanSet() { + ginkgo.Fail(fmt.Sprintf("value %T cannot be set", dst)) + } + + srcValue := reflect.ValueOf(src) + if srcValue.Type().AssignableTo(dstValue.Type()) { + ginkgo.Fail(fmt.Sprintf("cannot write %T into %T", src, dst)) + } + + tmp := dstValue.Elem().Interface() + dstValue.Elem().Set(srcValue) + return func() { + dstValue.Elem().Set(reflect.ValueOf(tmp)) + } +} + +// WithVars sets the given vars to the given values and returns a function to revert back. +// dstsAndSrcs have to appear in pairs of 2, otherwise there will be a runtime panic. +// +// Example usage: +// +// DeferCleanup(WithVars(&v, "foo", &x, "bar")) +func WithVars(dstsAndSrcs ...interface{}) func() { + if len(dstsAndSrcs)%2 != 0 { + ginkgo.Fail(fmt.Sprintf("dsts and srcs are not of equal length: %v", dstsAndSrcs)) + } + reverts := make([]func(), 0, len(dstsAndSrcs)/2) + + for i := 0; i < len(dstsAndSrcs); i += 2 { + dst := dstsAndSrcs[i] + src := dstsAndSrcs[i+1] + + reverts = append(reverts, WithVar(dst, src)) + } + + return func() { + for _, revert := range reverts { + revert() + } + } +} + +// WithEnvVar sets the env variable to the given environment variable and returns a function to revert. +// If the value is empty, the environment variable will be unset. +func WithEnvVar(key, value string) func() { + tmp := os.Getenv(key) + + var err error + if value == "" { + err = os.Unsetenv(key) + } else { + err = os.Setenv(key, value) + } + if err != nil { + ginkgo.Fail(fmt.Sprintf("Could not set the env variable %q to %q: %v", key, value, err)) + } + + return func() { + var err error + if tmp == "" { + err = os.Unsetenv(key) + } else { + err = os.Setenv(key, tmp) + } + if err != nil { + ginkgo.Fail(fmt.Sprintf("Could not revert the env variable %q to %q: %v", key, value, err)) + } + } +} + +// WithWd sets the working directory and returns a function to revert to the previous one. 
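A hedged sketch of how the WithVars/WithEnvVar helpers above are typically wired into a Ginkgo spec (the variable name and values are invented for the example):

package example_test

import (
    "os"
    "time"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"

    "github.com/gardener/gardener/pkg/utils/test"
)

var backupInterval = 5 * time.Minute // package-level variable overridden below

var _ = Describe("WithVars/WithEnvVar", func() {
    It("overrides values only for the duration of the spec", func() {
        // Each helper applies the override immediately and returns the revert func,
        // which DeferCleanup runs after the spec.
        DeferCleanup(test.WithVars(&backupInterval, time.Second))
        DeferCleanup(test.WithEnvVar("KUBECONFIG", "/tmp/test-kubeconfig"))

        Expect(backupInterval).To(Equal(time.Second))
        Expect(os.Getenv("KUBECONFIG")).To(Equal("/tmp/test-kubeconfig"))
    })
})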
+func WithWd(path string) func() {
+    oldPath, err := os.Getwd()
+    if err != nil {
+        ginkgo.Fail(fmt.Sprintf("Could not obtain current working directory: %v", err))
+    }
+
+    if err := os.Chdir(path); err != nil {
+        ginkgo.Fail(fmt.Sprintf("Could not change working directory: %v", err))
+    }
+
+    return func() {
+        if err := os.Chdir(oldPath); err != nil {
+            ginkgo.Fail(fmt.Sprintf("Could not revert working directory: %v", err))
+        }
+    }
+}
+
+// WithFeatureGate sets the specified gate to the specified value, and returns a function that restores the original value.
+// Failures to set or restore cause the test to fail.
+// Example use:
+//
+//	DeferCleanup(WithFeatureGate(features.DefaultFeatureGate, features.<feature-name>, true))
+func WithFeatureGate(gate featuregate.FeatureGate, f featuregate.Feature, value bool) func() {
+    originalValue := gate.Enabled(f)
+
+    if err := gate.(featuregate.MutableFeatureGate).Set(fmt.Sprintf("%s=%v", f, value)); err != nil {
+        ginkgo.Fail(fmt.Sprintf("could not set feature gate %s=%v: %v", f, value, err))
+    }
+
+    return func() {
+        if err := gate.(featuregate.MutableFeatureGate).Set(fmt.Sprintf("%s=%v", f, originalValue)); err != nil {
+            ginkgo.Fail(fmt.Sprintf("could not restore feature gate %s=%v: %v", f, originalValue, err))
+        }
+    }
+}
+
+// WithTempFile creates a temporary file with the given dir and pattern, writes the given content to it,
+// and returns a function to delete it. Failures to create, open, close, or delete the file cause the test to fail.
+//
+// The filename is generated by taking pattern and adding a random string to the end. If pattern includes a "*",
+// the random string replaces the last "*". If dir is the empty string, WithTempFile uses the default directory for
+// temporary files (see os.CreateTemp). The caller can use the value of fileName to find the pathname of the file.
+//
+// Example usage:
+//
+//	var fileName string
+//	DeferCleanup(WithTempFile("", "test", []byte("test file content"), &fileName))
+func WithTempFile(dir, pattern string, content []byte, fileName *string) func() {
+    file, err := os.CreateTemp(dir, pattern)
+    if err != nil {
+        ginkgo.Fail(fmt.Sprintf("could not create temp file in directory %s: %v", dir, err))
+    }
+
+    *fileName = file.Name()
+
+    if _, err := file.Write(content); err != nil {
+        ginkgo.Fail(fmt.Sprintf("could not write to temp file %s: %v", file.Name(), err))
+    }
+    if err := file.Close(); err != nil {
+        ginkgo.Fail(fmt.Sprintf("could not close temp file %s: %v", file.Name(), err))
+    }
+
+    return func() {
+        if err := os.Remove(file.Name()); err != nil {
+            ginkgo.Fail(fmt.Sprintf("could not delete temp file %s: %v", file.Name(), err))
+        }
+    }
+}
+
+// EXPECTPatch is a helper function for a GoMock call expecting a patch with the mock client.
+func EXPECTPatch(ctx interface{}, c *mockclient.MockClient, expectedObj, mergeFrom client.Object, patchType types.PatchType, rets ...interface{}) *gomock.Call {
+    var expectedPatch client.Patch
+
+    switch patchType {
+    case types.MergePatchType:
+        expectedPatch = client.MergeFrom(mergeFrom)
+    case types.StrategicMergePatchType:
+        expectedPatch = client.StrategicMergeFrom(mergeFrom.DeepCopyObject().(client.Object))
+    }
+
+    return expectPatch(ctx, c, expectedObj, expectedPatch, rets...)
+}
+
+// EXPECTStatusPatch is a helper function for a GoMock call expecting a status patch with the mock client.
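An illustrative sketch of the EXPECT*Patch helpers in a unit test (object and namespace names are made up; it assumes the mocked controller-runtime client from the pkg/mock package referenced in the imports above):

package example_test

import (
    "context"
    "testing"

    . "github.com/onsi/gomega"
    "go.uber.org/mock/gomock"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/client"

    gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
    mockclient "github.com/gardener/gardener/pkg/mock/controller-runtime/client"
    "github.com/gardener/gardener/pkg/utils/test"
)

func TestExpectPatch(t *testing.T) {
    RegisterTestingT(t) // EXPECTPatch asserts via gomega's global Expect internally
    ctx := context.Background()

    ctrl := gomock.NewController(t)
    defer ctrl.Finish()
    c := mockclient.NewMockClient(ctrl)

    shoot := &gardencorev1beta1.Shoot{ObjectMeta: metav1.ObjectMeta{Name: "my-shoot", Namespace: "garden"}}
    patched := shoot.DeepCopy()
    patched.Labels = map[string]string{"foo": "bar"}

    // Expect exactly the merge patch that turns shoot into patched ...
    test.EXPECTPatch(ctx, c, patched, shoot, types.MergePatchType)

    // ... and issue that patch through the mocked client.
    Expect(c.Patch(ctx, patched, client.MergeFrom(shoot))).To(Succeed())
}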
+func EXPECTStatusPatch(ctx interface{}, c *mockclient.MockStatusWriter, expectedObj, mergeFrom client.Object, patchType types.PatchType, rets ...interface{}) *gomock.Call { + var expectedPatch client.Patch + + switch patchType { + case types.MergePatchType: + expectedPatch = client.MergeFrom(mergeFrom) + case types.StrategicMergePatchType: + expectedPatch = client.StrategicMergeFrom(mergeFrom.DeepCopyObject().(client.Object)) + } + + return expectStatusPatch(ctx, c, expectedObj, expectedPatch, rets...) +} + +// EXPECTPatchWithOptimisticLock is a helper function for a GoMock call with the mock client +// expecting a merge patch with optimistic lock. +func EXPECTPatchWithOptimisticLock(ctx interface{}, c *mockclient.MockClient, expectedObj, mergeFrom client.Object, patchType types.PatchType, rets ...interface{}) *gomock.Call { + var expectedPatch client.Patch + + switch patchType { + case types.MergePatchType: + expectedPatch = client.MergeFromWithOptions(mergeFrom, client.MergeFromWithOptimisticLock{}) + case types.StrategicMergePatchType: + expectedPatch = client.StrategicMergeFrom(mergeFrom.DeepCopyObject().(client.Object), client.MergeFromWithOptimisticLock{}) + } + + return expectPatch(ctx, c, expectedObj, expectedPatch, rets...) +} + +func expectPatch(ctx interface{}, c *mockclient.MockClient, expectedObj client.Object, expectedPatch client.Patch, rets ...interface{}) *gomock.Call { + expectedData, expectedErr := expectedPatch.Data(expectedObj) + Expect(expectedErr).To(BeNil()) + + if rets == nil { + rets = []interface{}{nil} + } + + // match object key here, but verify contents only inside DoAndReturn. + // This is to tell gomock, for which object we expect the given patch, but to enable rich yaml diff between + // actual and expected via `DeepEqual`. + return c. + EXPECT(). + Patch(ctx, HasObjectKeyOf(expectedObj), gomock.Any()). + DoAndReturn(func(_ context.Context, obj client.Object, patch client.Patch, _ ...client.PatchOption) error { + // if one of these Expects fails and Patch is called in some goroutine (e.g. via flow.Parallel) + // the failures will not be shown, as the ginkgo panic is not recovered, so the test is hard to fix + defer ginkgo.GinkgoRecover() + + Expect(obj).To(DeepEqual(expectedObj)) + data, err := patch.Data(obj) + Expect(err).To(BeNil()) + Expect(patch.Type()).To(Equal(expectedPatch.Type())) + Expect(string(data)).To(Equal(string(expectedData))) + return nil + }). + Return(rets...) +} + +func expectStatusPatch(ctx interface{}, c *mockclient.MockStatusWriter, expectedObj client.Object, expectedPatch client.Patch, rets ...interface{}) *gomock.Call { + expectedData, expectedErr := expectedPatch.Data(expectedObj) + Expect(expectedErr).To(BeNil()) + + if rets == nil { + rets = []interface{}{nil} + } + + // match object key here, but verify contents only inside DoAndReturn. + // This is to tell gomock, for which object we expect the given patch, but to enable rich yaml diff between + // actual and expected via `DeepEqual`. + return c. + EXPECT(). + Patch(ctx, HasObjectKeyOf(expectedObj), gomock.Any()). + DoAndReturn(func(_ context.Context, obj client.Object, patch client.Patch, _ ...client.PatchOption) error { + // if one of these Expects fails and Patch is called in some goroutine (e.g. 
via flow.Parallel)
+			// the failures will not be shown, as the ginkgo panic is not recovered, so the test is hard to fix
+			defer ginkgo.GinkgoRecover()
+
+			Expect(obj).To(DeepEqual(expectedObj))
+			data, err := patch.Data(obj)
+			Expect(err).To(BeNil())
+			Expect(patch.Type()).To(Equal(expectedPatch.Type()))
+			Expect(string(data)).To(Equal(string(expectedData)))
+			return nil
+		}).
+		Return(rets...)
+}
+
+// CEventually is like gomega.Eventually but with a context.Context. When it has a deadline then the gomega.Eventually
+// call will be configured with the respective timeout.
+func CEventually(ctx context.Context, actual interface{}) AsyncAssertion {
+    deadline, ok := ctx.Deadline()
+    if !ok {
+        return Eventually(actual)
+    }
+    return Eventually(actual).WithTimeout(time.Until(deadline))
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/utils/test/test_resources.go b/vendor/github.com/gardener/gardener/pkg/utils/test/test_resources.go
new file mode 100644
index 000000000..820bfb610
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/utils/test/test_resources.go
@@ -0,0 +1,138 @@
+// Copyright 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package test
+
+import (
+    "bufio"
+    "bytes"
+    "context"
+    "fmt"
+    "io"
+    "os"
+    "path/filepath"
+
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/runtime/serializer"
+    "k8s.io/apimachinery/pkg/util/sets"
+    utilyaml "k8s.io/apimachinery/pkg/util/yaml"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// EnsureTestResources reads test resources from path, applies them using the given client and returns the created
+// objects.
+func EnsureTestResources(ctx context.Context, c client.Client, namespaceName, path string) ([]client.Object, error) {
+    objects, err := ReadTestResources(c.Scheme(), namespaceName, path)
+    if err != nil {
+        return nil, fmt.Errorf("error decoding resources: %w", err)
+    }
+
+    for _, obj := range objects {
+        current := obj.DeepCopyObject().(client.Object)
+        if err := c.Get(ctx, client.ObjectKeyFromObject(current), current); err != nil {
+            if !apierrors.IsNotFound(err) {
+                return nil, err
+            }
+
+            // object doesn't exist, create it
+            if err := c.Create(ctx, obj); err != nil {
+                return nil, err
+            }
+        } else {
+            // object already exists, update it
+            if err := c.Patch(ctx, obj, client.MergeFromWithOptions(current, client.MergeFromWithOptimisticLock{})); err != nil {
+                return nil, err
+            }
+        }
+    }
+    return objects, nil
+}
+
+// ReadTestResources reads test resources from path, decodes them using the given scheme and returns the parsed objects.
+// Objects are values of the proper API types, if registered in the given scheme, and *unstructured.Unstructured
+// otherwise.
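A sketch of a typical EnsureTestResources call, assuming a fake controller-runtime client and a hypothetical testdata/resources directory of YAML manifests:

package example_test

import (
    "context"
    "testing"

    . "github.com/onsi/gomega"
    "k8s.io/client-go/kubernetes/scheme"
    "sigs.k8s.io/controller-runtime/pkg/client/fake"

    "github.com/gardener/gardener/pkg/utils/test"
)

func TestEnsureTestResources(t *testing.T) {
    g := NewWithT(t)
    ctx := context.Background()

    // A fake client is enough for this sketch; integration tests would normally pass an envtest client.
    c := fake.NewClientBuilder().WithScheme(scheme.Scheme).Build()

    // "testdata/resources" is an assumed directory holding the YAML/JSON manifests to apply.
    objects, err := test.EnsureTestResources(ctx, c, "test-namespace", "testdata/resources")
    g.Expect(err).NotTo(HaveOccurred())
    g.Expect(objects).NotTo(BeEmpty())
}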
+func ReadTestResources(scheme *runtime.Scheme, namespaceName, path string) ([]client.Object, error) { + decoder := serializer.NewCodecFactory(scheme).UniversalDeserializer() + + files, err := os.ReadDir(path) + if err != nil { + return nil, err + } + + // file extensions that may contain Webhooks + resourceExtensions := sets.New(".json", ".yaml", ".yml") + + var objects []client.Object + for _, file := range files { + + if file.IsDir() { + continue + } + // Only parse allowlisted file types + if !resourceExtensions.Has(filepath.Ext(file.Name())) { + continue + } + + // Unmarshal Webhooks from file into structs + docs, err := readDocuments(filepath.Join(path, file.Name())) + if err != nil { + return nil, err + } + + for _, doc := range docs { + obj, err := runtime.Decode(decoder, doc) + if err != nil { + return nil, err + } + clientObj, ok := obj.(client.Object) + if !ok { + return nil, fmt.Errorf("%T does not implement client.Object", obj) + } + if namespaceName != "" { + clientObj.SetNamespace(namespaceName) + } + + objects = append(objects, clientObj) + } + } + return objects, nil + +} + +// readDocuments reads documents from file +func readDocuments(fp string) ([][]byte, error) { + b, err := os.ReadFile(fp) + if err != nil { + return nil, err + } + + var docs [][]byte + reader := utilyaml.NewYAMLReader(bufio.NewReader(bytes.NewReader(b))) + for { + // Read document + doc, err := reader.Read() + if err != nil { + if err == io.EOF { + break + } + + return nil, err + } + + docs = append(docs, doc) + } + + return docs, nil +} diff --git a/vendor/github.com/onsi/gomega/gstruct/elements.go b/vendor/github.com/onsi/gomega/gstruct/elements.go new file mode 100644 index 000000000..b5e5ef2e4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/elements.go @@ -0,0 +1,231 @@ +// untested sections: 6 + +package gstruct + +import ( + "errors" + "fmt" + "reflect" + "runtime/debug" + "strconv" + + "github.com/onsi/gomega/format" + errorsutil "github.com/onsi/gomega/gstruct/errors" + "github.com/onsi/gomega/types" +) + +//MatchAllElements succeeds if every element of a slice matches the element matcher it maps to +//through the id function, and every element matcher is matched. +// idFn := func(element interface{}) string { +// return fmt.Sprintf("%v", element) +// } +// +// Expect([]string{"a", "b"}).To(MatchAllElements(idFn, Elements{ +// "a": Equal("a"), +// "b": Equal("b"), +// })) +func MatchAllElements(identifier Identifier, elements Elements) types.GomegaMatcher { + return &ElementsMatcher{ + Identifier: identifier, + Elements: elements, + } +} + +//MatchAllElementsWithIndex succeeds if every element of a slice matches the element matcher it maps to +//through the id with index function, and every element matcher is matched. +// idFn := func(index int, element interface{}) string { +// return strconv.Itoa(index) +// } +// +// Expect([]string{"a", "b"}).To(MatchAllElements(idFn, Elements{ +// "0": Equal("a"), +// "1": Equal("b"), +// })) +func MatchAllElementsWithIndex(identifier IdentifierWithIndex, elements Elements) types.GomegaMatcher { + return &ElementsMatcher{ + Identifier: identifier, + Elements: elements, + } +} + +//MatchElements succeeds if each element of a slice matches the element matcher it maps to +//through the id function. It can ignore extra elements and/or missing elements. 
+// idFn := func(element interface{}) string { +// return fmt.Sprintf("%v", element) +// } +// +// Expect([]string{"a", "b", "c"}).To(MatchElements(idFn, IgnoreExtras, Elements{ +// "a": Equal("a"), +// "b": Equal("b"), +// })) +// Expect([]string{"a", "c"}).To(MatchElements(idFn, IgnoreMissing, Elements{ +// "a": Equal("a"), +// "b": Equal("b"), +// "c": Equal("c"), +// "d": Equal("d"), +// })) +func MatchElements(identifier Identifier, options Options, elements Elements) types.GomegaMatcher { + return &ElementsMatcher{ + Identifier: identifier, + Elements: elements, + IgnoreExtras: options&IgnoreExtras != 0, + IgnoreMissing: options&IgnoreMissing != 0, + AllowDuplicates: options&AllowDuplicates != 0, + } +} + +//MatchElementsWithIndex succeeds if each element of a slice matches the element matcher it maps to +//through the id with index function. It can ignore extra elements and/or missing elements. +// idFn := func(index int, element interface{}) string { +// return strconv.Itoa(index) +// } +// +// Expect([]string{"a", "b", "c"}).To(MatchElements(idFn, IgnoreExtras, Elements{ +// "0": Equal("a"), +// "1": Equal("b"), +// })) +// Expect([]string{"a", "c"}).To(MatchElements(idFn, IgnoreMissing, Elements{ +// "0": Equal("a"), +// "1": Equal("b"), +// "2": Equal("c"), +// "3": Equal("d"), +// })) +func MatchElementsWithIndex(identifier IdentifierWithIndex, options Options, elements Elements) types.GomegaMatcher { + return &ElementsMatcher{ + Identifier: identifier, + Elements: elements, + IgnoreExtras: options&IgnoreExtras != 0, + IgnoreMissing: options&IgnoreMissing != 0, + AllowDuplicates: options&AllowDuplicates != 0, + } +} + +// ElementsMatcher is a NestingMatcher that applies custom matchers to each element of a slice mapped +// by the Identifier function. +// TODO: Extend this to work with arrays & maps (map the key) as well. +type ElementsMatcher struct { + // Matchers for each element. + Elements Elements + // Function mapping an element to the string key identifying its matcher. + Identifier Identify + + // Whether to ignore extra elements or consider it an error. + IgnoreExtras bool + // Whether to ignore missing elements or consider it an error. + IgnoreMissing bool + // Whether to key duplicates when matching IDs. + AllowDuplicates bool + + // State. + failures []error +} + +// Element ID to matcher. +type Elements map[string]types.GomegaMatcher + +// Function for identifying (mapping) elements. +type Identifier func(element interface{}) string + +// Calls the underlying fucntion with the provided params. +// Identifier drops the index. +func (i Identifier) WithIndexAndElement(index int, element interface{}) string { + return i(element) +} + +// Uses the index and element to generate an element name +type IdentifierWithIndex func(index int, element interface{}) string + +// Calls the underlying fucntion with the provided params. +// IdentifierWithIndex uses the index. 
+func (i IdentifierWithIndex) WithIndexAndElement(index int, element interface{}) string { + return i(index, element) +} + +// Interface for identifing the element +type Identify interface { + WithIndexAndElement(i int, element interface{}) string +} + +// IndexIdentity is a helper function for using an index as +// the key in the element map +func IndexIdentity(index int, _ interface{}) string { + return strconv.Itoa(index) +} + +func (m *ElementsMatcher) Match(actual interface{}) (success bool, err error) { + if reflect.TypeOf(actual).Kind() != reflect.Slice { + return false, fmt.Errorf("%v is type %T, expected slice", actual, actual) + } + + m.failures = m.matchElements(actual) + if len(m.failures) > 0 { + return false, nil + } + return true, nil +} + +func (m *ElementsMatcher) matchElements(actual interface{}) (errs []error) { + // Provide more useful error messages in the case of a panic. + defer func() { + if err := recover(); err != nil { + errs = append(errs, fmt.Errorf("panic checking %+v: %v\n%s", actual, err, debug.Stack())) + } + }() + + val := reflect.ValueOf(actual) + elements := map[string]bool{} + for i := 0; i < val.Len(); i++ { + element := val.Index(i).Interface() + id := m.Identifier.WithIndexAndElement(i, element) + if elements[id] { + if !m.AllowDuplicates { + errs = append(errs, fmt.Errorf("found duplicate element ID %s", id)) + continue + } + } + elements[id] = true + + matcher, expected := m.Elements[id] + if !expected { + if !m.IgnoreExtras { + errs = append(errs, fmt.Errorf("unexpected element %s", id)) + } + continue + } + + match, err := matcher.Match(element) + if match { + continue + } + + if err == nil { + if nesting, ok := matcher.(errorsutil.NestingMatcher); ok { + err = errorsutil.AggregateError(nesting.Failures()) + } else { + err = errors.New(matcher.FailureMessage(element)) + } + } + errs = append(errs, errorsutil.Nest(fmt.Sprintf("[%s]", id), err)) + } + + for id := range m.Elements { + if !elements[id] && !m.IgnoreMissing { + errs = append(errs, fmt.Errorf("missing expected element %s", id)) + } + } + + return errs +} + +func (m *ElementsMatcher) FailureMessage(actual interface{}) (message string) { + failure := errorsutil.AggregateError(m.failures) + return format.Message(actual, fmt.Sprintf("to match elements: %v", failure)) +} + +func (m *ElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to match elements") +} + +func (m *ElementsMatcher) Failures() []error { + return m.failures +} diff --git a/vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go b/vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go new file mode 100644 index 000000000..188492b21 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go @@ -0,0 +1,72 @@ +package errors + +import ( + "fmt" + "strings" + + "github.com/onsi/gomega/types" +) + +// A stateful matcher that nests other matchers within it and preserves the error types of the +// nested matcher failures. +type NestingMatcher interface { + types.GomegaMatcher + + // Returns the failures of nested matchers. + Failures() []error +} + +// An error type for labeling errors on deeply nested matchers. +type NestedError struct { + Path string + Err error +} + +func (e *NestedError) Error() string { + // Indent Errors. + indented := strings.Replace(e.Err.Error(), "\n", "\n\t", -1) + return fmt.Sprintf("%s:\n\t%v", e.Path, indented) +} + +// Create a NestedError with the given path. 
+// If err is a NestedError, prepend the path to it. +// If err is an AggregateError, recursively Nest each error. +func Nest(path string, err error) error { + if ag, ok := err.(AggregateError); ok { + var errs AggregateError + for _, e := range ag { + errs = append(errs, Nest(path, e)) + } + return errs + } + if ne, ok := err.(*NestedError); ok { + return &NestedError{ + Path: path + ne.Path, + Err: ne.Err, + } + } + return &NestedError{ + Path: path, + Err: err, + } +} + +// An error type for treating multiple errors as a single error. +type AggregateError []error + +// Error is part of the error interface. +func (err AggregateError) Error() string { + if len(err) == 0 { + // This should never happen, really. + return "" + } + if len(err) == 1 { + return err[0].Error() + } + result := fmt.Sprintf("[%s", err[0].Error()) + for i := 1; i < len(err); i++ { + result += fmt.Sprintf(", %s", err[i].Error()) + } + result += "]" + return result +} diff --git a/vendor/github.com/onsi/gomega/gstruct/fields.go b/vendor/github.com/onsi/gomega/gstruct/fields.go new file mode 100644 index 000000000..faf07b1a2 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/fields.go @@ -0,0 +1,165 @@ +// untested sections: 6 + +package gstruct + +import ( + "errors" + "fmt" + "reflect" + "runtime/debug" + "strings" + + "github.com/onsi/gomega/format" + errorsutil "github.com/onsi/gomega/gstruct/errors" + "github.com/onsi/gomega/types" +) + +//MatchAllFields succeeds if every field of a struct matches the field matcher associated with +//it, and every element matcher is matched. +// actual := struct{ +// A int +// B []bool +// C string +// }{ +// A: 5, +// B: []bool{true, false}, +// C: "foo", +// } +// +// Expect(actual).To(MatchAllFields(Fields{ +// "A": Equal(5), +// "B": ConsistOf(true, false), +// "C": Equal("foo"), +// })) +func MatchAllFields(fields Fields) types.GomegaMatcher { + return &FieldsMatcher{ + Fields: fields, + } +} + +//MatchFields succeeds if each element of a struct matches the field matcher associated with +//it. It can ignore extra fields and/or missing fields. +// actual := struct{ +// A int +// B []bool +// C string +// }{ +// A: 5, +// B: []bool{true, false}, +// C: "foo", +// } +// +// Expect(actual).To(MatchFields(IgnoreExtras, Fields{ +// "A": Equal(5), +// "B": ConsistOf(true, false), +// })) +// Expect(actual).To(MatchFields(IgnoreMissing, Fields{ +// "A": Equal(5), +// "B": ConsistOf(true, false), +// "C": Equal("foo"), +// "D": Equal("extra"), +// })) +func MatchFields(options Options, fields Fields) types.GomegaMatcher { + return &FieldsMatcher{ + Fields: fields, + IgnoreExtras: options&IgnoreExtras != 0, + IgnoreMissing: options&IgnoreMissing != 0, + } +} + +type FieldsMatcher struct { + // Matchers for each field. + Fields Fields + + // Whether to ignore extra elements or consider it an error. + IgnoreExtras bool + // Whether to ignore missing elements or consider it an error. + IgnoreMissing bool + + // State. + failures []error +} + +// Field name to matcher. 
+type Fields map[string]types.GomegaMatcher + +func (m *FieldsMatcher) Match(actual interface{}) (success bool, err error) { + if reflect.TypeOf(actual).Kind() != reflect.Struct { + return false, fmt.Errorf("%v is type %T, expected struct", actual, actual) + } + + m.failures = m.matchFields(actual) + if len(m.failures) > 0 { + return false, nil + } + return true, nil +} + +func (m *FieldsMatcher) matchFields(actual interface{}) (errs []error) { + val := reflect.ValueOf(actual) + typ := val.Type() + fields := map[string]bool{} + for i := 0; i < val.NumField(); i++ { + fieldName := typ.Field(i).Name + fields[fieldName] = true + + err := func() (err error) { + // This test relies heavily on reflect, which tends to panic. + // Recover here to provide more useful error messages in that case. + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("panic checking %+v: %v\n%s", actual, r, debug.Stack()) + } + }() + + matcher, expected := m.Fields[fieldName] + if !expected { + if !m.IgnoreExtras { + return fmt.Errorf("unexpected field %s: %+v", fieldName, actual) + } + return nil + } + + field := val.Field(i).Interface() + + match, err := matcher.Match(field) + if err != nil { + return err + } else if !match { + if nesting, ok := matcher.(errorsutil.NestingMatcher); ok { + return errorsutil.AggregateError(nesting.Failures()) + } + return errors.New(matcher.FailureMessage(field)) + } + return nil + }() + if err != nil { + errs = append(errs, errorsutil.Nest("."+fieldName, err)) + } + } + + for field := range m.Fields { + if !fields[field] && !m.IgnoreMissing { + errs = append(errs, fmt.Errorf("missing expected field %s", field)) + } + } + + return errs +} + +func (m *FieldsMatcher) FailureMessage(actual interface{}) (message string) { + failures := make([]string, len(m.failures)) + for i := range m.failures { + failures[i] = m.failures[i].Error() + } + return format.Message(reflect.TypeOf(actual).Name(), + fmt.Sprintf("to match fields: {\n%v\n}\n", strings.Join(failures, "\n"))) +} + +func (m *FieldsMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to match fields") +} + +func (m *FieldsMatcher) Failures() []error { + return m.failures +} diff --git a/vendor/github.com/onsi/gomega/gstruct/ignore.go b/vendor/github.com/onsi/gomega/gstruct/ignore.go new file mode 100644 index 000000000..4396573e4 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/ignore.go @@ -0,0 +1,39 @@ +// untested sections: 2 + +package gstruct + +import ( + "github.com/onsi/gomega/types" +) + +//Ignore ignores the actual value and always succeeds. +// Expect(nil).To(Ignore()) +// Expect(true).To(Ignore()) +func Ignore() types.GomegaMatcher { + return &IgnoreMatcher{true} +} + +//Reject ignores the actual value and always fails. It can be used in conjunction with IgnoreMissing +//to catch problematic elements, or to verify tests are running. +// Expect(nil).NotTo(Reject()) +// Expect(true).NotTo(Reject()) +func Reject() types.GomegaMatcher { + return &IgnoreMatcher{false} +} + +// A matcher that either always succeeds or always fails. 
+type IgnoreMatcher struct { + Succeed bool +} + +func (m *IgnoreMatcher) Match(actual interface{}) (bool, error) { + return m.Succeed, nil +} + +func (m *IgnoreMatcher) FailureMessage(_ interface{}) (message string) { + return "Unconditional failure" +} + +func (m *IgnoreMatcher) NegatedFailureMessage(_ interface{}) (message string) { + return "Unconditional success" +} diff --git a/vendor/github.com/onsi/gomega/gstruct/keys.go b/vendor/github.com/onsi/gomega/gstruct/keys.go new file mode 100644 index 000000000..56aed4bab --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/keys.go @@ -0,0 +1,126 @@ +// untested sections: 6 + +package gstruct + +import ( + "errors" + "fmt" + "reflect" + "runtime/debug" + "strings" + + "github.com/onsi/gomega/format" + errorsutil "github.com/onsi/gomega/gstruct/errors" + "github.com/onsi/gomega/types" +) + +func MatchAllKeys(keys Keys) types.GomegaMatcher { + return &KeysMatcher{ + Keys: keys, + } +} + +func MatchKeys(options Options, keys Keys) types.GomegaMatcher { + return &KeysMatcher{ + Keys: keys, + IgnoreExtras: options&IgnoreExtras != 0, + IgnoreMissing: options&IgnoreMissing != 0, + } +} + +type KeysMatcher struct { + // Matchers for each key. + Keys Keys + + // Whether to ignore extra keys or consider it an error. + IgnoreExtras bool + // Whether to ignore missing keys or consider it an error. + IgnoreMissing bool + + // State. + failures []error +} + +type Keys map[interface{}]types.GomegaMatcher + +func (m *KeysMatcher) Match(actual interface{}) (success bool, err error) { + if reflect.TypeOf(actual).Kind() != reflect.Map { + return false, fmt.Errorf("%v is type %T, expected map", actual, actual) + } + + m.failures = m.matchKeys(actual) + if len(m.failures) > 0 { + return false, nil + } + return true, nil +} + +func (m *KeysMatcher) matchKeys(actual interface{}) (errs []error) { + actualValue := reflect.ValueOf(actual) + keys := map[interface{}]bool{} + for _, keyValue := range actualValue.MapKeys() { + key := keyValue.Interface() + keys[key] = true + + err := func() (err error) { + // This test relies heavily on reflect, which tends to panic. + // Recover here to provide more useful error messages in that case. 
+ defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("panic checking %+v: %v\n%s", actual, r, debug.Stack()) + } + }() + + matcher, ok := m.Keys[key] + if !ok { + if !m.IgnoreExtras { + return fmt.Errorf("unexpected key %s: %+v", key, actual) + } + return nil + } + + valInterface := actualValue.MapIndex(keyValue).Interface() + + match, err := matcher.Match(valInterface) + if err != nil { + return err + } + + if !match { + if nesting, ok := matcher.(errorsutil.NestingMatcher); ok { + return errorsutil.AggregateError(nesting.Failures()) + } + return errors.New(matcher.FailureMessage(valInterface)) + } + return nil + }() + if err != nil { + errs = append(errs, errorsutil.Nest(fmt.Sprintf(".%#v", key), err)) + } + } + + for key := range m.Keys { + if !keys[key] && !m.IgnoreMissing { + errs = append(errs, fmt.Errorf("missing expected key %s", key)) + } + } + + return errs +} + +func (m *KeysMatcher) FailureMessage(actual interface{}) (message string) { + failures := make([]string, len(m.failures)) + for i := range m.failures { + failures[i] = m.failures[i].Error() + } + return format.Message(reflect.TypeOf(actual).Name(), + fmt.Sprintf("to match keys: {\n%v\n}\n", strings.Join(failures, "\n"))) +} + +func (m *KeysMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to match keys") +} + +func (m *KeysMatcher) Failures() []error { + return m.failures +} diff --git a/vendor/github.com/onsi/gomega/gstruct/pointer.go b/vendor/github.com/onsi/gomega/gstruct/pointer.go new file mode 100644 index 000000000..cc828a325 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/pointer.go @@ -0,0 +1,58 @@ +// untested sections: 3 + +package gstruct + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/types" +) + +//PointTo applies the given matcher to the value pointed to by actual. It fails if the pointer is +//nil. +// actual := 5 +// Expect(&actual).To(PointTo(Equal(5))) +func PointTo(matcher types.GomegaMatcher) types.GomegaMatcher { + return &PointerMatcher{ + Matcher: matcher, + } +} + +type PointerMatcher struct { + Matcher types.GomegaMatcher + + // Failure message. + failure string +} + +func (m *PointerMatcher) Match(actual interface{}) (bool, error) { + val := reflect.ValueOf(actual) + + // return error if actual type is not a pointer + if val.Kind() != reflect.Ptr { + return false, fmt.Errorf("PointerMatcher expects a pointer but we have '%s'", val.Kind()) + } + + if !val.IsValid() || val.IsNil() { + m.failure = format.Message(actual, "not to be ") + return false, nil + } + + // Forward the value. + elem := val.Elem().Interface() + match, err := m.Matcher.Match(elem) + if !match { + m.failure = m.Matcher.FailureMessage(elem) + } + return match, err +} + +func (m *PointerMatcher) FailureMessage(_ interface{}) (message string) { + return m.failure +} + +func (m *PointerMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return m.Matcher.NegatedFailureMessage(actual) +} diff --git a/vendor/github.com/onsi/gomega/gstruct/types.go b/vendor/github.com/onsi/gomega/gstruct/types.go new file mode 100644 index 000000000..48cbbe8f6 --- /dev/null +++ b/vendor/github.com/onsi/gomega/gstruct/types.go @@ -0,0 +1,15 @@ +package gstruct + +//Options is the type for options passed to some matchers. +type Options int + +const ( + //IgnoreExtras tells the matcher to ignore extra elements or fields, rather than triggering a failure. 
+ IgnoreExtras Options = 1 << iota + //IgnoreMissing tells the matcher to ignore missing elements or fields, rather than triggering a failure. + IgnoreMissing + //AllowDuplicates tells the matcher to permit multiple members of the slice to produce the same ID when + //considered by the indentifier function. All members that map to a given key must still match successfully + //with the matcher that is provided for that key. + AllowDuplicates +) diff --git a/vendor/go.uber.org/mock/gomock/call.go b/vendor/go.uber.org/mock/gomock/call.go new file mode 100644 index 000000000..98881596d --- /dev/null +++ b/vendor/go.uber.org/mock/gomock/call.go @@ -0,0 +1,471 @@ +// Copyright 2010 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gomock + +import ( + "fmt" + "reflect" + "strconv" + "strings" +) + +// Call represents an expected call to a mock. +type Call struct { + t TestHelper // for triggering test failures on invalid call setup + + receiver interface{} // the receiver of the method call + method string // the name of the method + methodType reflect.Type // the type of the method + args []Matcher // the args + origin string // file and line number of call setup + + preReqs []*Call // prerequisite calls + + // Expectations + minCalls, maxCalls int + + numCalls int // actual number made + + // actions are called when this Call is called. Each action gets the args and + // can set the return values by returning a non-nil slice. Actions run in the + // order they are created. + actions []func([]interface{}) []interface{} +} + +// newCall creates a *Call. It requires the method type in order to support +// unexported methods. +func newCall(t TestHelper, receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call { + t.Helper() + + // TODO: check arity, types. + mArgs := make([]Matcher, len(args)) + for i, arg := range args { + if m, ok := arg.(Matcher); ok { + mArgs[i] = m + } else if arg == nil { + // Handle nil specially so that passing a nil interface value + // will match the typed nils of concrete args. + mArgs[i] = Nil() + } else { + mArgs[i] = Eq(arg) + } + } + + // callerInfo's skip should be updated if the number of calls between the user's test + // and this line changes, i.e. this code is wrapped in another anonymous function. + // 0 is us, 1 is RecordCallWithMethodType(), 2 is the generated recorder, and 3 is the user's test. + origin := callerInfo(3) + actions := []func([]interface{}) []interface{}{func([]interface{}) []interface{} { + // Synthesize the zero value for each of the return args' types. 
+ rets := make([]interface{}, methodType.NumOut()) + for i := 0; i < methodType.NumOut(); i++ { + rets[i] = reflect.Zero(methodType.Out(i)).Interface() + } + return rets + }} + return &Call{t: t, receiver: receiver, method: method, methodType: methodType, + args: mArgs, origin: origin, minCalls: 1, maxCalls: 1, actions: actions} +} + +// AnyTimes allows the expectation to be called 0 or more times +func (c *Call) AnyTimes() *Call { + c.minCalls, c.maxCalls = 0, 1e8 // close enough to infinity + return c +} + +// MinTimes requires the call to occur at least n times. If AnyTimes or MaxTimes have not been called or if MaxTimes +// was previously called with 1, MinTimes also sets the maximum number of calls to infinity. +func (c *Call) MinTimes(n int) *Call { + c.minCalls = n + if c.maxCalls == 1 { + c.maxCalls = 1e8 + } + return c +} + +// MaxTimes limits the number of calls to n times. If AnyTimes or MinTimes have not been called or if MinTimes was +// previously called with 1, MaxTimes also sets the minimum number of calls to 0. +func (c *Call) MaxTimes(n int) *Call { + c.maxCalls = n + if c.minCalls == 1 { + c.minCalls = 0 + } + return c +} + +// DoAndReturn declares the action to run when the call is matched. +// The return values from this function are returned by the mocked function. +// It takes an interface{} argument to support n-arity functions. +// The anonymous function must match the function signature mocked method. +func (c *Call) DoAndReturn(f interface{}) *Call { + // TODO: Check arity and types here, rather than dying badly elsewhere. + v := reflect.ValueOf(f) + + c.addAction(func(args []interface{}) []interface{} { + c.t.Helper() + ft := v.Type() + if c.methodType.NumIn() != ft.NumIn() { + if ft.IsVariadic() { + c.t.Fatalf("wrong number of arguments in DoAndReturn func for %T.%v The function signature must match the mocked method, a variadic function cannot be used.", + c.receiver, c.method) + } else { + c.t.Fatalf("wrong number of arguments in DoAndReturn func for %T.%v: got %d, want %d [%s]", + c.receiver, c.method, ft.NumIn(), c.methodType.NumIn(), c.origin) + } + return nil + } + vArgs := make([]reflect.Value, len(args)) + for i := 0; i < len(args); i++ { + if args[i] != nil { + vArgs[i] = reflect.ValueOf(args[i]) + } else { + // Use the zero value for the arg. + vArgs[i] = reflect.Zero(ft.In(i)) + } + } + vRets := v.Call(vArgs) + rets := make([]interface{}, len(vRets)) + for i, ret := range vRets { + rets[i] = ret.Interface() + } + return rets + }) + return c +} + +// Do declares the action to run when the call is matched. The function's +// return values are ignored to retain backward compatibility. To use the +// return values call DoAndReturn. +// It takes an interface{} argument to support n-arity functions. +// The anonymous function must match the function signature mocked method. +func (c *Call) Do(f interface{}) *Call { + // TODO: Check arity and types here, rather than dying badly elsewhere. 
+ v := reflect.ValueOf(f) + + c.addAction(func(args []interface{}) []interface{} { + c.t.Helper() + ft := v.Type() + if c.methodType.NumIn() != ft.NumIn() { + if ft.IsVariadic() { + c.t.Fatalf("wrong number of arguments in Do func for %T.%v The function signature must match the mocked method, a variadic function cannot be used.", + c.receiver, c.method) + } else { + c.t.Fatalf("wrong number of arguments in Do func for %T.%v: got %d, want %d [%s]", + c.receiver, c.method, ft.NumIn(), c.methodType.NumIn(), c.origin) + } + return nil + } + vArgs := make([]reflect.Value, len(args)) + for i := 0; i < len(args); i++ { + if args[i] != nil { + vArgs[i] = reflect.ValueOf(args[i]) + } else { + // Use the zero value for the arg. + vArgs[i] = reflect.Zero(ft.In(i)) + } + } + v.Call(vArgs) + return nil + }) + return c +} + +// Return declares the values to be returned by the mocked function call. +func (c *Call) Return(rets ...interface{}) *Call { + c.t.Helper() + + mt := c.methodType + if len(rets) != mt.NumOut() { + c.t.Fatalf("wrong number of arguments to Return for %T.%v: got %d, want %d [%s]", + c.receiver, c.method, len(rets), mt.NumOut(), c.origin) + } + for i, ret := range rets { + if got, want := reflect.TypeOf(ret), mt.Out(i); got == want { + // Identical types; nothing to do. + } else if got == nil { + // Nil needs special handling. + switch want.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + // ok + default: + c.t.Fatalf("argument %d to Return for %T.%v is nil, but %v is not nillable [%s]", + i, c.receiver, c.method, want, c.origin) + } + } else if got.AssignableTo(want) { + // Assignable type relation. Make the assignment now so that the generated code + // can return the values with a type assertion. + v := reflect.New(want).Elem() + v.Set(reflect.ValueOf(ret)) + rets[i] = v.Interface() + } else { + c.t.Fatalf("wrong type of argument %d to Return for %T.%v: %v is not assignable to %v [%s]", + i, c.receiver, c.method, got, want, c.origin) + } + } + + c.addAction(func([]interface{}) []interface{} { + return rets + }) + + return c +} + +// Times declares the exact number of times a function call is expected to be executed. +func (c *Call) Times(n int) *Call { + c.minCalls, c.maxCalls = n, n + return c +} + +// SetArg declares an action that will set the nth argument's value, +// indirected through a pointer. Or, in the case of a slice and map, SetArg +// will copy value's elements/key-value pairs into the nth argument. +func (c *Call) SetArg(n int, value interface{}) *Call { + c.t.Helper() + + mt := c.methodType + // TODO: This will break on variadic methods. + // We will need to check those at invocation time. + if n < 0 || n >= mt.NumIn() { + c.t.Fatalf("SetArg(%d, ...) called for a method with %d args [%s]", + n, mt.NumIn(), c.origin) + } + // Permit setting argument through an interface. + // In the interface case, we don't (nay, can't) check the type here. + at := mt.In(n) + switch at.Kind() { + case reflect.Ptr: + dt := at.Elem() + if vt := reflect.TypeOf(value); !vt.AssignableTo(dt) { + c.t.Fatalf("SetArg(%d, ...) argument is a %v, not assignable to %v [%s]", + n, vt, dt, c.origin) + } + case reflect.Interface: + // nothing to do + case reflect.Slice: + // nothing to do + case reflect.Map: + // nothing to do + default: + c.t.Fatalf("SetArg(%d, ...) 
referring to argument of non-pointer non-interface non-slice non-map type %v [%s]", + n, at, c.origin) + } + + c.addAction(func(args []interface{}) []interface{} { + v := reflect.ValueOf(value) + switch reflect.TypeOf(args[n]).Kind() { + case reflect.Slice: + setSlice(args[n], v) + case reflect.Map: + setMap(args[n], v) + default: + reflect.ValueOf(args[n]).Elem().Set(v) + } + return nil + }) + return c +} + +// isPreReq returns true if other is a direct or indirect prerequisite to c. +func (c *Call) isPreReq(other *Call) bool { + for _, preReq := range c.preReqs { + if other == preReq || preReq.isPreReq(other) { + return true + } + } + return false +} + +// After declares that the call may only match after preReq has been exhausted. +func (c *Call) After(preReq *Call) *Call { + c.t.Helper() + + if c == preReq { + c.t.Fatalf("A call isn't allowed to be its own prerequisite") + } + if preReq.isPreReq(c) { + c.t.Fatalf("Loop in call order: %v is a prerequisite to %v (possibly indirectly).", c, preReq) + } + + c.preReqs = append(c.preReqs, preReq) + return c +} + +// Returns true if the minimum number of calls have been made. +func (c *Call) satisfied() bool { + return c.numCalls >= c.minCalls +} + +// Returns true if the maximum number of calls have been made. +func (c *Call) exhausted() bool { + return c.numCalls >= c.maxCalls +} + +func (c *Call) String() string { + args := make([]string, len(c.args)) + for i, arg := range c.args { + args[i] = arg.String() + } + arguments := strings.Join(args, ", ") + return fmt.Sprintf("%T.%v(%s) %s", c.receiver, c.method, arguments, c.origin) +} + +// Tests if the given call matches the expected call. +// If yes, returns nil. If no, returns error with message explaining why it does not match. +func (c *Call) matches(args []interface{}) error { + if !c.methodType.IsVariadic() { + if len(args) != len(c.args) { + return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: %d", + c.origin, len(args), len(c.args)) + } + + for i, m := range c.args { + if !m.Matches(args[i]) { + return fmt.Errorf( + "expected call at %s doesn't match the argument at index %d.\nGot: %v\nWant: %v", + c.origin, i, formatGottenArg(m, args[i]), m, + ) + } + } + } else { + if len(c.args) < c.methodType.NumIn()-1 { + return fmt.Errorf("expected call at %s has the wrong number of matchers. Got: %d, want: %d", + c.origin, len(c.args), c.methodType.NumIn()-1) + } + if len(c.args) != c.methodType.NumIn() && len(args) != len(c.args) { + return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: %d", + c.origin, len(args), len(c.args)) + } + if len(args) < len(c.args)-1 { + return fmt.Errorf("expected call at %s has the wrong number of arguments. 
Got: %d, want: greater than or equal to %d", + c.origin, len(args), len(c.args)-1) + } + + for i, m := range c.args { + if i < c.methodType.NumIn()-1 { + // Non-variadic args + if !m.Matches(args[i]) { + return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v", + c.origin, strconv.Itoa(i), formatGottenArg(m, args[i]), m) + } + continue + } + // The last arg has a possibility of a variadic argument, so let it branch + + // sample: Foo(a int, b int, c ...int) + if i < len(c.args) && i < len(args) { + if m.Matches(args[i]) { + // Got Foo(a, b, c) want Foo(matcherA, matcherB, gomock.Any()) + // Got Foo(a, b, c) want Foo(matcherA, matcherB, someSliceMatcher) + // Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC) + // Got Foo(a, b) want Foo(matcherA, matcherB) + // Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD) + continue + } + } + + // The number of actual args don't match the number of matchers, + // or the last matcher is a slice and the last arg is not. + // If this function still matches it is because the last matcher + // matches all the remaining arguments or the lack of any. + // Convert the remaining arguments, if any, into a slice of the + // expected type. + vArgsType := c.methodType.In(c.methodType.NumIn() - 1) + vArgs := reflect.MakeSlice(vArgsType, 0, len(args)-i) + for _, arg := range args[i:] { + vArgs = reflect.Append(vArgs, reflect.ValueOf(arg)) + } + if m.Matches(vArgs.Interface()) { + // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, gomock.Any()) + // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, someSliceMatcher) + // Got Foo(a, b) want Foo(matcherA, matcherB, gomock.Any()) + // Got Foo(a, b) want Foo(matcherA, matcherB, someEmptySliceMatcher) + break + } + // Wrong number of matchers or not match. Fail. + // Got Foo(a, b) want Foo(matcherA, matcherB, matcherC, matcherD) + // Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC, matcherD) + // Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD, matcherE) + // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, matcherC, matcherD) + // Got Foo(a, b, c) want Foo(matcherA, matcherB) + + return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v", + c.origin, strconv.Itoa(i), formatGottenArg(m, args[i:]), c.args[i]) + } + } + + // Check that all prerequisite calls have been satisfied. + for _, preReqCall := range c.preReqs { + if !preReqCall.satisfied() { + return fmt.Errorf("expected call at %s doesn't have a prerequisite call satisfied:\n%v\nshould be called before:\n%v", + c.origin, preReqCall, c) + } + } + + // Check that the call is not exhausted. + if c.exhausted() { + return fmt.Errorf("expected call at %s has already been called the max number of times", c.origin) + } + + return nil +} + +// dropPrereqs tells the expected Call to not re-check prerequisite calls any +// longer, and to return its current set. +func (c *Call) dropPrereqs() (preReqs []*Call) { + preReqs = c.preReqs + c.preReqs = nil + return +} + +func (c *Call) call() []func([]interface{}) []interface{} { + c.numCalls++ + return c.actions +} + +// InOrder declares that the given calls should occur in order. 
+func InOrder(calls ...*Call) { + for i := 1; i < len(calls); i++ { + calls[i].After(calls[i-1]) + } +} + +func setSlice(arg interface{}, v reflect.Value) { + va := reflect.ValueOf(arg) + for i := 0; i < v.Len(); i++ { + va.Index(i).Set(v.Index(i)) + } +} + +func setMap(arg interface{}, v reflect.Value) { + va := reflect.ValueOf(arg) + for _, e := range va.MapKeys() { + va.SetMapIndex(e, reflect.Value{}) + } + for _, e := range v.MapKeys() { + va.SetMapIndex(e, v.MapIndex(e)) + } +} + +func (c *Call) addAction(action func([]interface{}) []interface{}) { + c.actions = append(c.actions, action) +} + +func formatGottenArg(m Matcher, arg interface{}) string { + got := fmt.Sprintf("%v (%T)", arg, arg) + if gs, ok := m.(GotFormatter); ok { + got = gs.Got(arg) + } + return got +} diff --git a/vendor/go.uber.org/mock/gomock/callset.go b/vendor/go.uber.org/mock/gomock/callset.go new file mode 100644 index 000000000..f2131a14e --- /dev/null +++ b/vendor/go.uber.org/mock/gomock/callset.go @@ -0,0 +1,164 @@ +// Copyright 2011 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gomock + +import ( + "bytes" + "errors" + "fmt" + "sync" +) + +// callSet represents a set of expected calls, indexed by receiver and method +// name. +type callSet struct { + // Calls that are still expected. + expected map[callSetKey][]*Call + expectedMu *sync.Mutex + // Calls that have been exhausted. + exhausted map[callSetKey][]*Call + // when set to true, existing call expectations are overridden when new call expectations are made + allowOverride bool +} + +// callSetKey is the key in the maps in callSet +type callSetKey struct { + receiver interface{} + fname string +} + +func newCallSet() *callSet { + return &callSet{ + expected: make(map[callSetKey][]*Call), + expectedMu: &sync.Mutex{}, + exhausted: make(map[callSetKey][]*Call), + } +} + +func newOverridableCallSet() *callSet { + return &callSet{ + expected: make(map[callSetKey][]*Call), + expectedMu: &sync.Mutex{}, + exhausted: make(map[callSetKey][]*Call), + allowOverride: true, + } +} + +// Add adds a new expected call. +func (cs callSet) Add(call *Call) { + key := callSetKey{call.receiver, call.method} + + cs.expectedMu.Lock() + defer cs.expectedMu.Unlock() + + m := cs.expected + if call.exhausted() { + m = cs.exhausted + } + if cs.allowOverride { + m[key] = make([]*Call, 0) + } + + m[key] = append(m[key], call) +} + +// Remove removes an expected call. +func (cs callSet) Remove(call *Call) { + key := callSetKey{call.receiver, call.method} + + cs.expectedMu.Lock() + defer cs.expectedMu.Unlock() + + calls := cs.expected[key] + for i, c := range calls { + if c == call { + // maintain order for remaining calls + cs.expected[key] = append(calls[:i], calls[i+1:]...) + cs.exhausted[key] = append(cs.exhausted[key], call) + break + } + } +} + +// FindMatch searches for a matching call. Returns error with explanation message if no call matched. 
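To make the Call and Controller plumbing in this hunk easier to review, a minimal hand-written sketch of how a mock drives it. The Store interface, mockStore type, and expectPut helper are hypothetical stand-ins for what mockgen would normally generate; this is not part of the vendored file.

package example

import (
	"testing"

	"go.uber.org/mock/gomock"
)

// Store is a hypothetical interface under test.
type Store interface {
	Put(key string, value int) error
}

// mockStore is a minimal hand-written mock in the style of generated code:
// Put reports the invocation to the Controller, expectPut records an
// expectation and returns the *Call for further configuration.
type mockStore struct{ ctrl *gomock.Controller }

func (m *mockStore) Put(key string, value int) error {
	rets := m.ctrl.Call(m, "Put", key, value)
	err, _ := rets[0].(error)
	return err
}

func (m *mockStore) expectPut(key, value interface{}) *gomock.Call {
	return m.ctrl.RecordCall(m, "Put", key, value)
}

func TestPutOrdering(t *testing.T) {
	ctrl := gomock.NewController(t) // Finish runs automatically via t.Cleanup on Go 1.14+

	m := &mockStore{ctrl: ctrl}
	first := m.expectPut("a", 1).Return(nil)
	m.expectPut("b", gomock.Any()).After(first).Return(nil)

	var s Store = m
	if err := s.Put("a", 1); err != nil {
		t.Fatal(err)
	}
	if err := s.Put("b", 2); err != nil {
		t.Fatal(err)
	}
}

Wrapping both expectations in gomock.InOrder would express the same ordering constraint, since InOrder (implemented above) simply chains After across consecutive calls.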
+func (cs callSet) FindMatch(receiver interface{}, method string, args []interface{}) (*Call, error) { + key := callSetKey{receiver, method} + + cs.expectedMu.Lock() + defer cs.expectedMu.Unlock() + + // Search through the expected calls. + expected := cs.expected[key] + var callsErrors bytes.Buffer + for _, call := range expected { + err := call.matches(args) + if err != nil { + _, _ = fmt.Fprintf(&callsErrors, "\n%v", err) + } else { + return call, nil + } + } + + // If we haven't found a match then search through the exhausted calls so we + // get useful error messages. + exhausted := cs.exhausted[key] + for _, call := range exhausted { + if err := call.matches(args); err != nil { + _, _ = fmt.Fprintf(&callsErrors, "\n%v", err) + continue + } + _, _ = fmt.Fprintf( + &callsErrors, "all expected calls for method %q have been exhausted", method, + ) + } + + if len(expected)+len(exhausted) == 0 { + _, _ = fmt.Fprintf(&callsErrors, "there are no expected calls of the method %q for that receiver", method) + } + + return nil, errors.New(callsErrors.String()) +} + +// Failures returns the calls that are not satisfied. +func (cs callSet) Failures() []*Call { + cs.expectedMu.Lock() + defer cs.expectedMu.Unlock() + + failures := make([]*Call, 0, len(cs.expected)) + for _, calls := range cs.expected { + for _, call := range calls { + if !call.satisfied() { + failures = append(failures, call) + } + } + } + return failures +} + +// Satisfied returns true in case all expected calls in this callSet are satisfied. +func (cs callSet) Satisfied() bool { + cs.expectedMu.Lock() + defer cs.expectedMu.Unlock() + + for _, calls := range cs.expected { + for _, call := range calls { + if !call.satisfied() { + return false + } + } + } + + return true +} diff --git a/vendor/go.uber.org/mock/gomock/controller.go b/vendor/go.uber.org/mock/gomock/controller.go new file mode 100644 index 000000000..de904c8c9 --- /dev/null +++ b/vendor/go.uber.org/mock/gomock/controller.go @@ -0,0 +1,324 @@ +// Copyright 2010 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gomock + +import ( + "context" + "fmt" + "reflect" + "runtime" + "sync" +) + +// A TestReporter is something that can be used to report test failures. It +// is satisfied by the standard library's *testing.T. +type TestReporter interface { + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) +} + +// TestHelper is a TestReporter that has the Helper method. It is satisfied +// by the standard library's *testing.T. +type TestHelper interface { + TestReporter + Helper() +} + +// cleanuper is used to check if TestHelper also has the `Cleanup` method. A +// common pattern is to pass in a `*testing.T` to +// `NewController(t TestReporter)`. In Go 1.14+, `*testing.T` has a cleanup +// method. This can be utilized to call `Finish()` so the caller of this library +// does not have to. +type cleanuper interface { + Cleanup(func()) +} + +// A Controller represents the top-level control of a mock ecosystem. 
It +// defines the scope and lifetime of mock objects, as well as their +// expectations. It is safe to call Controller's methods from multiple +// goroutines. Each test should create a new Controller and invoke Finish via +// defer. +// +// func TestFoo(t *testing.T) { +// ctrl := gomock.NewController(t) +// defer ctrl.Finish() +// // .. +// } +// +// func TestBar(t *testing.T) { +// t.Run("Sub-Test-1", st) { +// ctrl := gomock.NewController(st) +// defer ctrl.Finish() +// // .. +// }) +// t.Run("Sub-Test-2", st) { +// ctrl := gomock.NewController(st) +// defer ctrl.Finish() +// // .. +// }) +// }) +type Controller struct { + // T should only be called within a generated mock. It is not intended to + // be used in user code and may be changed in future versions. T is the + // TestReporter passed in when creating the Controller via NewController. + // If the TestReporter does not implement a TestHelper it will be wrapped + // with a nopTestHelper. + T TestHelper + mu sync.Mutex + expectedCalls *callSet + finished bool +} + +// NewController returns a new Controller. It is the preferred way to create a +// Controller. +// +// New in go1.14+, if you are passing a *testing.T into this function you no +// longer need to call ctrl.Finish() in your test methods. +func NewController(t TestReporter, opts ...ControllerOption) *Controller { + h, ok := t.(TestHelper) + if !ok { + h = &nopTestHelper{t} + } + ctrl := &Controller{ + T: h, + expectedCalls: newCallSet(), + } + for _, opt := range opts { + opt.apply(ctrl) + } + if c, ok := isCleanuper(ctrl.T); ok { + c.Cleanup(func() { + ctrl.T.Helper() + ctrl.finish(true, nil) + }) + } + + return ctrl +} + +// ControllerOption configures how a Controller should behave. +type ControllerOption interface { + apply(*Controller) +} + +type overridableExpectationsOption struct{} + +// WithOverridableExpectations allows for overridable call expectations +// i.e., subsequent call expectations override existing call expectations +func WithOverridableExpectations() overridableExpectationsOption { + return overridableExpectationsOption{} +} + +func (o overridableExpectationsOption) apply(ctrl *Controller) { + ctrl.expectedCalls = newOverridableCallSet() +} + +type cancelReporter struct { + t TestHelper + cancel func() +} + +func (r *cancelReporter) Errorf(format string, args ...interface{}) { + r.t.Errorf(format, args...) +} +func (r *cancelReporter) Fatalf(format string, args ...interface{}) { + defer r.cancel() + r.t.Fatalf(format, args...) +} + +func (r *cancelReporter) Helper() { + r.t.Helper() +} + +// WithContext returns a new Controller and a Context, which is cancelled on any +// fatal failure. +func WithContext(ctx context.Context, t TestReporter) (*Controller, context.Context) { + h, ok := t.(TestHelper) + if !ok { + h = &nopTestHelper{t: t} + } + + ctx, cancel := context.WithCancel(ctx) + return NewController(&cancelReporter{t: h, cancel: cancel}), ctx +} + +type nopTestHelper struct { + t TestReporter +} + +func (h *nopTestHelper) Errorf(format string, args ...interface{}) { + h.t.Errorf(format, args...) +} +func (h *nopTestHelper) Fatalf(format string, args ...interface{}) { + h.t.Fatalf(format, args...) +} + +func (h nopTestHelper) Helper() {} + +// RecordCall is called by a mock. It should not be called by user code. 
+func (ctrl *Controller) RecordCall(receiver interface{}, method string, args ...interface{}) *Call { + ctrl.T.Helper() + + recv := reflect.ValueOf(receiver) + for i := 0; i < recv.Type().NumMethod(); i++ { + if recv.Type().Method(i).Name == method { + return ctrl.RecordCallWithMethodType(receiver, method, recv.Method(i).Type(), args...) + } + } + ctrl.T.Fatalf("gomock: failed finding method %s on %T", method, receiver) + panic("unreachable") +} + +// RecordCallWithMethodType is called by a mock. It should not be called by user code. +func (ctrl *Controller) RecordCallWithMethodType(receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call { + ctrl.T.Helper() + + call := newCall(ctrl.T, receiver, method, methodType, args...) + + ctrl.mu.Lock() + defer ctrl.mu.Unlock() + ctrl.expectedCalls.Add(call) + + return call +} + +// Call is called by a mock. It should not be called by user code. +func (ctrl *Controller) Call(receiver interface{}, method string, args ...interface{}) []interface{} { + ctrl.T.Helper() + + // Nest this code so we can use defer to make sure the lock is released. + actions := func() []func([]interface{}) []interface{} { + ctrl.T.Helper() + ctrl.mu.Lock() + defer ctrl.mu.Unlock() + + expected, err := ctrl.expectedCalls.FindMatch(receiver, method, args) + if err != nil { + // callerInfo's skip should be updated if the number of calls between the user's test + // and this line changes, i.e. this code is wrapped in another anonymous function. + // 0 is us, 1 is controller.Call(), 2 is the generated mock, and 3 is the user's test. + origin := callerInfo(3) + ctrl.T.Fatalf("Unexpected call to %T.%v(%v) at %s because: %s", receiver, method, args, origin, err) + } + + // Two things happen here: + // * the matching call no longer needs to check prerequite calls, + // * and the prerequite calls are no longer expected, so remove them. + preReqCalls := expected.dropPrereqs() + for _, preReqCall := range preReqCalls { + ctrl.expectedCalls.Remove(preReqCall) + } + + actions := expected.call() + if expected.exhausted() { + ctrl.expectedCalls.Remove(expected) + } + return actions + }() + + var rets []interface{} + for _, action := range actions { + if r := action(args); r != nil { + rets = r + } + } + + return rets +} + +// Finish checks to see if all the methods that were expected to be called +// were called. It should be invoked for each Controller. It is not idempotent +// and therefore can only be invoked once. +// +// New in go1.14+, if you are passing a *testing.T into NewController function you no +// longer need to call ctrl.Finish() in your test methods. +func (ctrl *Controller) Finish() { + // If we're currently panicking, probably because this is a deferred call. + // This must be recovered in the deferred function. + err := recover() + ctrl.finish(false, err) +} + +// Satisfied returns whether all expected calls bound to this Controller have been satisfied. +// Calling Finish is then guaranteed to not fail due to missing calls. +func (ctrl *Controller) Satisfied() bool { + return ctrl.expectedCalls.Satisfied() +} + +func (ctrl *Controller) finish(cleanup bool, panicErr interface{}) { + ctrl.T.Helper() + + ctrl.mu.Lock() + defer ctrl.mu.Unlock() + + if ctrl.finished { + if _, ok := isCleanuper(ctrl.T); !ok { + ctrl.T.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.") + } + return + } + ctrl.finished = true + + // Short-circuit, pass through the panic. 
+ if panicErr != nil { + panic(panicErr) + } + + // Check that all remaining expected calls are satisfied. + failures := ctrl.expectedCalls.Failures() + for _, call := range failures { + ctrl.T.Errorf("missing call(s) to %v", call) + } + if len(failures) != 0 { + if !cleanup { + ctrl.T.Fatalf("aborting test due to missing call(s)") + return + } + ctrl.T.Errorf("aborting test due to missing call(s)") + } +} + +// callerInfo returns the file:line of the call site. skip is the number +// of stack frames to skip when reporting. 0 is callerInfo's call site. +func callerInfo(skip int) string { + if _, file, line, ok := runtime.Caller(skip + 1); ok { + return fmt.Sprintf("%s:%d", file, line) + } + return "unknown file" +} + +// isCleanuper checks it if t's base TestReporter has a Cleanup method. +func isCleanuper(t TestReporter) (cleanuper, bool) { + tr := unwrapTestReporter(t) + c, ok := tr.(cleanuper) + return c, ok +} + +// unwrapTestReporter unwraps TestReporter to the base implementation. +func unwrapTestReporter(t TestReporter) TestReporter { + tr := t + switch nt := t.(type) { + case *cancelReporter: + tr = nt.t + if h, check := tr.(*nopTestHelper); check { + tr = h.t + } + case *nopTestHelper: + tr = nt.t + default: + // not wrapped + } + return tr +} diff --git a/vendor/go.uber.org/mock/gomock/doc.go b/vendor/go.uber.org/mock/gomock/doc.go new file mode 100644 index 000000000..f1a304fb3 --- /dev/null +++ b/vendor/go.uber.org/mock/gomock/doc.go @@ -0,0 +1,60 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package gomock is a mock framework for Go. +// +// Standard usage: +// +// (1) Define an interface that you wish to mock. +// type MyInterface interface { +// SomeMethod(x int64, y string) +// } +// (2) Use mockgen to generate a mock from the interface. +// (3) Use the mock in a test: +// func TestMyThing(t *testing.T) { +// mockCtrl := gomock.NewController(t)// +// mockObj := something.NewMockMyInterface(mockCtrl) +// mockObj.EXPECT().SomeMethod(4, "blah") +// // pass mockObj to a real object and play with it. +// } +// +// By default, expected calls are not enforced to run in any particular order. +// Call order dependency can be enforced by use of InOrder and/or Call.After. +// Call.After can create more varied call order dependencies, but InOrder is +// often more convenient. +// +// The following examples create equivalent call order dependencies. 
+// +// Example of using Call.After to chain expected call order: +// +// firstCall := mockObj.EXPECT().SomeMethod(1, "first") +// secondCall := mockObj.EXPECT().SomeMethod(2, "second").After(firstCall) +// mockObj.EXPECT().SomeMethod(3, "third").After(secondCall) +// +// Example of using InOrder to declare expected call order: +// +// gomock.InOrder( +// mockObj.EXPECT().SomeMethod(1, "first"), +// mockObj.EXPECT().SomeMethod(2, "second"), +// mockObj.EXPECT().SomeMethod(3, "third"), +// ) +// +// The standard TestReporter most users will pass to `NewController` is a +// `*testing.T` from the context of the test. Note that this will use the +// standard `t.Error` and `t.Fatal` methods to report what happened in the test. +// In some cases this can leave your testing package in a weird state if global +// state is used since `t.Fatal` is like calling panic in the middle of a +// function. In these cases it is recommended that you pass in your own +// `TestReporter`. +package gomock diff --git a/vendor/go.uber.org/mock/gomock/matchers.go b/vendor/go.uber.org/mock/gomock/matchers.go new file mode 100644 index 000000000..6d5eff4fe --- /dev/null +++ b/vendor/go.uber.org/mock/gomock/matchers.go @@ -0,0 +1,346 @@ +// Copyright 2010 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gomock + +import ( + "fmt" + "reflect" + "strings" +) + +// A Matcher is a representation of a class of values. +// It is used to represent the valid or expected arguments to a mocked method. +type Matcher interface { + // Matches returns whether x is a match. + Matches(x interface{}) bool + + // String describes what the matcher matches. + String() string +} + +// WantFormatter modifies the given Matcher's String() method to the given +// Stringer. This allows for control on how the "Want" is formatted when +// printing . +func WantFormatter(s fmt.Stringer, m Matcher) Matcher { + type matcher interface { + Matches(x interface{}) bool + } + + return struct { + matcher + fmt.Stringer + }{ + matcher: m, + Stringer: s, + } +} + +// StringerFunc type is an adapter to allow the use of ordinary functions as +// a Stringer. If f is a function with the appropriate signature, +// StringerFunc(f) is a Stringer that calls f. +type StringerFunc func() string + +// String implements fmt.Stringer. +func (f StringerFunc) String() string { + return f() +} + +// GotFormatter is used to better print failure messages. If a matcher +// implements GotFormatter, it will use the result from Got when printing +// the failure message. +type GotFormatter interface { + // Got is invoked with the received value. The result is used when + // printing the failure message. + Got(got interface{}) string +} + +// GotFormatterFunc type is an adapter to allow the use of ordinary +// functions as a GotFormatter. If f is a function with the appropriate +// signature, GotFormatterFunc(f) is a GotFormatter that calls f. +type GotFormatterFunc func(got interface{}) string + +// Got implements GotFormatter. 
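The Matcher and GotFormatter contracts above are small enough that a hand-rolled matcher is often all a test needs. A brief illustrative sketch, separate from the vendored file; hasPrefix and the printed strings are made up for the example.

package main

import (
	"fmt"
	"strings"

	"go.uber.org/mock/gomock"
)

// hasPrefix matches any string that starts with the configured prefix.
type hasPrefix struct{ prefix string }

func (m hasPrefix) Matches(x interface{}) bool {
	s, ok := x.(string)
	return ok && strings.HasPrefix(s, m.prefix)
}

func (m hasPrefix) String() string {
	return fmt.Sprintf("has prefix %q", m.prefix)
}

func main() {
	var m gomock.Matcher = hasPrefix{prefix: "user-"}

	// WantFormatter only changes how the expectation is rendered in failure
	// messages; matching behaviour is still delegated to hasPrefix.
	m = gomock.WantFormatter(gomock.StringerFunc(func() string { return "a user id" }), m)

	fmt.Println(m.Matches("user-42"), m.Matches(7), m) // true false a user id
}

Such a matcher can be passed to an expectation wherever a plain value would be, for example mockObj.EXPECT().SomeMethod(hasPrefix{prefix: "user-"}, gomock.Any()).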
+func (f GotFormatterFunc) Got(got interface{}) string { + return f(got) +} + +// GotFormatterAdapter attaches a GotFormatter to a Matcher. +func GotFormatterAdapter(s GotFormatter, m Matcher) Matcher { + return struct { + GotFormatter + Matcher + }{ + GotFormatter: s, + Matcher: m, + } +} + +type anyMatcher struct{} + +func (anyMatcher) Matches(interface{}) bool { + return true +} + +func (anyMatcher) String() string { + return "is anything" +} + +type eqMatcher struct { + x interface{} +} + +func (e eqMatcher) Matches(x interface{}) bool { + // In case, some value is nil + if e.x == nil || x == nil { + return reflect.DeepEqual(e.x, x) + } + + // Check if types assignable and convert them to common type + x1Val := reflect.ValueOf(e.x) + x2Val := reflect.ValueOf(x) + + if x1Val.Type().AssignableTo(x2Val.Type()) { + x1ValConverted := x1Val.Convert(x2Val.Type()) + return reflect.DeepEqual(x1ValConverted.Interface(), x2Val.Interface()) + } + + return false +} + +func (e eqMatcher) String() string { + return fmt.Sprintf("is equal to %v (%T)", e.x, e.x) +} + +type nilMatcher struct{} + +func (nilMatcher) Matches(x interface{}) bool { + if x == nil { + return true + } + + v := reflect.ValueOf(x) + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice: + return v.IsNil() + } + + return false +} + +func (nilMatcher) String() string { + return "is nil" +} + +type notMatcher struct { + m Matcher +} + +func (n notMatcher) Matches(x interface{}) bool { + return !n.m.Matches(x) +} + +func (n notMatcher) String() string { + return "not(" + n.m.String() + ")" +} + +type assignableToTypeOfMatcher struct { + targetType reflect.Type +} + +func (m assignableToTypeOfMatcher) Matches(x interface{}) bool { + return reflect.TypeOf(x).AssignableTo(m.targetType) +} + +func (m assignableToTypeOfMatcher) String() string { + return "is assignable to " + m.targetType.Name() +} + +type allMatcher struct { + matchers []Matcher +} + +func (am allMatcher) Matches(x interface{}) bool { + for _, m := range am.matchers { + if !m.Matches(x) { + return false + } + } + return true +} + +func (am allMatcher) String() string { + ss := make([]string, 0, len(am.matchers)) + for _, matcher := range am.matchers { + ss = append(ss, matcher.String()) + } + return strings.Join(ss, "; ") +} + +type lenMatcher struct { + i int +} + +func (m lenMatcher) Matches(x interface{}) bool { + v := reflect.ValueOf(x) + switch v.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == m.i + default: + return false + } +} + +func (m lenMatcher) String() string { + return fmt.Sprintf("has length %d", m.i) +} + +type inAnyOrderMatcher struct { + x interface{} +} + +func (m inAnyOrderMatcher) Matches(x interface{}) bool { + given, ok := m.prepareValue(x) + if !ok { + return false + } + wanted, ok := m.prepareValue(m.x) + if !ok { + return false + } + + if given.Len() != wanted.Len() { + return false + } + + usedFromGiven := make([]bool, given.Len()) + foundFromWanted := make([]bool, wanted.Len()) + for i := 0; i < wanted.Len(); i++ { + wantedMatcher := Eq(wanted.Index(i).Interface()) + for j := 0; j < given.Len(); j++ { + if usedFromGiven[j] { + continue + } + if wantedMatcher.Matches(given.Index(j).Interface()) { + foundFromWanted[i] = true + usedFromGiven[j] = true + break + } + } + } + + missingFromWanted := 0 + for _, found := range foundFromWanted { + if !found { + missingFromWanted++ + } + } + extraInGiven := 0 + for _, used := range 
usedFromGiven { + if !used { + extraInGiven++ + } + } + + return extraInGiven == 0 && missingFromWanted == 0 +} + +func (m inAnyOrderMatcher) prepareValue(x interface{}) (reflect.Value, bool) { + xValue := reflect.ValueOf(x) + switch xValue.Kind() { + case reflect.Slice, reflect.Array: + return xValue, true + default: + return reflect.Value{}, false + } +} + +func (m inAnyOrderMatcher) String() string { + return fmt.Sprintf("has the same elements as %v", m.x) +} + +// Constructors + +// All returns a composite Matcher that returns true if and only all of the +// matchers return true. +func All(ms ...Matcher) Matcher { return allMatcher{ms} } + +// Any returns a matcher that always matches. +func Any() Matcher { return anyMatcher{} } + +// Eq returns a matcher that matches on equality. +// +// Example usage: +// +// Eq(5).Matches(5) // returns true +// Eq(5).Matches(4) // returns false +func Eq(x interface{}) Matcher { return eqMatcher{x} } + +// Len returns a matcher that matches on length. This matcher returns false if +// is compared to a type that is not an array, chan, map, slice, or string. +func Len(i int) Matcher { + return lenMatcher{i} +} + +// Nil returns a matcher that matches if the received value is nil. +// +// Example usage: +// +// var x *bytes.Buffer +// Nil().Matches(x) // returns true +// x = &bytes.Buffer{} +// Nil().Matches(x) // returns false +func Nil() Matcher { return nilMatcher{} } + +// Not reverses the results of its given child matcher. +// +// Example usage: +// +// Not(Eq(5)).Matches(4) // returns true +// Not(Eq(5)).Matches(5) // returns false +func Not(x interface{}) Matcher { + if m, ok := x.(Matcher); ok { + return notMatcher{m} + } + return notMatcher{Eq(x)} +} + +// AssignableToTypeOf is a Matcher that matches if the parameter to the mock +// function is assignable to the type of the parameter to this function. +// +// Example usage: +// +// var s fmt.Stringer = &bytes.Buffer{} +// AssignableToTypeOf(s).Matches(time.Second) // returns true +// AssignableToTypeOf(s).Matches(99) // returns false +// +// var ctx = reflect.TypeOf((*context.Context)(nil)).Elem() +// AssignableToTypeOf(ctx).Matches(context.Background()) // returns true +func AssignableToTypeOf(x interface{}) Matcher { + if xt, ok := x.(reflect.Type); ok { + return assignableToTypeOfMatcher{xt} + } + return assignableToTypeOfMatcher{reflect.TypeOf(x)} +} + +// InAnyOrder is a Matcher that returns true for collections of the same elements ignoring the order. +// +// Example usage: +// +// InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 3, 2}) // returns true +// InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 2}) // returns false +func InAnyOrder(x interface{}) Matcher { + return inAnyOrderMatcher{x} +} diff --git a/vendor/k8s.io/client-go/testing/actions.go b/vendor/k8s.io/client-go/testing/actions.go new file mode 100644 index 000000000..c8ae0aaf5 --- /dev/null +++ b/vendor/k8s.io/client-go/testing/actions.go @@ -0,0 +1,698 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "fmt" + "path" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +func NewRootGetAction(resource schema.GroupVersionResource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Name = name + + return action +} + +func NewGetAction(resource schema.GroupVersionResource, namespace, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewGetSubresourceAction(resource schema.GroupVersionResource, namespace, subresource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewRootGetSubresourceAction(resource schema.GroupVersionResource, subresource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Subresource = subresource + action.Name = name + + return action +} + +func NewRootListAction(resource schema.GroupVersionResource, kind schema.GroupVersionKind, opts interface{}) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Kind = kind + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewListAction(resource schema.GroupVersionResource, kind schema.GroupVersionKind, namespace string, opts interface{}) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Kind = kind + action.Namespace = namespace + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewRootCreateAction(resource schema.GroupVersionResource, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Object = object + + return action +} + +func NewCreateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource string, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Subresource = subresource + action.Name = name + action.Object = object + + return action +} + +func NewCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource, namespace string, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Namespace = namespace + action.Subresource = subresource + action.Name = name + action.Object = object + + return action +} + +func NewRootUpdateAction(resource schema.GroupVersionResource, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb 
= "update" + action.Resource = resource + action.Object = object + + return action +} + +func NewUpdateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootPatchAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Name = name + action.PatchType = pt + action.Patch = patch + + return action +} + +func NewPatchAction(resource schema.GroupVersionResource, namespace string, name string, pt types.PatchType, patch []byte) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Namespace = namespace + action.Name = name + action.PatchType = pt + action.Patch = patch + + return action +} + +func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Subresource = path.Join(subresources...) + action.Name = name + action.PatchType = pt + action.Patch = patch + + return action +} + +func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Subresource = path.Join(subresources...) + action.Namespace = namespace + action.Name = name + action.PatchType = pt + action.Patch = patch + + return action +} + +func NewRootUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Subresource = subresource + action.Object = object + + return action +} +func NewUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootDeleteAction(resource schema.GroupVersionResource, name string) DeleteActionImpl { + return NewRootDeleteActionWithOptions(resource, name, metav1.DeleteOptions{}) +} + +func NewRootDeleteActionWithOptions(resource schema.GroupVersionResource, name string, opts metav1.DeleteOptions) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Name = name + action.DeleteOptions = opts + + return action +} + +func NewRootDeleteSubresourceAction(resource schema.GroupVersionResource, subresource string, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Subresource = subresource + action.Name = name + + return action +} + +func NewDeleteAction(resource schema.GroupVersionResource, namespace, name string) DeleteActionImpl { + return NewDeleteActionWithOptions(resource, namespace, name, metav1.DeleteOptions{}) +} + +func NewDeleteActionWithOptions(resource schema.GroupVersionResource, namespace, name string, opts 
metav1.DeleteOptions) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Namespace = namespace + action.Name = name + action.DeleteOptions = opts + + return action +} + +func NewDeleteSubresourceAction(resource schema.GroupVersionResource, subresource, namespace, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewRootDeleteCollectionAction(resource schema.GroupVersionResource, opts interface{}) DeleteCollectionActionImpl { + action := DeleteCollectionActionImpl{} + action.Verb = "delete-collection" + action.Resource = resource + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewDeleteCollectionAction(resource schema.GroupVersionResource, namespace string, opts interface{}) DeleteCollectionActionImpl { + action := DeleteCollectionActionImpl{} + action.Verb = "delete-collection" + action.Resource = resource + action.Namespace = namespace + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewRootWatchAction(resource schema.GroupVersionResource, opts interface{}) WatchActionImpl { + action := WatchActionImpl{} + action.Verb = "watch" + action.Resource = resource + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) + action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} + + return action +} + +func ExtractFromListOptions(opts interface{}) (labelSelector labels.Selector, fieldSelector fields.Selector, resourceVersion string) { + var err error + switch t := opts.(type) { + case metav1.ListOptions: + labelSelector, err = labels.Parse(t.LabelSelector) + if err != nil { + panic(fmt.Errorf("invalid selector %q: %v", t.LabelSelector, err)) + } + fieldSelector, err = fields.ParseSelector(t.FieldSelector) + if err != nil { + panic(fmt.Errorf("invalid selector %q: %v", t.FieldSelector, err)) + } + resourceVersion = t.ResourceVersion + default: + panic(fmt.Errorf("expect a ListOptions %T", opts)) + } + if labelSelector == nil { + labelSelector = labels.Everything() + } + if fieldSelector == nil { + fieldSelector = fields.Everything() + } + return labelSelector, fieldSelector, resourceVersion +} + +func NewWatchAction(resource schema.GroupVersionResource, namespace string, opts interface{}) WatchActionImpl { + action := WatchActionImpl{} + action.Verb = "watch" + action.Resource = resource + action.Namespace = namespace + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) + action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} + + return action +} + +func NewProxyGetAction(resource schema.GroupVersionResource, namespace, scheme, name, port, path string, params map[string]string) ProxyGetActionImpl { + action := ProxyGetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Namespace = namespace + action.Scheme = scheme + action.Name = name + action.Port = port + action.Path = path + action.Params = params + return action +} + +type ListRestrictions struct { + Labels labels.Selector + Fields fields.Selector +} +type WatchRestrictions struct { + Labels labels.Selector + Fields 
fields.Selector + ResourceVersion string +} + +type Action interface { + GetNamespace() string + GetVerb() string + GetResource() schema.GroupVersionResource + GetSubresource() string + Matches(verb, resource string) bool + + // DeepCopy is used to copy an action to avoid any risk of accidental mutation. Most people never need to call this + // because the invocation logic deep copies before calls to storage and reactors. + DeepCopy() Action +} + +type GenericAction interface { + Action + GetValue() interface{} +} + +type GetAction interface { + Action + GetName() string +} + +type ListAction interface { + Action + GetListRestrictions() ListRestrictions +} + +type CreateAction interface { + Action + GetObject() runtime.Object +} + +type UpdateAction interface { + Action + GetObject() runtime.Object +} + +type DeleteAction interface { + Action + GetName() string + GetDeleteOptions() metav1.DeleteOptions +} + +type DeleteCollectionAction interface { + Action + GetListRestrictions() ListRestrictions +} + +type PatchAction interface { + Action + GetName() string + GetPatchType() types.PatchType + GetPatch() []byte +} + +type WatchAction interface { + Action + GetWatchRestrictions() WatchRestrictions +} + +type ProxyGetAction interface { + Action + GetScheme() string + GetName() string + GetPort() string + GetPath() string + GetParams() map[string]string +} + +type ActionImpl struct { + Namespace string + Verb string + Resource schema.GroupVersionResource + Subresource string +} + +func (a ActionImpl) GetNamespace() string { + return a.Namespace +} +func (a ActionImpl) GetVerb() string { + return a.Verb +} +func (a ActionImpl) GetResource() schema.GroupVersionResource { + return a.Resource +} +func (a ActionImpl) GetSubresource() string { + return a.Subresource +} +func (a ActionImpl) Matches(verb, resource string) bool { + // Stay backwards compatible. 
+ if !strings.Contains(resource, "/") { + return strings.EqualFold(verb, a.Verb) && + strings.EqualFold(resource, a.Resource.Resource) + } + + parts := strings.SplitN(resource, "/", 2) + topresource, subresource := parts[0], parts[1] + + return strings.EqualFold(verb, a.Verb) && + strings.EqualFold(topresource, a.Resource.Resource) && + strings.EqualFold(subresource, a.Subresource) +} +func (a ActionImpl) DeepCopy() Action { + ret := a + return ret +} + +type GenericActionImpl struct { + ActionImpl + Value interface{} +} + +func (a GenericActionImpl) GetValue() interface{} { + return a.Value +} + +func (a GenericActionImpl) DeepCopy() Action { + return GenericActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + // TODO this is wrong, but no worse than before + Value: a.Value, + } +} + +type GetActionImpl struct { + ActionImpl + Name string +} + +func (a GetActionImpl) GetName() string { + return a.Name +} + +func (a GetActionImpl) DeepCopy() Action { + return GetActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + } +} + +type ListActionImpl struct { + ActionImpl + Kind schema.GroupVersionKind + Name string + ListRestrictions ListRestrictions +} + +func (a ListActionImpl) GetKind() schema.GroupVersionKind { + return a.Kind +} + +func (a ListActionImpl) GetListRestrictions() ListRestrictions { + return a.ListRestrictions +} + +func (a ListActionImpl) DeepCopy() Action { + return ListActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Kind: a.Kind, + Name: a.Name, + ListRestrictions: ListRestrictions{ + Labels: a.ListRestrictions.Labels.DeepCopySelector(), + Fields: a.ListRestrictions.Fields.DeepCopySelector(), + }, + } +} + +type CreateActionImpl struct { + ActionImpl + Name string + Object runtime.Object +} + +func (a CreateActionImpl) GetObject() runtime.Object { + return a.Object +} + +func (a CreateActionImpl) DeepCopy() Action { + return CreateActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + Object: a.Object.DeepCopyObject(), + } +} + +type UpdateActionImpl struct { + ActionImpl + Object runtime.Object +} + +func (a UpdateActionImpl) GetObject() runtime.Object { + return a.Object +} + +func (a UpdateActionImpl) DeepCopy() Action { + return UpdateActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Object: a.Object.DeepCopyObject(), + } +} + +type PatchActionImpl struct { + ActionImpl + Name string + PatchType types.PatchType + Patch []byte +} + +func (a PatchActionImpl) GetName() string { + return a.Name +} + +func (a PatchActionImpl) GetPatch() []byte { + return a.Patch +} + +func (a PatchActionImpl) GetPatchType() types.PatchType { + return a.PatchType +} + +func (a PatchActionImpl) DeepCopy() Action { + patch := make([]byte, len(a.Patch)) + copy(patch, a.Patch) + return PatchActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + PatchType: a.PatchType, + Patch: patch, + } +} + +type DeleteActionImpl struct { + ActionImpl + Name string + DeleteOptions metav1.DeleteOptions +} + +func (a DeleteActionImpl) GetName() string { + return a.Name +} + +func (a DeleteActionImpl) GetDeleteOptions() metav1.DeleteOptions { + return a.DeleteOptions +} + +func (a DeleteActionImpl) DeepCopy() Action { + return DeleteActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + DeleteOptions: *a.DeleteOptions.DeepCopy(), + } +} + +type DeleteCollectionActionImpl struct { + ActionImpl + ListRestrictions ListRestrictions +} + +func (a 
DeleteCollectionActionImpl) GetListRestrictions() ListRestrictions { + return a.ListRestrictions +} + +func (a DeleteCollectionActionImpl) DeepCopy() Action { + return DeleteCollectionActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + ListRestrictions: ListRestrictions{ + Labels: a.ListRestrictions.Labels.DeepCopySelector(), + Fields: a.ListRestrictions.Fields.DeepCopySelector(), + }, + } +} + +type WatchActionImpl struct { + ActionImpl + WatchRestrictions WatchRestrictions +} + +func (a WatchActionImpl) GetWatchRestrictions() WatchRestrictions { + return a.WatchRestrictions +} + +func (a WatchActionImpl) DeepCopy() Action { + return WatchActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + WatchRestrictions: WatchRestrictions{ + Labels: a.WatchRestrictions.Labels.DeepCopySelector(), + Fields: a.WatchRestrictions.Fields.DeepCopySelector(), + ResourceVersion: a.WatchRestrictions.ResourceVersion, + }, + } +} + +type ProxyGetActionImpl struct { + ActionImpl + Scheme string + Name string + Port string + Path string + Params map[string]string +} + +func (a ProxyGetActionImpl) GetScheme() string { + return a.Scheme +} + +func (a ProxyGetActionImpl) GetName() string { + return a.Name +} + +func (a ProxyGetActionImpl) GetPort() string { + return a.Port +} + +func (a ProxyGetActionImpl) GetPath() string { + return a.Path +} + +func (a ProxyGetActionImpl) GetParams() map[string]string { + return a.Params +} + +func (a ProxyGetActionImpl) DeepCopy() Action { + params := map[string]string{} + for k, v := range a.Params { + params[k] = v + } + return ProxyGetActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Scheme: a.Scheme, + Name: a.Name, + Port: a.Port, + Path: a.Path, + Params: params, + } +} diff --git a/vendor/k8s.io/client-go/testing/fake.go b/vendor/k8s.io/client-go/testing/fake.go new file mode 100644 index 000000000..3ab9c1b07 --- /dev/null +++ b/vendor/k8s.io/client-go/testing/fake.go @@ -0,0 +1,220 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "sync" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" +) + +// Fake implements client.Interface. Meant to be embedded into a struct to get +// a default implementation. This makes faking out just the method you want to +// test easier. +type Fake struct { + sync.RWMutex + actions []Action // these may be castable to other types, but "Action" is the minimum + + // ReactionChain is the list of reactors that will be attempted for every + // request in the order they are tried. + ReactionChain []Reactor + // WatchReactionChain is the list of watch reactors that will be attempted + // for every request in the order they are tried. + WatchReactionChain []WatchReactor + // ProxyReactionChain is the list of proxy reactors that will be attempted + // for every request in the order they are tried. 
+ ProxyReactionChain []ProxyReactor + + Resources []*metav1.APIResourceList +} + +// Reactor is an interface to allow the composition of reaction functions. +type Reactor interface { + // Handles indicates whether or not this Reactor deals with a given + // action. + Handles(action Action) bool + // React handles the action and returns results. It may choose to + // delegate by indicated handled=false. + React(action Action) (handled bool, ret runtime.Object, err error) +} + +// WatchReactor is an interface to allow the composition of watch functions. +type WatchReactor interface { + // Handles indicates whether or not this Reactor deals with a given + // action. + Handles(action Action) bool + // React handles a watch action and returns results. It may choose to + // delegate by indicating handled=false. + React(action Action) (handled bool, ret watch.Interface, err error) +} + +// ProxyReactor is an interface to allow the composition of proxy get +// functions. +type ProxyReactor interface { + // Handles indicates whether or not this Reactor deals with a given + // action. + Handles(action Action) bool + // React handles a watch action and returns results. It may choose to + // delegate by indicating handled=false. + React(action Action) (handled bool, ret restclient.ResponseWrapper, err error) +} + +// ReactionFunc is a function that returns an object or error for a given +// Action. If "handled" is false, then the test client will ignore the +// results and continue to the next ReactionFunc. A ReactionFunc can describe +// reactions on subresources by testing the result of the action's +// GetSubresource() method. +type ReactionFunc func(action Action) (handled bool, ret runtime.Object, err error) + +// WatchReactionFunc is a function that returns a watch interface. If +// "handled" is false, then the test client will ignore the results and +// continue to the next ReactionFunc. +type WatchReactionFunc func(action Action) (handled bool, ret watch.Interface, err error) + +// ProxyReactionFunc is a function that returns a ResponseWrapper interface +// for a given Action. If "handled" is false, then the test client will +// ignore the results and continue to the next ProxyReactionFunc. +type ProxyReactionFunc func(action Action) (handled bool, ret restclient.ResponseWrapper, err error) + +// AddReactor appends a reactor to the end of the chain. +func (c *Fake) AddReactor(verb, resource string, reaction ReactionFunc) { + c.ReactionChain = append(c.ReactionChain, &SimpleReactor{verb, resource, reaction}) +} + +// PrependReactor adds a reactor to the beginning of the chain. +func (c *Fake) PrependReactor(verb, resource string, reaction ReactionFunc) { + c.ReactionChain = append([]Reactor{&SimpleReactor{verb, resource, reaction}}, c.ReactionChain...) +} + +// AddWatchReactor appends a reactor to the end of the chain. +func (c *Fake) AddWatchReactor(resource string, reaction WatchReactionFunc) { + c.Lock() + defer c.Unlock() + c.WatchReactionChain = append(c.WatchReactionChain, &SimpleWatchReactor{resource, reaction}) +} + +// PrependWatchReactor adds a reactor to the beginning of the chain. +func (c *Fake) PrependWatchReactor(resource string, reaction WatchReactionFunc) { + c.Lock() + defer c.Unlock() + c.WatchReactionChain = append([]WatchReactor{&SimpleWatchReactor{resource, reaction}}, c.WatchReactionChain...) +} + +// AddProxyReactor appends a reactor to the end of the chain. 
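For orientation on the reactor chain above, a small sketch of driving a bare Fake directly. Production tests normally go through a generated fake clientset that embeds Fake; the pod object and names here are arbitrary, and the sketch is not part of the vendored file.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	clienttesting "k8s.io/client-go/testing"
)

func main() {
	f := &clienttesting.Fake{}

	// Answer every "get" on pods with a canned object. Returning handled=true
	// stops the chain, so reactors added later are not consulted for this verb.
	f.AddReactor("get", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
		name := action.(clienttesting.GetAction).GetName()
		pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: action.GetNamespace(), Name: name}}
		return true, pod, nil
	})

	gvr := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
	obj, err := f.Invokes(clienttesting.NewGetAction(gvr, "default", "web-0"), nil)

	fmt.Println(obj.(*corev1.Pod).Name, err) // web-0 <nil>
	fmt.Println(len(f.Actions()))            // 1, Invokes records every action
}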
+func (c *Fake) AddProxyReactor(resource string, reaction ProxyReactionFunc) { + c.ProxyReactionChain = append(c.ProxyReactionChain, &SimpleProxyReactor{resource, reaction}) +} + +// PrependProxyReactor adds a reactor to the beginning of the chain. +func (c *Fake) PrependProxyReactor(resource string, reaction ProxyReactionFunc) { + c.ProxyReactionChain = append([]ProxyReactor{&SimpleProxyReactor{resource, reaction}}, c.ProxyReactionChain...) +} + +// Invokes records the provided Action and then invokes the ReactionFunc that +// handles the action if one exists. defaultReturnObj is expected to be of the +// same type a normal call would return. +func (c *Fake) Invokes(action Action, defaultReturnObj runtime.Object) (runtime.Object, error) { + c.Lock() + defer c.Unlock() + + actionCopy := action.DeepCopy() + c.actions = append(c.actions, action.DeepCopy()) + for _, reactor := range c.ReactionChain { + if !reactor.Handles(actionCopy) { + continue + } + + handled, ret, err := reactor.React(actionCopy) + if !handled { + continue + } + + return ret, err + } + + return defaultReturnObj, nil +} + +// InvokesWatch records the provided Action and then invokes the ReactionFunc +// that handles the action if one exists. +func (c *Fake) InvokesWatch(action Action) (watch.Interface, error) { + c.Lock() + defer c.Unlock() + + actionCopy := action.DeepCopy() + c.actions = append(c.actions, action.DeepCopy()) + for _, reactor := range c.WatchReactionChain { + if !reactor.Handles(actionCopy) { + continue + } + + handled, ret, err := reactor.React(actionCopy) + if !handled { + continue + } + + return ret, err + } + + return nil, fmt.Errorf("unhandled watch: %#v", action) +} + +// InvokesProxy records the provided Action and then invokes the ReactionFunc +// that handles the action if one exists. +func (c *Fake) InvokesProxy(action Action) restclient.ResponseWrapper { + c.Lock() + defer c.Unlock() + + actionCopy := action.DeepCopy() + c.actions = append(c.actions, action.DeepCopy()) + for _, reactor := range c.ProxyReactionChain { + if !reactor.Handles(actionCopy) { + continue + } + + handled, ret, err := reactor.React(actionCopy) + if !handled || err != nil { + continue + } + + return ret + } + + return nil +} + +// ClearActions clears the history of actions called on the fake client. +func (c *Fake) ClearActions() { + c.Lock() + defer c.Unlock() + + c.actions = make([]Action, 0) +} + +// Actions returns a chronologically ordered slice fake actions called on the +// fake client. +func (c *Fake) Actions() []Action { + c.RLock() + defer c.RUnlock() + fa := make([]Action, len(c.actions)) + copy(fa, c.actions) + return fa +} diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go new file mode 100644 index 000000000..396840670 --- /dev/null +++ b/vendor/k8s.io/client-go/testing/fixture.go @@ -0,0 +1,581 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "fmt" + "reflect" + "sort" + "strings" + "sync" + + jsonpatch "github.com/evanphx/json-patch" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" +) + +// ObjectTracker keeps track of objects. It is intended to be used to +// fake calls to a server by returning objects based on their kind, +// namespace and name. +type ObjectTracker interface { + // Add adds an object to the tracker. If object being added + // is a list, its items are added separately. + Add(obj runtime.Object) error + + // Get retrieves the object by its kind, namespace and name. + Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) + + // Create adds an object to the tracker in the specified namespace. + Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error + + // Update updates an existing object in the tracker in the specified namespace. + Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error + + // List retrieves all objects of a given kind in the given + // namespace. Only non-List kinds are accepted. + List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) + + // Delete deletes an existing object from the tracker. If object + // didn't exist in the tracker prior to deletion, Delete returns + // no error. + Delete(gvr schema.GroupVersionResource, ns, name string) error + + // Watch watches objects from the tracker. Watch returns a channel + // which will push added / modified / deleted object. + Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) +} + +// ObjectScheme abstracts the implementation of common operations on objects. +type ObjectScheme interface { + runtime.ObjectCreater + runtime.ObjectTyper +} + +// ObjectReaction returns a ReactionFunc that applies core.Action to +// the given tracker. +func ObjectReaction(tracker ObjectTracker) ReactionFunc { + return func(action Action) (bool, runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + // Here and below we need to switch on implementation types, + // not on interfaces, as some interfaces are identical + // (e.g. UpdateAction and CreateAction), so if we use them, + // updates and creates end up matching the same case branch. + switch action := action.(type) { + + case ListActionImpl: + obj, err := tracker.List(gvr, action.GetKind(), ns) + return true, obj, err + + case GetActionImpl: + obj, err := tracker.Get(gvr, ns, action.GetName()) + return true, obj, err + + case CreateActionImpl: + objMeta, err := meta.Accessor(action.GetObject()) + if err != nil { + return true, nil, err + } + if action.GetSubresource() == "" { + err = tracker.Create(gvr, action.GetObject(), ns) + } else { + oldObj, getOldObjErr := tracker.Get(gvr, ns, objMeta.GetName()) + if getOldObjErr != nil { + return true, nil, getOldObjErr + } + // Check whether the existing historical object type is the same as the current operation object type that needs to be updated, and if it is the same, perform the update operation. 
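+				// (Illustrative note, not from upstream: a create on e.g. the "scale"
+				// subresource carries a Scale object while the tracker holds the parent
+				// resource, so the types differ and the submitted object is returned
+				// below without being persisted.)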
+ if reflect.TypeOf(oldObj) == reflect.TypeOf(action.GetObject()) { + // TODO: Currently we're handling subresource creation as an update + // on the enclosing resource. This works for some subresources but + // might not be generic enough. + err = tracker.Update(gvr, action.GetObject(), ns) + } else { + // If the historical object type is different from the current object type, need to make sure we return the object submitted,don't persist the submitted object in the tracker. + return true, action.GetObject(), nil + } + } + if err != nil { + return true, nil, err + } + obj, err := tracker.Get(gvr, ns, objMeta.GetName()) + return true, obj, err + + case UpdateActionImpl: + objMeta, err := meta.Accessor(action.GetObject()) + if err != nil { + return true, nil, err + } + err = tracker.Update(gvr, action.GetObject(), ns) + if err != nil { + return true, nil, err + } + obj, err := tracker.Get(gvr, ns, objMeta.GetName()) + return true, obj, err + + case DeleteActionImpl: + err := tracker.Delete(gvr, ns, action.GetName()) + if err != nil { + return true, nil, err + } + return true, nil, nil + + case PatchActionImpl: + obj, err := tracker.Get(gvr, ns, action.GetName()) + if err != nil { + return true, nil, err + } + + old, err := json.Marshal(obj) + if err != nil { + return true, nil, err + } + + // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields + // in obj that are removed by patch are cleared + value := reflect.ValueOf(obj) + value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) + + switch action.GetPatchType() { + case types.JSONPatchType: + patch, err := jsonpatch.DecodePatch(action.GetPatch()) + if err != nil { + return true, nil, err + } + modified, err := patch.Apply(old) + if err != nil { + return true, nil, err + } + + if err = json.Unmarshal(modified, obj); err != nil { + return true, nil, err + } + case types.MergePatchType: + modified, err := jsonpatch.MergePatch(old, action.GetPatch()) + if err != nil { + return true, nil, err + } + + if err := json.Unmarshal(modified, obj); err != nil { + return true, nil, err + } + case types.StrategicMergePatchType, types.ApplyPatchType: + mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err != nil { + return true, nil, err + } + if err = json.Unmarshal(mergedByte, obj); err != nil { + return true, nil, err + } + default: + return true, nil, fmt.Errorf("PatchType is not supported") + } + + if err = tracker.Update(gvr, obj, ns); err != nil { + return true, nil, err + } + + return true, obj, nil + + default: + return false, nil, fmt.Errorf("no reaction implemented for %s", action) + } + } +} + +type tracker struct { + scheme ObjectScheme + decoder runtime.Decoder + lock sync.RWMutex + objects map[schema.GroupVersionResource]map[types.NamespacedName]runtime.Object + // The value type of watchers is a map of which the key is either a namespace or + // all/non namespace aka "" and its value is list of fake watchers. + // Manipulations on resources will broadcast the notification events into the + // watchers' channel. Note that too many unhandled events (currently 100, + // see apimachinery/pkg/watch.DefaultChanSize) will cause a panic. + watchers map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher +} + +var _ ObjectTracker = &tracker{} + +// NewObjectTracker returns an ObjectTracker that can be used to keep track +// of objects for the fake clientset. Mostly useful for unit tests. 
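+//
+// A minimal usage sketch (illustrative; the scheme, codecs, and corev1/metav1
+// values are assumptions about the caller's API machinery test setup):
+//
+//	tracker := NewObjectTracker(scheme, codecs.UniversalDecoder())
+//	_ = tracker.Add(&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "cfg"}})
+//	fakeClient := &Fake{}
+//	fakeClient.AddReactor("*", "*", ObjectReaction(tracker))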
+func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder) ObjectTracker { + return &tracker{ + scheme: scheme, + decoder: decoder, + objects: make(map[schema.GroupVersionResource]map[types.NamespacedName]runtime.Object), + watchers: make(map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher), + } +} + +func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) { + // Heuristic for list kind: original kind + List suffix. Might + // not always be true but this tracker has a pretty limited + // understanding of the actual API model. + listGVK := gvk + listGVK.Kind = listGVK.Kind + "List" + // GVK does have the concept of "internal version". The scheme recognizes + // the runtime.APIVersionInternal, but not the empty string. + if listGVK.Version == "" { + listGVK.Version = runtime.APIVersionInternal + } + + list, err := t.scheme.New(listGVK) + if err != nil { + return nil, err + } + + if !meta.IsListType(list) { + return nil, fmt.Errorf("%q is not a list type", listGVK.Kind) + } + + t.lock.RLock() + defer t.lock.RUnlock() + + objs, ok := t.objects[gvr] + if !ok { + return list, nil + } + + matchingObjs, err := filterByNamespace(objs, ns) + if err != nil { + return nil, err + } + if err := meta.SetList(list, matchingObjs); err != nil { + return nil, err + } + return list.DeepCopyObject(), nil +} + +func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) { + t.lock.Lock() + defer t.lock.Unlock() + + fakewatcher := watch.NewRaceFreeFake() + + if _, exists := t.watchers[gvr]; !exists { + t.watchers[gvr] = make(map[string][]*watch.RaceFreeFakeWatcher) + } + t.watchers[gvr][ns] = append(t.watchers[gvr][ns], fakewatcher) + return fakewatcher, nil +} + +func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) { + errNotFound := errors.NewNotFound(gvr.GroupResource(), name) + + t.lock.RLock() + defer t.lock.RUnlock() + + objs, ok := t.objects[gvr] + if !ok { + return nil, errNotFound + } + + matchingObj, ok := objs[types.NamespacedName{Namespace: ns, Name: name}] + if !ok { + return nil, errNotFound + } + + // Only one object should match in the tracker if it works + // correctly, as Add/Update methods enforce kind/namespace/name + // uniqueness. + obj := matchingObj.DeepCopyObject() + if status, ok := obj.(*metav1.Status); ok { + if status.Status != metav1.StatusSuccess { + return nil, &errors.StatusError{ErrStatus: *status} + } + } + + return obj, nil +} + +func (t *tracker) Add(obj runtime.Object) error { + if meta.IsListType(obj) { + return t.addList(obj, false) + } + objMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + gvks, _, err := t.scheme.ObjectKinds(obj) + if err != nil { + return err + } + + if partial, ok := obj.(*metav1.PartialObjectMetadata); ok && len(partial.TypeMeta.APIVersion) > 0 { + gvks = []schema.GroupVersionKind{partial.TypeMeta.GroupVersionKind()} + } + + if len(gvks) == 0 { + return fmt.Errorf("no registered kinds for %v", obj) + } + for _, gvk := range gvks { + // NOTE: UnsafeGuessKindToResource is a heuristic and default match. The + // actual registration in apiserver can specify arbitrary route for a + // gvk. If a test uses such objects, it cannot preset the tracker with + // objects via Add(). Instead, it should trigger the Create() function + // of the tracker, where an arbitrary gvr can be specified. 
+ gvr, _ := meta.UnsafeGuessKindToResource(gvk) + // Resource doesn't have the concept of "__internal" version, just set it to "". + if gvr.Version == runtime.APIVersionInternal { + gvr.Version = "" + } + + err := t.add(gvr, obj, objMeta.GetNamespace(), false) + if err != nil { + return err + } + } + return nil +} + +func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + return t.add(gvr, obj, ns, false) +} + +func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + return t.add(gvr, obj, ns, true) +} + +func (t *tracker) getWatches(gvr schema.GroupVersionResource, ns string) []*watch.RaceFreeFakeWatcher { + watches := []*watch.RaceFreeFakeWatcher{} + if t.watchers[gvr] != nil { + if w := t.watchers[gvr][ns]; w != nil { + watches = append(watches, w...) + } + if ns != metav1.NamespaceAll { + if w := t.watchers[gvr][metav1.NamespaceAll]; w != nil { + watches = append(watches, w...) + } + } + } + return watches +} + +func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns string, replaceExisting bool) error { + t.lock.Lock() + defer t.lock.Unlock() + + gr := gvr.GroupResource() + + // To avoid the object from being accidentally modified by caller + // after it's been added to the tracker, we always store the deep + // copy. + obj = obj.DeepCopyObject() + + newMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + + // Propagate namespace to the new object if hasn't already been set. + if len(newMeta.GetNamespace()) == 0 { + newMeta.SetNamespace(ns) + } + + if ns != newMeta.GetNamespace() { + msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace()) + return errors.NewBadRequest(msg) + } + + _, ok := t.objects[gvr] + if !ok { + t.objects[gvr] = make(map[types.NamespacedName]runtime.Object) + } + + namespacedName := types.NamespacedName{Namespace: newMeta.GetNamespace(), Name: newMeta.GetName()} + if _, ok = t.objects[gvr][namespacedName]; ok { + if replaceExisting { + for _, w := range t.getWatches(gvr, ns) { + // To avoid the object from being accidentally modified by watcher + w.Modify(obj.DeepCopyObject()) + } + t.objects[gvr][namespacedName] = obj + return nil + } + return errors.NewAlreadyExists(gr, newMeta.GetName()) + } + + if replaceExisting { + // Tried to update but no matching object was found. 
+ return errors.NewNotFound(gr, newMeta.GetName()) + } + + t.objects[gvr][namespacedName] = obj + + for _, w := range t.getWatches(gvr, ns) { + // To avoid the object from being accidentally modified by watcher + w.Add(obj.DeepCopyObject()) + } + + return nil +} + +func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error { + list, err := meta.ExtractList(obj) + if err != nil { + return err + } + errs := runtime.DecodeList(list, t.decoder) + if len(errs) > 0 { + return errs[0] + } + for _, obj := range list { + if err := t.Add(obj); err != nil { + return err + } + } + return nil +} + +func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error { + t.lock.Lock() + defer t.lock.Unlock() + + objs, ok := t.objects[gvr] + if !ok { + return errors.NewNotFound(gvr.GroupResource(), name) + } + + namespacedName := types.NamespacedName{Namespace: ns, Name: name} + obj, ok := objs[namespacedName] + if !ok { + return errors.NewNotFound(gvr.GroupResource(), name) + } + + delete(objs, namespacedName) + for _, w := range t.getWatches(gvr, ns) { + w.Delete(obj.DeepCopyObject()) + } + return nil +} + +// filterByNamespace returns all objects in the collection that +// match provided namespace. Empty namespace matches +// non-namespaced objects. +func filterByNamespace(objs map[types.NamespacedName]runtime.Object, ns string) ([]runtime.Object, error) { + var res []runtime.Object + + for _, obj := range objs { + acc, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + if ns != "" && acc.GetNamespace() != ns { + continue + } + res = append(res, obj) + } + + // Sort res to get deterministic order. + sort.Slice(res, func(i, j int) bool { + acc1, _ := meta.Accessor(res[i]) + acc2, _ := meta.Accessor(res[j]) + if acc1.GetNamespace() != acc2.GetNamespace() { + return acc1.GetNamespace() < acc2.GetNamespace() + } + return acc1.GetName() < acc2.GetName() + }) + return res, nil +} + +func DefaultWatchReactor(watchInterface watch.Interface, err error) WatchReactionFunc { + return func(action Action) (bool, watch.Interface, error) { + return true, watchInterface, err + } +} + +// SimpleReactor is a Reactor. Each reaction function is attached to a given verb,resource tuple. "*" in either field matches everything for that value. +// For instance, *,pods matches all verbs on pods. This allows for easier composition of reaction functions +type SimpleReactor struct { + Verb string + Resource string + + Reaction ReactionFunc +} + +func (r *SimpleReactor) Handles(action Action) bool { + verbCovers := r.Verb == "*" || r.Verb == action.GetVerb() + if !verbCovers { + return false + } + + return resourceCovers(r.Resource, action) +} + +func (r *SimpleReactor) React(action Action) (bool, runtime.Object, error) { + return r.Reaction(action) +} + +// SimpleWatchReactor is a WatchReactor. Each reaction function is attached to a given resource. "*" matches everything for that value. +// For instance, *,pods matches all verbs on pods. This allows for easier composition of reaction functions +type SimpleWatchReactor struct { + Resource string + + Reaction WatchReactionFunc +} + +func (r *SimpleWatchReactor) Handles(action Action) bool { + return resourceCovers(r.Resource, action) +} + +func (r *SimpleWatchReactor) React(action Action) (bool, watch.Interface, error) { + return r.Reaction(action) +} + +// SimpleProxyReactor is a ProxyReactor. Each reaction function is attached to a given resource. "*" matches everything for that value. +// For instance, *,pods matches all verbs on pods. 
This allows for easier composition of reaction functions. +type SimpleProxyReactor struct { + Resource string + + Reaction ProxyReactionFunc +} + +func (r *SimpleProxyReactor) Handles(action Action) bool { + return resourceCovers(r.Resource, action) +} + +func (r *SimpleProxyReactor) React(action Action) (bool, restclient.ResponseWrapper, error) { + return r.Reaction(action) +} + +func resourceCovers(resource string, action Action) bool { + if resource == "*" { + return true + } + + if resource == action.GetResource().Resource { + return true + } + + if index := strings.Index(resource, "/"); index != -1 && + resource[:index] == action.GetResource().Resource && + resource[index+1:] == action.GetSubresource() { + return true + } + + return false +} diff --git a/vendor/k8s.io/client-go/testing/interface.go b/vendor/k8s.io/client-go/testing/interface.go new file mode 100644 index 000000000..266c6ba3f --- /dev/null +++ b/vendor/k8s.io/client-go/testing/interface.go @@ -0,0 +1,66 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" +) + +type FakeClient interface { + // Tracker gives access to the ObjectTracker internal to the fake client. + Tracker() ObjectTracker + + // AddReactor appends a reactor to the end of the chain. + AddReactor(verb, resource string, reaction ReactionFunc) + + // PrependReactor adds a reactor to the beginning of the chain. + PrependReactor(verb, resource string, reaction ReactionFunc) + + // AddWatchReactor appends a reactor to the end of the chain. + AddWatchReactor(resource string, reaction WatchReactionFunc) + + // PrependWatchReactor adds a reactor to the beginning of the chain. + PrependWatchReactor(resource string, reaction WatchReactionFunc) + + // AddProxyReactor appends a reactor to the end of the chain. + AddProxyReactor(resource string, reaction ProxyReactionFunc) + + // PrependProxyReactor adds a reactor to the beginning of the chain. + PrependProxyReactor(resource string, reaction ProxyReactionFunc) + + // Invokes records the provided Action and then invokes the ReactionFunc that + // handles the action if one exists. defaultReturnObj is expected to be of the + // same type a normal call would return. + Invokes(action Action, defaultReturnObj runtime.Object) (runtime.Object, error) + + // InvokesWatch records the provided Action and then invokes the ReactionFunc + // that handles the action if one exists. + InvokesWatch(action Action) (watch.Interface, error) + + // InvokesProxy records the provided Action and then invokes the ReactionFunc + // that handles the action if one exists. + InvokesProxy(action Action) restclient.ResponseWrapper + + // ClearActions clears the history of actions called on the fake client. + ClearActions() + + // Actions returns a chronologically ordered slice fake actions called on the + // fake client. 
+ Actions() []Action +} diff --git a/vendor/k8s.io/component-base/featuregate/OWNERS b/vendor/k8s.io/component-base/featuregate/OWNERS new file mode 100644 index 000000000..b2f165b6d --- /dev/null +++ b/vendor/k8s.io/component-base/featuregate/OWNERS @@ -0,0 +1,16 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# Currently assigned to api-approvers since feature gates are the API +# for enabling/disabling other APIs. + +# Disable inheritance as this is an api owners file +options: + no_parent_owners: true +approvers: + - api-approvers +reviewers: + - api-reviewers +labels: + - kind/api-change + - sig/api-machinery + - sig/cluster-lifecycle diff --git a/vendor/k8s.io/component-base/featuregate/feature_gate.go b/vendor/k8s.io/component-base/featuregate/feature_gate.go new file mode 100644 index 000000000..a826b0e67 --- /dev/null +++ b/vendor/k8s.io/component-base/featuregate/feature_gate.go @@ -0,0 +1,385 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package featuregate + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/util/naming" + featuremetrics "k8s.io/component-base/metrics/prometheus/feature" + "k8s.io/klog/v2" +) + +type Feature string + +const ( + flagName = "feature-gates" + + // allAlphaGate is a global toggle for alpha features. Per-feature key + // values override the default set by allAlphaGate. Examples: + // AllAlpha=false,NewFeature=true will result in newFeature=true + // AllAlpha=true,NewFeature=false will result in newFeature=false + allAlphaGate Feature = "AllAlpha" + + // allBetaGate is a global toggle for beta features. Per-feature key + // values override the default set by allBetaGate. Examples: + // AllBeta=false,NewFeature=true will result in NewFeature=true + // AllBeta=true,NewFeature=false will result in NewFeature=false + allBetaGate Feature = "AllBeta" +) + +var ( + // The generic features. + defaultFeatures = map[Feature]FeatureSpec{ + allAlphaGate: {Default: false, PreRelease: Alpha}, + allBetaGate: {Default: false, PreRelease: Beta}, + } + + // Special handling for a few gates. + specialFeatures = map[Feature]func(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool){ + allAlphaGate: setUnsetAlphaGates, + allBetaGate: setUnsetBetaGates, + } +) + +type FeatureSpec struct { + // Default is the default enablement state for the feature + Default bool + // LockToDefault indicates that the feature is locked to its default and cannot be changed + LockToDefault bool + // PreRelease indicates the maturity level of the feature + PreRelease prerelease +} + +type prerelease string + +const ( + // Values for PreRelease. + Alpha = prerelease("ALPHA") + Beta = prerelease("BETA") + GA = prerelease("") + + // Deprecated + Deprecated = prerelease("DEPRECATED") +) + +// FeatureGate indicates whether a given feature is enabled or not +type FeatureGate interface { + // Enabled returns true if the key is enabled. 
+ Enabled(key Feature) bool + // KnownFeatures returns a slice of strings describing the FeatureGate's known features. + KnownFeatures() []string + // DeepCopy returns a deep copy of the FeatureGate object, such that gates can be + // set on the copy without mutating the original. This is useful for validating + // config against potential feature gate changes before committing those changes. + DeepCopy() MutableFeatureGate +} + +// MutableFeatureGate parses and stores flag gates for known features from +// a string like feature1=true,feature2=false,... +type MutableFeatureGate interface { + FeatureGate + + // AddFlag adds a flag for setting global feature gates to the specified FlagSet. + AddFlag(fs *pflag.FlagSet) + // Set parses and stores flag gates for known features + // from a string like feature1=true,feature2=false,... + Set(value string) error + // SetFromMap stores flag gates for known features from a map[string]bool or returns an error + SetFromMap(m map[string]bool) error + // Add adds features to the featureGate. + Add(features map[Feature]FeatureSpec) error + // GetAll returns a copy of the map of known feature names to feature specs. + GetAll() map[Feature]FeatureSpec + // AddMetrics adds feature enablement metrics + AddMetrics() +} + +// featureGate implements FeatureGate as well as pflag.Value for flag parsing. +type featureGate struct { + featureGateName string + + special map[Feature]func(map[Feature]FeatureSpec, map[Feature]bool, bool) + + // lock guards writes to known, enabled, and reads/writes of closed + lock sync.Mutex + // known holds a map[Feature]FeatureSpec + known *atomic.Value + // enabled holds a map[Feature]bool + enabled *atomic.Value + // closed is set to true when AddFlag is called, and prevents subsequent calls to Add + closed bool +} + +func setUnsetAlphaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) { + for k, v := range known { + if v.PreRelease == Alpha { + if _, found := enabled[k]; !found { + enabled[k] = val + } + } + } +} + +func setUnsetBetaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) { + for k, v := range known { + if v.PreRelease == Beta { + if _, found := enabled[k]; !found { + enabled[k] = val + } + } + } +} + +// Set, String, and Type implement pflag.Value +var _ pflag.Value = &featureGate{} + +// internalPackages are packages that ignored when creating a name for featureGates. These packages are in the common +// call chains, so they'd be unhelpful as names. +var internalPackages = []string{"k8s.io/component-base/featuregate/feature_gate.go"} + +func NewFeatureGate() *featureGate { + known := map[Feature]FeatureSpec{} + for k, v := range defaultFeatures { + known[k] = v + } + + knownValue := &atomic.Value{} + knownValue.Store(known) + + enabled := map[Feature]bool{} + enabledValue := &atomic.Value{} + enabledValue.Store(enabled) + + f := &featureGate{ + featureGateName: naming.GetNameFromCallsite(internalPackages...), + known: knownValue, + special: specialFeatures, + enabled: enabledValue, + } + return f +} + +// Set parses a string of the form "key1=value1,key2=value2,..." into a +// map[string]bool of known keys or returns an error. 
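+//
+// For example (illustrative feature name, not a registered default):
+//
+//	fg := NewFeatureGate()
+//	_ = fg.Add(map[Feature]FeatureSpec{"MyFeature": {Default: false, PreRelease: Alpha}})
+//	if err := fg.Set("MyFeature=true,AllAlpha=false"); err != nil {
+//		// handle the unrecognized-gate or parse error
+//	}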
+func (f *featureGate) Set(value string) error { + m := make(map[string]bool) + for _, s := range strings.Split(value, ",") { + if len(s) == 0 { + continue + } + arr := strings.SplitN(s, "=", 2) + k := strings.TrimSpace(arr[0]) + if len(arr) != 2 { + return fmt.Errorf("missing bool value for %s", k) + } + v := strings.TrimSpace(arr[1]) + boolValue, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("invalid value of %s=%s, err: %v", k, v, err) + } + m[k] = boolValue + } + return f.SetFromMap(m) +} + +// SetFromMap stores flag gates for known features from a map[string]bool or returns an error +func (f *featureGate) SetFromMap(m map[string]bool) error { + f.lock.Lock() + defer f.lock.Unlock() + + // Copy existing state + known := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known[k] = v + } + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + + for k, v := range m { + k := Feature(k) + featureSpec, ok := known[k] + if !ok { + return fmt.Errorf("unrecognized feature gate: %s", k) + } + if featureSpec.LockToDefault && featureSpec.Default != v { + return fmt.Errorf("cannot set feature gate %v to %v, feature is locked to %v", k, v, featureSpec.Default) + } + enabled[k] = v + // Handle "special" features like "all alpha gates" + if fn, found := f.special[k]; found { + fn(known, enabled, v) + } + + if featureSpec.PreRelease == Deprecated { + klog.Warningf("Setting deprecated feature gate %s=%t. It will be removed in a future release.", k, v) + } else if featureSpec.PreRelease == GA { + klog.Warningf("Setting GA feature gate %s=%t. It will be removed in a future release.", k, v) + } + } + + // Persist changes + f.known.Store(known) + f.enabled.Store(enabled) + + klog.V(1).Infof("feature gates: %v", f.enabled) + return nil +} + +// String returns a string containing all enabled feature gates, formatted as "key1=value1,key2=value2,...". +func (f *featureGate) String() string { + pairs := []string{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + pairs = append(pairs, fmt.Sprintf("%s=%t", k, v)) + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +func (f *featureGate) Type() string { + return "mapStringBool" +} + +// Add adds features to the featureGate. +func (f *featureGate) Add(features map[Feature]FeatureSpec) error { + f.lock.Lock() + defer f.lock.Unlock() + + if f.closed { + return fmt.Errorf("cannot add a feature gate after adding it to the flag set") + } + + // Copy existing state + known := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known[k] = v + } + + for name, spec := range features { + if existingSpec, found := known[name]; found { + if existingSpec == spec { + continue + } + return fmt.Errorf("feature gate %q with different spec already exists: %v", name, existingSpec) + } + + known[name] = spec + } + + // Persist updated state + f.known.Store(known) + + return nil +} + +// GetAll returns a copy of the map of known feature names to feature specs. +func (f *featureGate) GetAll() map[Feature]FeatureSpec { + retval := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + retval[k] = v + } + return retval +} + +// Enabled returns true if the key is enabled. If the key is not known, this call will panic. 
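+//
+// For example (assuming the illustrative "MyFeature" gate was registered via Add;
+// querying an unregistered key panics):
+//
+//	if fg.Enabled("MyFeature") {
+//		// feature-specific code path
+//	}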
+func (f *featureGate) Enabled(key Feature) bool { + if v, ok := f.enabled.Load().(map[Feature]bool)[key]; ok { + return v + } + if v, ok := f.known.Load().(map[Feature]FeatureSpec)[key]; ok { + return v.Default + } + + panic(fmt.Errorf("feature %q is not registered in FeatureGate %q", key, f.featureGateName)) +} + +// AddFlag adds a flag for setting global feature gates to the specified FlagSet. +func (f *featureGate) AddFlag(fs *pflag.FlagSet) { + f.lock.Lock() + // TODO(mtaufen): Shouldn't we just close it on the first Set/SetFromMap instead? + // Not all components expose a feature gates flag using this AddFlag method, and + // in the future, all components will completely stop exposing a feature gates flag, + // in favor of componentconfig. + f.closed = true + f.lock.Unlock() + + known := f.KnownFeatures() + fs.Var(f, flagName, ""+ + "A set of key=value pairs that describe feature gates for alpha/experimental features. "+ + "Options are:\n"+strings.Join(known, "\n")) +} + +func (f *featureGate) AddMetrics() { + for feature, featureSpec := range f.GetAll() { + featuremetrics.RecordFeatureInfo(context.Background(), string(feature), string(featureSpec.PreRelease), f.Enabled(feature)) + } +} + +// KnownFeatures returns a slice of strings describing the FeatureGate's known features. +// Deprecated and GA features are hidden from the list. +func (f *featureGate) KnownFeatures() []string { + var known []string + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + if v.PreRelease == GA || v.PreRelease == Deprecated { + continue + } + known = append(known, fmt.Sprintf("%s=true|false (%s - default=%t)", k, v.PreRelease, v.Default)) + } + sort.Strings(known) + return known +} + +// DeepCopy returns a deep copy of the FeatureGate object, such that gates can be +// set on the copy without mutating the original. This is useful for validating +// config against potential feature gate changes before committing those changes. +func (f *featureGate) DeepCopy() MutableFeatureGate { + // Copy existing state. + known := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known[k] = v + } + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + + // Store copied state in new atomics. + knownValue := &atomic.Value{} + knownValue.Store(known) + enabledValue := &atomic.Value{} + enabledValue.Store(enabled) + + // Construct a new featureGate around the copied state. + // Note that specialFeatures is treated as immutable by convention, + // and we maintain the value of f.closed across the copy. + return &featureGate{ + special: specialFeatures, + known: knownValue, + enabled: enabledValue, + closed: f.closed, + } +} diff --git a/vendor/k8s.io/component-base/metrics/OWNERS b/vendor/k8s.io/component-base/metrics/OWNERS new file mode 100644 index 000000000..be371a4a0 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/OWNERS @@ -0,0 +1,11 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - sig-instrumentation-approvers + - logicalhan + - RainbowMango +reviewers: + - sig-instrumentation-reviewers + - YoyinZyc +labels: + - sig/instrumentation diff --git a/vendor/k8s.io/component-base/metrics/buckets.go b/vendor/k8s.io/component-base/metrics/buckets.go new file mode 100644 index 000000000..48d3093e0 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/buckets.go @@ -0,0 +1,43 @@ +/* +Copyright 2022 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +// DefBuckets is a wrapper for prometheus.DefBuckets +var DefBuckets = prometheus.DefBuckets + +// LinearBuckets is a wrapper for prometheus.LinearBuckets. +func LinearBuckets(start, width float64, count int) []float64 { + return prometheus.LinearBuckets(start, width, count) +} + +// ExponentialBuckets is a wrapper for prometheus.ExponentialBuckets. +func ExponentialBuckets(start, factor float64, count int) []float64 { + return prometheus.ExponentialBuckets(start, factor, count) +} + +// MergeBuckets merges buckets together +func MergeBuckets(buckets ...[]float64) []float64 { + result := make([]float64, 1) + for _, s := range buckets { + result = append(result, s...) + } + return result +} diff --git a/vendor/k8s.io/component-base/metrics/collector.go b/vendor/k8s.io/component-base/metrics/collector.go new file mode 100644 index 000000000..0718b6e13 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/collector.go @@ -0,0 +1,190 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + + "github.com/blang/semver/v4" + "github.com/prometheus/client_golang/prometheus" +) + +// StableCollector extends the prometheus.Collector interface to allow customization of the +// metric registration process, it's especially intend to be used in scenario of custom collector. +type StableCollector interface { + prometheus.Collector + + // DescribeWithStability sends the super-set of all possible metrics.Desc collected + // by this StableCollector to the provided channel. + DescribeWithStability(chan<- *Desc) + + // CollectWithStability sends each collected metrics.Metric via the provide channel. + CollectWithStability(chan<- Metric) + + // Create will initialize all Desc and it intends to be called by registry. + Create(version *semver.Version, self StableCollector) bool + + // ClearState will clear all the states marked by Create. + ClearState() + + // HiddenMetrics tells the list of hidden metrics with fqName. + HiddenMetrics() []string +} + +// BaseStableCollector which implements almost all methods defined by StableCollector +// is a convenient assistant for custom collectors. +// It is recommended to inherit BaseStableCollector when implementing custom collectors. +type BaseStableCollector struct { + descriptors map[string]*Desc // stores all descriptors by pair, these are collected from DescribeWithStability(). 
+ registerable map[string]*Desc // stores registerable descriptors by pair, is a subset of descriptors. + hidden map[string]*Desc // stores hidden descriptors by pair, is a subset of descriptors. + self StableCollector +} + +// DescribeWithStability sends all descriptors to the provided channel. +// Every custom collector should over-write this method. +func (bsc *BaseStableCollector) DescribeWithStability(ch chan<- *Desc) { + panic(fmt.Errorf("custom collector should over-write DescribeWithStability method")) +} + +// Describe sends all descriptors to the provided channel. +// It intended to be called by prometheus registry. +func (bsc *BaseStableCollector) Describe(ch chan<- *prometheus.Desc) { + for _, d := range bsc.registerable { + ch <- d.toPrometheusDesc() + } +} + +// CollectWithStability sends all metrics to the provided channel. +// Every custom collector should over-write this method. +func (bsc *BaseStableCollector) CollectWithStability(ch chan<- Metric) { + panic(fmt.Errorf("custom collector should over-write CollectWithStability method")) +} + +// Collect is called by the Prometheus registry when collecting metrics. +func (bsc *BaseStableCollector) Collect(ch chan<- prometheus.Metric) { + mch := make(chan Metric) + + go func() { + bsc.self.CollectWithStability(mch) + close(mch) + }() + + for m := range mch { + // nil Metric usually means hidden metrics + if m == nil { + continue + } + + ch <- prometheus.Metric(m) + } +} + +func (bsc *BaseStableCollector) add(d *Desc) { + if len(d.fqName) == 0 { + panic("nameless metrics will be not allowed") + } + + if bsc.descriptors == nil { + bsc.descriptors = make(map[string]*Desc) + } + + if _, exist := bsc.descriptors[d.fqName]; exist { + panic(fmt.Sprintf("duplicate metrics (%s) will be not allowed", d.fqName)) + } + + bsc.descriptors[d.fqName] = d +} + +// Init intends to be called by registry. +func (bsc *BaseStableCollector) init(self StableCollector) { + bsc.self = self + + dch := make(chan *Desc) + + // collect all possible descriptions from custom side + go func() { + bsc.self.DescribeWithStability(dch) + close(dch) + }() + + for d := range dch { + bsc.add(d) + } +} + +func (bsc *BaseStableCollector) trackRegistrableDescriptor(d *Desc) { + if bsc.registerable == nil { + bsc.registerable = make(map[string]*Desc) + } + + bsc.registerable[d.fqName] = d +} + +func (bsc *BaseStableCollector) trackHiddenDescriptor(d *Desc) { + if bsc.hidden == nil { + bsc.hidden = make(map[string]*Desc) + } + + bsc.hidden[d.fqName] = d +} + +// Create intends to be called by registry. +// Create will return true as long as there is one or more metrics not be hidden. +// Otherwise return false, that means the whole collector will be ignored by registry. +func (bsc *BaseStableCollector) Create(version *semver.Version, self StableCollector) bool { + bsc.init(self) + + for _, d := range bsc.descriptors { + d.create(version) + if d.IsHidden() { + bsc.trackHiddenDescriptor(d) + } else { + bsc.trackRegistrableDescriptor(d) + } + } + + if len(bsc.registerable) > 0 { + return true + } + + return false +} + +// ClearState will clear all the states marked by Create. +// It intends to be used for re-register a hidden metric. +func (bsc *BaseStableCollector) ClearState() { + for _, d := range bsc.descriptors { + d.ClearState() + } + + bsc.descriptors = nil + bsc.registerable = nil + bsc.hidden = nil + bsc.self = nil +} + +// HiddenMetrics tells the list of hidden metrics with fqName. 
+func (bsc *BaseStableCollector) HiddenMetrics() (fqNames []string) { + for i := range bsc.hidden { + fqNames = append(fqNames, bsc.hidden[i].fqName) + } + return +} + +// Check if our BaseStableCollector implements necessary interface +var _ StableCollector = &BaseStableCollector{} diff --git a/vendor/k8s.io/component-base/metrics/counter.go b/vendor/k8s.io/component-base/metrics/counter.go new file mode 100644 index 000000000..5664a68a9 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/counter.go @@ -0,0 +1,242 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + + "github.com/blang/semver/v4" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +// Counter is our internal representation for our wrapping struct around prometheus +// counters. Counter implements both kubeCollector and CounterMetric. +type Counter struct { + CounterMetric + *CounterOpts + lazyMetric + selfCollector +} + +// The implementation of the Metric interface is expected by testutil.GetCounterMetricValue. +var _ Metric = &Counter{} + +// NewCounter returns an object which satisfies the kubeCollector and CounterMetric interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated. +func NewCounter(opts *CounterOpts) *Counter { + opts.StabilityLevel.setDefaults() + + kc := &Counter{ + CounterOpts: opts, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + kc.setPrometheusCounter(noop) + kc.lazyInit(kc, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return kc +} + +func (c *Counter) Desc() *prometheus.Desc { + return c.metric.Desc() +} + +func (c *Counter) Write(to *dto.Metric) error { + return c.metric.Write(to) +} + +// Reset resets the underlying prometheus Counter to start counting from 0 again +func (c *Counter) Reset() { + if !c.IsCreated() { + return + } + c.setPrometheusCounter(prometheus.NewCounter(c.CounterOpts.toPromCounterOpts())) +} + +// setPrometheusCounter sets the underlying CounterMetric object, i.e. the thing that does the measurement. +func (c *Counter) setPrometheusCounter(counter prometheus.Counter) { + c.CounterMetric = counter + c.initSelfCollection(counter) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (c *Counter) DeprecatedVersion() *semver.Version { + return parseSemver(c.CounterOpts.DeprecatedVersion) +} + +// initializeMetric invocation creates the actual underlying Counter. Until this method is called +// the underlying counter is a no-op. +func (c *Counter) initializeMetric() { + c.CounterOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus counter. + c.setPrometheusCounter(prometheus.NewCounter(c.CounterOpts.toPromCounterOpts())) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) Counter. Until this method +// is called the underlying counter is a no-op. 
+func (c *Counter) initializeDeprecatedMetric() { + c.CounterOpts.markDeprecated() + c.initializeMetric() +} + +// WithContext allows the normal Counter metric to pass in context. The context is no-op now. +func (c *Counter) WithContext(ctx context.Context) CounterMetric { + return c.CounterMetric +} + +// CounterVec is the internal representation of our wrapping struct around prometheus +// counterVecs. CounterVec implements both kubeCollector and CounterVecMetric. +type CounterVec struct { + *prometheus.CounterVec + *CounterOpts + lazyMetric + originalLabels []string +} + +var _ kubeCollector = &CounterVec{} + +// TODO: make this true: var _ CounterVecMetric = &CounterVec{} + +// NewCounterVec returns an object which satisfies the kubeCollector and (almost) CounterVecMetric interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated, and only members extracted after +// registration will actually measure anything. +func NewCounterVec(opts *CounterOpts, labels []string) *CounterVec { + opts.StabilityLevel.setDefaults() + + fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[fqName]; ok { + opts.LabelValueAllowLists = allowList + } + allowListLock.RUnlock() + + cv := &CounterVec{ + CounterVec: noopCounterVec, + CounterOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + cv.lazyInit(cv, fqName) + return cv +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *CounterVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.CounterOpts.DeprecatedVersion) + +} + +// initializeMetric invocation creates the actual underlying CounterVec. Until this method is called +// the underlying counterVec is a no-op. +func (v *CounterVec) initializeMetric() { + v.CounterOpts.annotateStabilityLevel() + v.CounterVec = prometheus.NewCounterVec(v.CounterOpts.toPromCounterOpts(), v.originalLabels) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) CounterVec. Until this method is called +// the underlying counterVec is a no-op. +func (v *CounterVec) initializeDeprecatedMetric() { + v.CounterOpts.markDeprecated() + v.initializeMetric() +} + +// Default Prometheus Vec behavior is that member extraction results in creation of a new element +// if one with the unique label values is not found in the underlying stored metricMap. +// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/counter.go#L179-L197 +// +// In contrast, the Vec behavior in this package is that member extraction before registration +// returns a permanent noop object. + +// WithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created IFF the counterVec +// has been registered to a metrics registry. 
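+//
+// A usage sketch (illustrative; the metric name and the registration call via
+// k8s.io/component-base/metrics/legacyregistry are assumptions of the example):
+//
+//	requests := NewCounterVec(&CounterOpts{Name: "demo_requests_total", Help: "Demo counter."}, []string{"verb", "code"})
+//	legacyregistry.MustRegister(requests)
+//	requests.WithLabelValues("GET", "200").Inc()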
+func (v *CounterVec) WithLabelValues(lvs ...string) CounterMetric { + if !v.IsCreated() { + return noop // return no-op counter + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } + return v.CounterVec.WithLabelValues(lvs...) +} + +// With returns the Counter for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Counter is created IFF the counterVec has +// been registered to a metrics registry. +func (v *CounterVec) With(labels map[string]string) CounterMetric { + if !v.IsCreated() { + return noop // return no-op counter + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainLabelMap(labels) + } + return v.CounterVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *CounterVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.CounterVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *CounterVec) Reset() { + if !v.IsCreated() { + return + } + + v.CounterVec.Reset() +} + +// WithContext returns wrapped CounterVec with context +func (v *CounterVec) WithContext(ctx context.Context) *CounterVecWithContext { + return &CounterVecWithContext{ + ctx: ctx, + CounterVec: v, + } +} + +// CounterVecWithContext is the wrapper of CounterVec with context. +type CounterVecWithContext struct { + *CounterVec + ctx context.Context +} + +// WithLabelValues is the wrapper of CounterVec.WithLabelValues. +func (vc *CounterVecWithContext) WithLabelValues(lvs ...string) CounterMetric { + return vc.CounterVec.WithLabelValues(lvs...) +} + +// With is the wrapper of CounterVec.With. +func (vc *CounterVecWithContext) With(labels map[string]string) CounterMetric { + return vc.CounterVec.With(labels) +} diff --git a/vendor/k8s.io/component-base/metrics/desc.go b/vendor/k8s.io/component-base/metrics/desc.go new file mode 100644 index 000000000..2ca9cfa7c --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/desc.go @@ -0,0 +1,225 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "sync" + + "github.com/blang/semver/v4" + "github.com/prometheus/client_golang/prometheus" + + "k8s.io/klog/v2" +) + +// Desc is a prometheus.Desc extension. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. 
+ fqName string + // help provides some helpful information about this metric. + help string + // constLabels is the label names. Their label values are variable. + constLabels Labels + // variableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + + // promDesc is the descriptor used by every Prometheus Metric. + promDesc *prometheus.Desc + annotatedHelp string + + // stabilityLevel represents the API guarantees for a given defined metric. + stabilityLevel StabilityLevel + // deprecatedVersion represents in which version this metric be deprecated. + deprecatedVersion string + + isDeprecated bool + isHidden bool + isCreated bool + createLock sync.RWMutex + markDeprecationOnce sync.Once + createOnce sync.Once + deprecateOnce sync.Once + hideOnce sync.Once + annotateOnce sync.Once +} + +// NewDesc extends prometheus.NewDesc with stability support. +// +// The stabilityLevel should be valid stability label, such as "metrics.ALPHA" +// and "metrics.STABLE"(Maybe "metrics.BETA" in future). Default value "metrics.ALPHA" +// will be used in case of empty or invalid stability label. +// +// The deprecatedVersion represents in which version this Metric be deprecated. +// The deprecation policy outlined by the control plane metrics stability KEP. +func NewDesc(fqName string, help string, variableLabels []string, constLabels Labels, + stabilityLevel StabilityLevel, deprecatedVersion string) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + annotatedHelp: help, + variableLabels: variableLabels, + constLabels: constLabels, + stabilityLevel: stabilityLevel, + deprecatedVersion: deprecatedVersion, + } + d.stabilityLevel.setDefaults() + + return d +} + +// String formats the Desc as a string. +// The stability metadata maybe annotated in 'HELP' section if called after registry, +// otherwise not. +// e.g. "Desc{fqName: "normal_stable_descriptor", help: "[STABLE] this is a stable descriptor", constLabels: {}, variableLabels: []}" +func (d *Desc) String() string { + if d.isCreated { + return d.promDesc.String() + } + + return prometheus.NewDesc(d.fqName, d.help, d.variableLabels, prometheus.Labels(d.constLabels)).String() +} + +// toPrometheusDesc transform self to prometheus.Desc +func (d *Desc) toPrometheusDesc() *prometheus.Desc { + return d.promDesc +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (d *Desc) DeprecatedVersion() *semver.Version { + return parseSemver(d.deprecatedVersion) + +} + +func (d *Desc) determineDeprecationStatus(version semver.Version) { + selfVersion := d.DeprecatedVersion() + if selfVersion == nil { + return + } + d.markDeprecationOnce.Do(func() { + if selfVersion.LTE(version) { + d.isDeprecated = true + } + if ShouldShowHidden() { + klog.Warningf("Hidden metrics(%s) have been manually overridden, showing this very deprecated metric.", d.fqName) + return + } + if shouldHide(&version, selfVersion) { + // TODO(RainbowMango): Remove this log temporarily. https://github.com/kubernetes/kubernetes/issues/85369 + // klog.Warningf("This metric(%s) has been deprecated for more than one release, hiding.", d.fqName) + d.isHidden = true + } + }) +} + +// IsHidden returns if metric will be hidden +func (d *Desc) IsHidden() bool { + return d.isHidden +} + +// IsDeprecated returns if metric has been deprecated +func (d *Desc) IsDeprecated() bool { + return d.isDeprecated +} + +// IsCreated returns if metric has been created. 
+func (d *Desc) IsCreated() bool { + d.createLock.RLock() + defer d.createLock.RUnlock() + + return d.isCreated +} + +// create forces the initialization of Desc which has been deferred until +// the point at which this method is invoked. This method will determine whether +// the Desc is deprecated or hidden, no-opting if the Desc should be considered +// hidden. Furthermore, this function no-opts and returns true if Desc is already +// created. +func (d *Desc) create(version *semver.Version) bool { + if version != nil { + d.determineDeprecationStatus(*version) + } + + // let's not create if this metric is slated to be hidden + if d.IsHidden() { + return false + } + d.createOnce.Do(func() { + d.createLock.Lock() + defer d.createLock.Unlock() + + d.isCreated = true + if d.IsDeprecated() { + d.initializeDeprecatedDesc() + } else { + d.initialize() + } + }) + return d.IsCreated() +} + +// ClearState will clear all the states marked by Create. +// It intends to be used for re-register a hidden metric. +func (d *Desc) ClearState() { + d.isDeprecated = false + d.isHidden = false + d.isCreated = false + + d.markDeprecationOnce = *new(sync.Once) + d.createOnce = *new(sync.Once) + d.deprecateOnce = *new(sync.Once) + d.hideOnce = *new(sync.Once) + d.annotateOnce = *new(sync.Once) + + d.annotatedHelp = d.help + d.promDesc = nil +} + +func (d *Desc) markDeprecated() { + d.deprecateOnce.Do(func() { + d.annotatedHelp = fmt.Sprintf("(Deprecated since %s) %s", d.deprecatedVersion, d.annotatedHelp) + }) +} + +func (d *Desc) annotateStabilityLevel() { + d.annotateOnce.Do(func() { + d.annotatedHelp = fmt.Sprintf("[%v] %v", d.stabilityLevel, d.annotatedHelp) + }) +} + +func (d *Desc) initialize() { + d.annotateStabilityLevel() + + // this actually creates the underlying prometheus desc. + d.promDesc = prometheus.NewDesc(d.fqName, d.annotatedHelp, d.variableLabels, prometheus.Labels(d.constLabels)) +} + +func (d *Desc) initializeDeprecatedDesc() { + d.markDeprecated() + d.initialize() +} + +// GetRawDesc will returns a new *Desc with original parameters provided to NewDesc(). +// +// It will be useful in testing scenario that the same Desc be registered to different registry. +// 1. Desc `D` is registered to registry 'A' in TestA (Note: `D` maybe created) +// 2. Desc `D` is registered to registry 'B' in TestB (Note: since 'D' has been created once, thus will be ignored by registry 'B') +func (d *Desc) GetRawDesc() *Desc { + return NewDesc(d.fqName, d.help, d.variableLabels, d.constLabels, d.stabilityLevel, d.deprecatedVersion) +} diff --git a/vendor/k8s.io/component-base/metrics/gauge.go b/vendor/k8s.io/component-base/metrics/gauge.go new file mode 100644 index 000000000..89631115a --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/gauge.go @@ -0,0 +1,277 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "context" + + "github.com/blang/semver/v4" + "github.com/prometheus/client_golang/prometheus" + + "k8s.io/component-base/version" +) + +// Gauge is our internal representation for our wrapping struct around prometheus +// gauges. kubeGauge implements both kubeCollector and KubeGauge. +type Gauge struct { + GaugeMetric + *GaugeOpts + lazyMetric + selfCollector +} + +var _ GaugeMetric = &Gauge{} +var _ Registerable = &Gauge{} +var _ kubeCollector = &Gauge{} + +// NewGauge returns an object which satisfies the kubeCollector, Registerable, and Gauge interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated. +func NewGauge(opts *GaugeOpts) *Gauge { + opts.StabilityLevel.setDefaults() + + kc := &Gauge{ + GaugeOpts: opts, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + kc.setPrometheusGauge(noop) + kc.lazyInit(kc, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return kc +} + +// setPrometheusGauge sets the underlying KubeGauge object, i.e. the thing that does the measurement. +func (g *Gauge) setPrometheusGauge(gauge prometheus.Gauge) { + g.GaugeMetric = gauge + g.initSelfCollection(gauge) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (g *Gauge) DeprecatedVersion() *semver.Version { + return parseSemver(g.GaugeOpts.DeprecatedVersion) +} + +// initializeMetric invocation creates the actual underlying Gauge. Until this method is called +// the underlying gauge is a no-op. +func (g *Gauge) initializeMetric() { + g.GaugeOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus gauge. + g.setPrometheusGauge(prometheus.NewGauge(g.GaugeOpts.toPromGaugeOpts())) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) Gauge. Until this method +// is called the underlying gauge is a no-op. +func (g *Gauge) initializeDeprecatedMetric() { + g.GaugeOpts.markDeprecated() + g.initializeMetric() +} + +// WithContext allows the normal Gauge metric to pass in context. The context is no-op now. +func (g *Gauge) WithContext(ctx context.Context) GaugeMetric { + return g.GaugeMetric +} + +// GaugeVec is the internal representation of our wrapping struct around prometheus +// gaugeVecs. kubeGaugeVec implements both kubeCollector and KubeGaugeVec. +type GaugeVec struct { + *prometheus.GaugeVec + *GaugeOpts + lazyMetric + originalLabels []string +} + +var _ GaugeVecMetric = &GaugeVec{} +var _ Registerable = &GaugeVec{} +var _ kubeCollector = &GaugeVec{} + +// NewGaugeVec returns an object which satisfies the kubeCollector, Registerable, and GaugeVecMetric interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated, and only members extracted after +// registration will actually measure anything. 
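(Editorial sketch, not part of the vendored file: how a caller might declare and register one of these lazily instantiated gauge vectors. The metric and label names are assumptions; registration goes through the legacyregistry package added later in this patch.)

    package example

    import (
    	"k8s.io/component-base/metrics"
    	"k8s.io/component-base/metrics/legacyregistry"
    )

    // queueDepth is an invented metric used only to illustrate lazy instantiation.
    var queueDepth = metrics.NewGaugeVec(
    	&metrics.GaugeOpts{
    		Namespace:      "example",
    		Subsystem:      "worker",
    		Name:           "queue_depth",
    		Help:           "Current number of items waiting in the work queue.",
    		StabilityLevel: metrics.ALPHA,
    	},
    	[]string{"queue"},
    )

    func recordDepth() {
    	// Until this registration runs, members extracted from the vec are no-ops.
    	legacyregistry.MustRegister(queueDepth)
    	queueDepth.WithLabelValues("default").Set(3)
    }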
+func NewGaugeVec(opts *GaugeOpts, labels []string) *GaugeVec { + opts.StabilityLevel.setDefaults() + + fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[fqName]; ok { + opts.LabelValueAllowLists = allowList + } + allowListLock.RUnlock() + + cv := &GaugeVec{ + GaugeVec: noopGaugeVec, + GaugeOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + cv.lazyInit(cv, fqName) + return cv +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *GaugeVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.GaugeOpts.DeprecatedVersion) +} + +// initializeMetric invocation creates the actual underlying GaugeVec. Until this method is called +// the underlying gaugeVec is a no-op. +func (v *GaugeVec) initializeMetric() { + v.GaugeOpts.annotateStabilityLevel() + v.GaugeVec = prometheus.NewGaugeVec(v.GaugeOpts.toPromGaugeOpts(), v.originalLabels) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) GaugeVec. Until this method is called +// the underlying gaugeVec is a no-op. +func (v *GaugeVec) initializeDeprecatedMetric() { + v.GaugeOpts.markDeprecated() + v.initializeMetric() +} + +func (v *GaugeVec) WithLabelValuesChecked(lvs ...string) (GaugeMetric, error) { + if !v.IsCreated() { + if v.IsHidden() { + return noop, nil + } + return noop, errNotRegistered // return no-op gauge + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } + elt, err := v.GaugeVec.GetMetricWithLabelValues(lvs...) + return elt, err +} + +// Default Prometheus Vec behavior is that member extraction results in creation of a new element +// if one with the unique label values is not found in the underlying stored metricMap. +// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/gauge.go#L190-L208 +// +// In contrast, the Vec behavior in this package is that member extraction before registration +// returns a permanent noop object. + +// WithLabelValues returns the GaugeMetric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new GaugeMetric is created IFF the gaugeVec +// has been registered to a metrics registry. +func (v *GaugeVec) WithLabelValues(lvs ...string) GaugeMetric { + ans, err := v.WithLabelValuesChecked(lvs...) + if err == nil || ErrIsNotRegistered(err) { + return ans + } + panic(err) +} + +func (v *GaugeVec) WithChecked(labels map[string]string) (GaugeMetric, error) { + if !v.IsCreated() { + if v.IsHidden() { + return noop, nil + } + return noop, errNotRegistered // return no-op gauge + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainLabelMap(labels) + } + elt, err := v.GaugeVec.GetMetricWith(labels) + return elt, err +} + +// With returns the GaugeMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new GaugeMetric is created IFF the gaugeVec has +// been registered to a metrics registry. 
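(Editorial sketch continuing the previous one, with the same assumed package and imports: the checked accessor lets callers handle the not-yet-registered case explicitly instead of relying on the panic/no-op behavior of WithLabelValues.)

    // incQueueDepth increments the hypothetical queueDepth vec from the earlier
    // sketch, tolerating the "not yet registered" case.
    func incQueueDepth(queue string) error {
    	g, err := queueDepth.WithLabelValuesChecked(queue)
    	if err != nil {
    		if metrics.ErrIsNotRegistered(err) {
    			return nil // vec not registered yet; nothing to record
    		}
    		return err
    	}
    	g.Inc()
    	return nil
    }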
+func (v *GaugeVec) With(labels map[string]string) GaugeMetric { + ans, err := v.WithChecked(labels) + if err == nil || ErrIsNotRegistered(err) { + return ans + } + panic(err) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *GaugeVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.GaugeVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *GaugeVec) Reset() { + if !v.IsCreated() { + return + } + + v.GaugeVec.Reset() +} + +func newGaugeFunc(opts *GaugeOpts, function func() float64, v semver.Version) GaugeFunc { + g := NewGauge(opts) + + if !g.Create(&v) { + return nil + } + + return prometheus.NewGaugeFunc(g.GaugeOpts.toPromGaugeOpts(), function) +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. +func NewGaugeFunc(opts *GaugeOpts, function func() float64) GaugeFunc { + v := parseVersion(version.Get()) + + return newGaugeFunc(opts, function, v) +} + +// WithContext returns wrapped GaugeVec with context +func (v *GaugeVec) WithContext(ctx context.Context) *GaugeVecWithContext { + return &GaugeVecWithContext{ + ctx: ctx, + GaugeVec: v, + } +} + +func (v *GaugeVec) InterfaceWithContext(ctx context.Context) GaugeVecMetric { + return v.WithContext(ctx) +} + +// GaugeVecWithContext is the wrapper of GaugeVec with context. +type GaugeVecWithContext struct { + *GaugeVec + ctx context.Context +} + +// WithLabelValues is the wrapper of GaugeVec.WithLabelValues. +func (vc *GaugeVecWithContext) WithLabelValues(lvs ...string) GaugeMetric { + return vc.GaugeVec.WithLabelValues(lvs...) +} + +// With is the wrapper of GaugeVec.With. +func (vc *GaugeVecWithContext) With(labels map[string]string) GaugeMetric { + return vc.GaugeVec.With(labels) +} diff --git a/vendor/k8s.io/component-base/metrics/histogram.go b/vendor/k8s.io/component-base/metrics/histogram.go new file mode 100644 index 000000000..e6884f35c --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/histogram.go @@ -0,0 +1,214 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "context" + + "github.com/blang/semver/v4" + "github.com/prometheus/client_golang/prometheus" +) + +// Histogram is our internal representation for our wrapping struct around prometheus +// histograms. Summary implements both kubeCollector and ObserverMetric +type Histogram struct { + ObserverMetric + *HistogramOpts + lazyMetric + selfCollector +} + +// NewHistogram returns an object which is Histogram-like. However, nothing +// will be measured until the histogram is registered somewhere. +func NewHistogram(opts *HistogramOpts) *Histogram { + opts.StabilityLevel.setDefaults() + + h := &Histogram{ + HistogramOpts: opts, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + h.setPrometheusHistogram(noopMetric{}) + h.lazyInit(h, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return h +} + +// setPrometheusHistogram sets the underlying KubeGauge object, i.e. the thing that does the measurement. +func (h *Histogram) setPrometheusHistogram(histogram prometheus.Histogram) { + h.ObserverMetric = histogram + h.initSelfCollection(histogram) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (h *Histogram) DeprecatedVersion() *semver.Version { + return parseSemver(h.HistogramOpts.DeprecatedVersion) +} + +// initializeMetric invokes the actual prometheus.Histogram object instantiation +// and stores a reference to it +func (h *Histogram) initializeMetric() { + h.HistogramOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus gauge. + h.setPrometheusHistogram(prometheus.NewHistogram(h.HistogramOpts.toPromHistogramOpts())) +} + +// initializeDeprecatedMetric invokes the actual prometheus.Histogram object instantiation +// but modifies the Help description prior to object instantiation. +func (h *Histogram) initializeDeprecatedMetric() { + h.HistogramOpts.markDeprecated() + h.initializeMetric() +} + +// WithContext allows the normal Histogram metric to pass in context. The context is no-op now. +func (h *Histogram) WithContext(ctx context.Context) ObserverMetric { + return h.ObserverMetric +} + +// HistogramVec is the internal representation of our wrapping struct around prometheus +// histogramVecs. +type HistogramVec struct { + *prometheus.HistogramVec + *HistogramOpts + lazyMetric + originalLabels []string +} + +// NewHistogramVec returns an object which satisfies kubeCollector and wraps the +// prometheus.HistogramVec object. However, the object returned will not measure +// anything unless the collector is first registered, since the metric is lazily instantiated, +// and only members extracted after +// registration will actually measure anything. 
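(Editorial sketch, not part of the vendored file: a hypothetical histogram vector with invented buckets and label names, registered through the legacyregistry package from this patch.)

    package example

    import (
    	"time"

    	"k8s.io/component-base/metrics"
    	"k8s.io/component-base/metrics/legacyregistry"
    )

    // requestDuration is an invented histogram vec; nothing here comes from a real component.
    var requestDuration = metrics.NewHistogramVec(
    	&metrics.HistogramOpts{
    		Namespace:      "example",
    		Name:           "request_duration_seconds",
    		Help:           "Duration of handled requests.",
    		Buckets:        []float64{0.005, 0.05, 0.5, 5},
    		StabilityLevel: metrics.ALPHA,
    	},
    	[]string{"verb"},
    )

    func init() {
    	// Registration triggers the deferred instantiation described above.
    	legacyregistry.MustRegister(requestDuration)
    }

    func observe(verb string, start time.Time) {
    	// Without the registration above, WithLabelValues returns the package's no-op observer.
    	requestDuration.WithLabelValues(verb).Observe(time.Since(start).Seconds())
    }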
+ +func NewHistogramVec(opts *HistogramOpts, labels []string) *HistogramVec { + opts.StabilityLevel.setDefaults() + + fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[fqName]; ok { + opts.LabelValueAllowLists = allowList + } + allowListLock.RUnlock() + + v := &HistogramVec{ + HistogramVec: noopHistogramVec, + HistogramOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + v.lazyInit(v, fqName) + return v +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *HistogramVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.HistogramOpts.DeprecatedVersion) +} + +func (v *HistogramVec) initializeMetric() { + v.HistogramOpts.annotateStabilityLevel() + v.HistogramVec = prometheus.NewHistogramVec(v.HistogramOpts.toPromHistogramOpts(), v.originalLabels) +} + +func (v *HistogramVec) initializeDeprecatedMetric() { + v.HistogramOpts.markDeprecated() + v.initializeMetric() +} + +// Default Prometheus Vec behavior is that member extraction results in creation of a new element +// if one with the unique label values is not found in the underlying stored metricMap. +// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/histogram.go#L460-L470 +// +// In contrast, the Vec behavior in this package is that member extraction before registration +// returns a permanent noop object. + +// WithLabelValues returns the ObserverMetric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new ObserverMetric is created IFF the HistogramVec +// has been registered to a metrics registry. +func (v *HistogramVec) WithLabelValues(lvs ...string) ObserverMetric { + if !v.IsCreated() { + return noop + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } + return v.HistogramVec.WithLabelValues(lvs...) +} + +// With returns the ObserverMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new ObserverMetric is created IFF the HistogramVec has +// been registered to a metrics registry. +func (v *HistogramVec) With(labels map[string]string) ObserverMetric { + if !v.IsCreated() { + return noop + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainLabelMap(labels) + } + return v.HistogramVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *HistogramVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.HistogramVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. 
+func (v *HistogramVec) Reset() { + if !v.IsCreated() { + return + } + + v.HistogramVec.Reset() +} + +// WithContext returns wrapped HistogramVec with context +func (v *HistogramVec) WithContext(ctx context.Context) *HistogramVecWithContext { + return &HistogramVecWithContext{ + ctx: ctx, + HistogramVec: v, + } +} + +// HistogramVecWithContext is the wrapper of HistogramVec with context. +type HistogramVecWithContext struct { + *HistogramVec + ctx context.Context +} + +// WithLabelValues is the wrapper of HistogramVec.WithLabelValues. +func (vc *HistogramVecWithContext) WithLabelValues(lvs ...string) ObserverMetric { + return vc.HistogramVec.WithLabelValues(lvs...) +} + +// With is the wrapper of HistogramVec.With. +func (vc *HistogramVecWithContext) With(labels map[string]string) ObserverMetric { + return vc.HistogramVec.With(labels) +} diff --git a/vendor/k8s.io/component-base/metrics/http.go b/vendor/k8s.io/component-base/metrics/http.go new file mode 100644 index 000000000..2a0d249c2 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/http.go @@ -0,0 +1,87 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "io" + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +var ( + processStartedAt time.Time +) + +func init() { + processStartedAt = time.Now() +} + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // HTTPErrorOnError serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. + HTTPErrorOnError promhttp.HandlerErrorHandling = iota + + // ContinueOnError ignore errors and try to serve as many metrics as possible. + // However, if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. In this case, it is highly + // recommended to provide other means of detecting errors: By setting an + // ErrorLog in HandlerOpts, the errors are logged. By providing a + // Registry in HandlerOpts, the exposed metrics include an error counter + // "promhttp_metric_handler_errors_total", which can be used for + // alerts. + ContinueOnError + + // PanicOnError panics upon the first error encountered (useful for "crash only" apps). + PanicOnError +) + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts promhttp.HandlerOpts + +func (ho *HandlerOpts) toPromhttpHandlerOpts() promhttp.HandlerOpts { + ho.ProcessStartTime = processStartedAt + return promhttp.HandlerOpts(*ho) +} + +// HandlerFor returns an uninstrumented http.Handler for the provided +// Gatherer. The behavior of the Handler is defined by the provided +// HandlerOpts. 
Thus, HandlerFor is useful to create http.Handlers for custom +// Gatherers, with non-default HandlerOpts, and/or with custom (or no) +// instrumentation. Use the InstrumentMetricHandler function to apply the same +// kind of instrumentation as it is used by the Handler function. +func HandlerFor(reg Gatherer, opts HandlerOpts) http.Handler { + return promhttp.HandlerFor(reg, opts.toPromhttpHandlerOpts()) +} + +// HandlerWithReset return an http.Handler with Reset +func HandlerWithReset(reg KubeRegistry, opts HandlerOpts) http.Handler { + defaultHandler := promhttp.HandlerFor(reg, opts.toPromhttpHandlerOpts()) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == http.MethodDelete { + reg.Reset() + io.WriteString(w, "metrics reset\n") + return + } + defaultHandler.ServeHTTP(w, r) + }) +} diff --git a/vendor/k8s.io/component-base/metrics/labels.go b/vendor/k8s.io/component-base/metrics/labels.go new file mode 100644 index 000000000..11af3ae42 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/labels.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// Labels represents a collection of label name -> value mappings. +type Labels prometheus.Labels diff --git a/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go b/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go new file mode 100644 index 000000000..64a430b79 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go @@ -0,0 +1,92 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package legacyregistry + +import ( + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" + "github.com/prometheus/client_golang/prometheus/promhttp" + + "k8s.io/component-base/metrics" +) + +var ( + defaultRegistry = metrics.NewKubeRegistry() + // DefaultGatherer exposes the global registry gatherer + DefaultGatherer metrics.Gatherer = defaultRegistry + // Reset calls reset on the global registry + Reset = defaultRegistry.Reset + // MustRegister registers registerable metrics but uses the global registry. 
+ MustRegister = defaultRegistry.MustRegister + // RawMustRegister registers prometheus collectors but uses the global registry, this + // bypasses the metric stability framework + // + // Deprecated + RawMustRegister = defaultRegistry.RawMustRegister + + // Register registers a collectable metric but uses the global registry + Register = defaultRegistry.Register + + // Registerer exposes the global registerer + Registerer = defaultRegistry.Registerer + + processStart time.Time +) + +func init() { + RawMustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})) + RawMustRegister(collectors.NewGoCollector(collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsAll))) + defaultRegistry.RegisterMetaMetrics() + processStart = time.Now() +} + +// Handler returns an HTTP handler for the DefaultGatherer. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). +func Handler() http.Handler { + return promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, promhttp.HandlerFor(defaultRegistry, promhttp.HandlerOpts{ProcessStartTime: processStart})) +} + +// HandlerWithReset returns an HTTP handler for the DefaultGatherer but invokes +// registry reset if the http method is DELETE. +func HandlerWithReset() http.Handler { + return promhttp.InstrumentMetricHandler( + prometheus.DefaultRegisterer, + metrics.HandlerWithReset(defaultRegistry, metrics.HandlerOpts{ProcessStartTime: processStart})) +} + +// CustomRegister registers a custom collector but uses the global registry. +func CustomRegister(c metrics.StableCollector) error { + err := defaultRegistry.CustomRegister(c) + + //TODO(RainbowMango): Maybe we can wrap this error by error wrapping.(Golang 1.13) + _ = prometheus.Register(c) + + return err +} + +// CustomMustRegister registers custom collectors but uses the global registry. +func CustomMustRegister(cs ...metrics.StableCollector) { + defaultRegistry.CustomMustRegister(cs...) + + for _, c := range cs { + prometheus.MustRegister(c) + } +} diff --git a/vendor/k8s.io/component-base/metrics/metric.go b/vendor/k8s.io/component-base/metrics/metric.go new file mode 100644 index 000000000..3b22d21ef --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/metric.go @@ -0,0 +1,235 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "sync" + + "github.com/blang/semver/v4" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + promext "k8s.io/component-base/metrics/prometheusextension" + + "k8s.io/klog/v2" +) + +/* +kubeCollector extends the prometheus.Collector interface to allow customization of the metric +registration process. Defer metric initialization until Create() is called, which then +delegates to the underlying metric's initializeMetric or initializeDeprecatedMetric +method call depending on whether the metric is deprecated or not. 
+*/ +type kubeCollector interface { + Collector + lazyKubeMetric + DeprecatedVersion() *semver.Version + // Each collector metric should provide an initialization function + // for both deprecated and non-deprecated variants of a metric. This + // is necessary since metric instantiation will be deferred + // until the metric is actually registered somewhere. + initializeMetric() + initializeDeprecatedMetric() +} + +/* +lazyKubeMetric defines our metric registration interface. lazyKubeMetric objects are expected +to lazily instantiate metrics (i.e defer metric instantiation until when +the Create() function is explicitly called). +*/ +type lazyKubeMetric interface { + Create(*semver.Version) bool + IsCreated() bool + IsHidden() bool + IsDeprecated() bool +} + +/* +lazyMetric implements lazyKubeMetric. A lazy metric is lazy because it waits until metric +registration time before instantiation. Add it as an anonymous field to a struct that +implements kubeCollector to get deferred registration behavior. You must call lazyInit +with the kubeCollector itself as an argument. +*/ +type lazyMetric struct { + fqName string + isDeprecated bool + isHidden bool + isCreated bool + createLock sync.RWMutex + markDeprecationOnce sync.Once + createOnce sync.Once + self kubeCollector + stabilityLevel StabilityLevel +} + +func (r *lazyMetric) IsCreated() bool { + r.createLock.RLock() + defer r.createLock.RUnlock() + return r.isCreated +} + +// lazyInit provides the lazyMetric with a reference to the kubeCollector it is supposed +// to allow lazy initialization for. It should be invoked in the factory function which creates new +// kubeCollector type objects. +func (r *lazyMetric) lazyInit(self kubeCollector, fqName string) { + r.fqName = fqName + r.self = self +} + +// preprocessMetric figures out whether the lazy metric should be hidden or not. +// This method takes a Version argument which should be the version of the binary in which +// this code is currently being executed. A metric can be hidden under two conditions: +// 1. if the metric is deprecated and is outside the grace period (i.e. has been +// deprecated for more than one release +// 2. if the metric is manually disabled via a CLI flag. +// +// Disclaimer: disabling a metric via a CLI flag has higher precedence than +// deprecation and will override show-hidden-metrics for the explicitly +// disabled metric. +func (r *lazyMetric) preprocessMetric(version semver.Version) { + disabledMetricsLock.RLock() + defer disabledMetricsLock.RUnlock() + // disabling metrics is higher in precedence than showing hidden metrics + if _, ok := disabledMetrics[r.fqName]; ok { + r.isHidden = true + return + } + selfVersion := r.self.DeprecatedVersion() + if selfVersion == nil { + return + } + r.markDeprecationOnce.Do(func() { + if selfVersion.LTE(version) { + r.isDeprecated = true + } + + if ShouldShowHidden() { + klog.Warningf("Hidden metrics (%s) have been manually overridden, showing this very deprecated metric.", r.fqName) + return + } + if shouldHide(&version, selfVersion) { + // TODO(RainbowMango): Remove this log temporarily. https://github.com/kubernetes/kubernetes/issues/85369 + // klog.Warningf("This metric has been deprecated for more than one release, hiding.") + r.isHidden = true + } + }) +} + +func (r *lazyMetric) IsHidden() bool { + return r.isHidden +} + +func (r *lazyMetric) IsDeprecated() bool { + return r.isDeprecated +} + +// Create forces the initialization of metric which has been deferred until +// the point at which this method is invoked. 
This method will determine whether +// the metric is deprecated or hidden, no-opting if the metric should be considered +// hidden. Furthermore, this function no-opts and returns true if metric is already +// created. +func (r *lazyMetric) Create(version *semver.Version) bool { + if version != nil { + r.preprocessMetric(*version) + } + // let's not create if this metric is slated to be hidden + if r.IsHidden() { + return false + } + + r.createOnce.Do(func() { + r.createLock.Lock() + defer r.createLock.Unlock() + r.isCreated = true + if r.IsDeprecated() { + r.self.initializeDeprecatedMetric() + } else { + r.self.initializeMetric() + } + }) + sl := r.stabilityLevel + deprecatedV := r.self.DeprecatedVersion() + dv := "" + if deprecatedV != nil { + dv = deprecatedV.String() + } + registeredMetrics.WithLabelValues(string(sl), dv).Inc() + return r.IsCreated() +} + +// ClearState will clear all the states marked by Create. +// It intends to be used for re-register a hidden metric. +func (r *lazyMetric) ClearState() { + r.createLock.Lock() + defer r.createLock.Unlock() + + r.isDeprecated = false + r.isHidden = false + r.isCreated = false + r.markDeprecationOnce = sync.Once{} + r.createOnce = sync.Once{} +} + +// FQName returns the fully-qualified metric name of the collector. +func (r *lazyMetric) FQName() string { + return r.fqName +} + +/* +This code is directly lifted from the prometheus codebase. It's a convenience struct which +allows you satisfy the Collector interface automatically if you already satisfy the Metric interface. + +For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/collector.go#L98-L120 +*/ +type selfCollector struct { + metric prometheus.Metric +} + +func (c *selfCollector) initSelfCollection(m prometheus.Metric) { + c.metric = m +} + +func (c *selfCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- c.metric.Desc() +} + +func (c *selfCollector) Collect(ch chan<- prometheus.Metric) { + ch <- c.metric +} + +// no-op vecs for convenience +var noopCounterVec = &prometheus.CounterVec{} +var noopHistogramVec = &prometheus.HistogramVec{} +var noopTimingHistogramVec = &promext.TimingHistogramVec{} +var noopGaugeVec = &prometheus.GaugeVec{} + +// just use a convenience struct for all the no-ops +var noop = &noopMetric{} + +type noopMetric struct{} + +func (noopMetric) Inc() {} +func (noopMetric) Add(float64) {} +func (noopMetric) Dec() {} +func (noopMetric) Set(float64) {} +func (noopMetric) Sub(float64) {} +func (noopMetric) Observe(float64) {} +func (noopMetric) ObserveWithWeight(float64, uint64) {} +func (noopMetric) SetToCurrentTime() {} +func (noopMetric) Desc() *prometheus.Desc { return nil } +func (noopMetric) Write(*dto.Metric) error { return nil } +func (noopMetric) Describe(chan<- *prometheus.Desc) {} +func (noopMetric) Collect(chan<- prometheus.Metric) {} diff --git a/vendor/k8s.io/component-base/metrics/options.go b/vendor/k8s.io/component-base/metrics/options.go new file mode 100644 index 000000000..7a59b7ba1 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/options.go @@ -0,0 +1,125 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "regexp" + + "github.com/blang/semver/v4" + "github.com/spf13/pflag" + + "k8s.io/component-base/version" +) + +// Options has all parameters needed for exposing metrics from components +type Options struct { + ShowHiddenMetricsForVersion string + DisabledMetrics []string + AllowListMapping map[string]string +} + +// NewOptions returns default metrics options +func NewOptions() *Options { + return &Options{} +} + +// Validate validates metrics flags options. +func (o *Options) Validate() []error { + var errs []error + err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), o.ShowHiddenMetricsForVersion) + if err != nil { + errs = append(errs, err) + } + + if err := validateAllowMetricLabel(o.AllowListMapping); err != nil { + errs = append(errs, err) + } + + if len(errs) == 0 { + return nil + } + return errs +} + +// AddFlags adds flags for exposing component metrics. +func (o *Options) AddFlags(fs *pflag.FlagSet) { + if o == nil { + return + } + fs.StringVar(&o.ShowHiddenMetricsForVersion, "show-hidden-metrics-for-version", o.ShowHiddenMetricsForVersion, + "The previous version for which you want to show hidden metrics. "+ + "Only the previous minor version is meaningful, other values will not be allowed. "+ + "The format is ., e.g.: '1.16'. "+ + "The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, "+ + "rather than being surprised when they are permanently removed in the release after that.") + fs.StringSliceVar(&o.DisabledMetrics, + "disabled-metrics", + o.DisabledMetrics, + "This flag provides an escape hatch for misbehaving metrics. "+ + "You must provide the fully qualified metric name in order to disable it. "+ + "Disclaimer: disabling metrics is higher in precedence than showing hidden metrics.") + fs.StringToStringVar(&o.AllowListMapping, "allow-metric-labels", o.AllowListMapping, + "The map from metric-label to value allow-list of this label. The key's format is ,. "+ + "The value's format is ,..."+ + "e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'.") +} + +// Apply applies parameters into global configuration of metrics. +func (o *Options) Apply() { + if o == nil { + return + } + if len(o.ShowHiddenMetricsForVersion) > 0 { + SetShowHidden() + } + // set disabled metrics + for _, metricName := range o.DisabledMetrics { + SetDisabledMetric(metricName) + } + if o.AllowListMapping != nil { + SetLabelAllowListFromCLI(o.AllowListMapping) + } +} + +func validateShowHiddenMetricsVersion(currentVersion semver.Version, targetVersionStr string) error { + if targetVersionStr == "" { + return nil + } + + validVersionStr := fmt.Sprintf("%d.%d", currentVersion.Major, currentVersion.Minor-1) + if targetVersionStr != validVersionStr { + return fmt.Errorf("--show-hidden-metrics-for-version must be omitted or have the value '%v'. 
Only the previous minor version is allowed", validVersionStr) + } + + return nil +} + +func validateAllowMetricLabel(allowListMapping map[string]string) error { + if allowListMapping == nil { + return nil + } + metricNameRegex := `[a-zA-Z_:][a-zA-Z0-9_:]*` + labelRegex := `[a-zA-Z_][a-zA-Z0-9_]*` + for k := range allowListMapping { + reg := regexp.MustCompile(metricNameRegex + `,` + labelRegex) + if reg.FindString(k) != k { + return fmt.Errorf("--allow-metric-labels must has a list of kv pair with format `metricName:labelName=labelValue, labelValue,...`") + } + } + return nil +} diff --git a/vendor/k8s.io/component-base/metrics/opts.go b/vendor/k8s.io/component-base/metrics/opts.go new file mode 100644 index 000000000..49d2d40bb --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/opts.go @@ -0,0 +1,356 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "strings" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "k8s.io/apimachinery/pkg/util/sets" + promext "k8s.io/component-base/metrics/prometheusextension" +) + +var ( + labelValueAllowLists = map[string]*MetricLabelAllowList{} + allowListLock sync.RWMutex +) + +// KubeOpts is superset struct for prometheus.Opts. The prometheus Opts structure +// is purposefully not embedded here because that would change struct initialization +// in the manner which people are currently accustomed. +// +// Name must be set to a non-empty string. DeprecatedVersion is defined only +// if the metric for which this options applies is, in fact, deprecated. +type KubeOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + LabelValueAllowLists *MetricLabelAllowList +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { + return prometheus.BuildFQName(namespace, subsystem, name) +} + +// StabilityLevel represents the API guarantees for a given defined metric. +type StabilityLevel string + +const ( + // INTERNAL metrics have no stability guarantees, as such, labels may + // be arbitrarily added/removed and the metric may be deleted at any time. + INTERNAL StabilityLevel = "INTERNAL" + // ALPHA metrics have no stability guarantees, as such, labels may + // be arbitrarily added/removed and the metric may be deleted at any time. 
+ ALPHA StabilityLevel = "ALPHA" + // BETA metrics are governed by the deprecation policy outlined in by + // the control plane metrics stability KEP. + BETA StabilityLevel = "BETA" + // STABLE metrics are guaranteed not be mutated and removal is governed by + // the deprecation policy outlined in by the control plane metrics stability KEP. + STABLE StabilityLevel = "STABLE" +) + +// setDefaults takes 'ALPHA' in case of empty. +func (sl *StabilityLevel) setDefaults() { + switch *sl { + case "": + *sl = ALPHA + default: + // no-op, since we have a StabilityLevel already + } +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts KubeOpts + +// Modify help description on the metric description. +func (o *CounterOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *CounterOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *CounterOpts) toPromCounterOpts() prometheus.CounterOpts { + return prometheus.CounterOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + } +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts KubeOpts + +// Modify help description on the metric description. +func (o *GaugeOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *GaugeOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *GaugeOpts) toPromGaugeOpts() prometheus.GaugeOpts { + return prometheus.GaugeOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + } +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type HistogramOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Buckets []float64 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + LabelValueAllowLists *MetricLabelAllowList +} + +// Modify help description on the metric description. 
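(Editorial note: a worked example of the two format strings above, inferred from the code rather than captured from a running binary. A gauge declared with Help "Number of handled requests.", StabilityLevel ALPHA, and DeprecatedVersion "1.28" is exposed, when registered in a 1.28 binary, with the help text "[ALPHA] (Deprecated since 1.28) Number of handled requests.": markDeprecated prepends the deprecation notice first, and annotateStabilityLevel then prefixes the stability level. One release later the metric would be hidden rather than annotated.)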
+func (o *HistogramOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *HistogramOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *HistogramOpts) toPromHistogramOpts() prometheus.HistogramOpts { + return prometheus.HistogramOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + Buckets: o.Buckets, + } +} + +// TimingHistogramOpts bundles the options for creating a TimingHistogram metric. It is +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type TimingHistogramOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Buckets []float64 + InitialValue float64 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + LabelValueAllowLists *MetricLabelAllowList +} + +// Modify help description on the metric description. +func (o *TimingHistogramOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *TimingHistogramOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *TimingHistogramOpts) toPromHistogramOpts() promext.TimingHistogramOpts { + return promext.TimingHistogramOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + Buckets: o.Buckets, + InitialValue: o.InitialValue, + } +} + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name to a non-empty string. While all other fields are +// optional and can safely be left at their zero value, it is recommended to set +// a help string and to explicitly set the Objectives field to the desired value +// as the default value will change in the upcoming v0.10 of the library. +type SummaryOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Objectives map[float64]float64 + MaxAge time.Duration + AgeBuckets uint32 + BufCap uint32 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + LabelValueAllowLists *MetricLabelAllowList +} + +// Modify help description on the metric description. 
+func (o *SummaryOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *SummaryOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// Deprecated: DefObjectives will not be used as the default objectives in +// v1.0.0 of the library. The default Summary will have no quantiles then. +var ( + defObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} +) + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *SummaryOpts) toPromSummaryOpts() prometheus.SummaryOpts { + // we need to retain existing quantile behavior for backwards compatibility, + // so let's do what prometheus used to do prior to v1. + objectives := o.Objectives + if objectives == nil { + objectives = defObjectives + } + return prometheus.SummaryOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + Objectives: objectives, + MaxAge: o.MaxAge, + AgeBuckets: o.AgeBuckets, + BufCap: o.BufCap, + } +} + +type MetricLabelAllowList struct { + labelToAllowList map[string]sets.String +} + +func (allowList *MetricLabelAllowList) ConstrainToAllowedList(labelNameList, labelValueList []string) { + for index, value := range labelValueList { + name := labelNameList[index] + if allowValues, ok := allowList.labelToAllowList[name]; ok { + if !allowValues.Has(value) { + labelValueList[index] = "unexpected" + } + } + } +} + +func (allowList *MetricLabelAllowList) ConstrainLabelMap(labels map[string]string) { + for name, value := range labels { + if allowValues, ok := allowList.labelToAllowList[name]; ok { + if !allowValues.Has(value) { + labels[name] = "unexpected" + } + } + } +} + +func SetLabelAllowListFromCLI(allowListMapping map[string]string) { + allowListLock.Lock() + defer allowListLock.Unlock() + for metricLabelName, labelValues := range allowListMapping { + metricName := strings.Split(metricLabelName, ",")[0] + labelName := strings.Split(metricLabelName, ",")[1] + valueSet := sets.NewString(strings.Split(labelValues, ",")...) + + allowList, ok := labelValueAllowLists[metricName] + if ok { + allowList.labelToAllowList[labelName] = valueSet + } else { + labelToAllowList := make(map[string]sets.String) + labelToAllowList[labelName] = valueSet + labelValueAllowLists[metricName] = &MetricLabelAllowList{ + labelToAllowList, + } + } + } +} diff --git a/vendor/k8s.io/component-base/metrics/processstarttime.go b/vendor/k8s.io/component-base/metrics/processstarttime.go new file mode 100644 index 000000000..4b5e76935 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/processstarttime.go @@ -0,0 +1,51 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "time" + + "k8s.io/klog/v2" +) + +var processStartTime = NewGaugeVec( + &GaugeOpts{ + Name: "process_start_time_seconds", + Help: "Start time of the process since unix epoch in seconds.", + StabilityLevel: ALPHA, + }, + []string{}, +) + +// RegisterProcessStartTime registers the process_start_time_seconds to +// a prometheus registry. This metric needs to be included to ensure counter +// data fidelity. +func RegisterProcessStartTime(registrationFunc func(Registerable) error) error { + start, err := getProcessStart() + if err != nil { + klog.Errorf("Could not get process start time, %v", err) + start = float64(time.Now().Unix()) + } + // processStartTime is a lazy metric which only get initialized after registered. + // so we need to register the metric first and then set the value for it + if err = registrationFunc(processStartTime); err != nil { + return err + } + + processStartTime.WithLabelValues().Set(start) + return nil +} diff --git a/vendor/k8s.io/component-base/metrics/processstarttime_others.go b/vendor/k8s.io/component-base/metrics/processstarttime_others.go new file mode 100644 index 000000000..a14cd8833 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/processstarttime_others.go @@ -0,0 +1,39 @@ +//go:build !windows +// +build !windows + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "os" + + "github.com/prometheus/procfs" +) + +func getProcessStart() (float64, error) { + pid := os.Getpid() + p, err := procfs.NewProc(pid) + if err != nil { + return 0, err + } + + if stat, err := p.Stat(); err == nil { + return stat.StartTime() + } + return 0, err +} diff --git a/vendor/k8s.io/component-base/metrics/processstarttime_windows.go b/vendor/k8s.io/component-base/metrics/processstarttime_windows.go new file mode 100644 index 000000000..7813115e7 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/processstarttime_windows.go @@ -0,0 +1,34 @@ +//go:build windows +// +build windows + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "golang.org/x/sys/windows" +) + +func getProcessStart() (float64, error) { + processHandle := windows.CurrentProcess() + + var creationTime, exitTime, kernelTime, userTime windows.Filetime + if err := windows.GetProcessTimes(processHandle, &creationTime, &exitTime, &kernelTime, &userTime); err != nil { + return 0, err + } + return float64(creationTime.Nanoseconds() / 1e9), nil +} diff --git a/vendor/k8s.io/component-base/metrics/prometheus/feature/metrics.go b/vendor/k8s.io/component-base/metrics/prometheus/feature/metrics.go new file mode 100644 index 000000000..416e5eda2 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/prometheus/feature/metrics.go @@ -0,0 +1,53 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package feature + +import ( + "context" + + k8smetrics "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +var ( + // featureInfo is a Prometheus Gauge metrics used for recording the enablement of a k8s feature. + featureInfo = k8smetrics.NewGaugeVec( + &k8smetrics.GaugeOpts{ + Namespace: "kubernetes", + Name: "feature_enabled", + Help: "This metric records the data about the stage and enablement of a k8s feature.", + StabilityLevel: k8smetrics.BETA, + }, + []string{"name", "stage"}, + ) +) + +func init() { + legacyregistry.MustRegister(featureInfo) +} + +func ResetFeatureInfoMetric() { + featureInfo.Reset() +} + +func RecordFeatureInfo(ctx context.Context, name string, stage string, enabled bool) { + value := 0.0 + if enabled { + value = 1.0 + } + featureInfo.WithContext(ctx).WithLabelValues(name, stage).Set(value) +} diff --git a/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram.go b/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram.go new file mode 100644 index 000000000..be07977e2 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram.go @@ -0,0 +1,189 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package prometheusextension + +import ( + "errors" + "time" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +// GaugeOps is the part of `prometheus.Gauge` that is relevant to +// instrumented code. +// This factoring should be in prometheus, analogous to the way +// it already factors out the Observer interface for histograms and summaries. 
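(Editorial sketch, not part of the vendored file: constructing a TimingHistogram directly; the metric name, buckets, and initial value are assumptions made for illustration.)

    package example

    import (
    	"log"

    	promext "k8s.io/component-base/metrics/prometheusextension"
    )

    // newDepthTracker builds a hypothetical TimingHistogram tracking queue depth.
    func newDepthTracker() promext.TimingHistogram {
    	th, err := promext.NewTimingHistogram(promext.TimingHistogramOpts{
    		Namespace:    "example",
    		Subsystem:    "worker",
    		Name:         "queue_depth",
    		Help:         "Time-weighted distribution of the work queue depth.",
    		Buckets:      []float64{0, 1, 2, 5, 10, 20},
    		InitialValue: 0,
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Each later Set/Inc/Dec folds the time the previous value was in effect
    	// into the matching bucket, as described above.
    	return th
    }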
+type GaugeOps interface { + // Set is the same as Gauge.Set + Set(float64) + // Inc is the same as Gauge.inc + Inc() + // Dec is the same as Gauge.Dec + Dec() + // Add is the same as Gauge.Add + Add(float64) + // Sub is the same as Gauge.Sub + Sub(float64) + + // SetToCurrentTime the same as Gauge.SetToCurrentTime + SetToCurrentTime() +} + +// A TimingHistogram tracks how long a `float64` variable spends in +// ranges defined by buckets. Time is counted in nanoseconds. The +// histogram's sum is the integral over time (in nanoseconds, from +// creation of the histogram) of the variable's value. +type TimingHistogram interface { + prometheus.Metric + prometheus.Collector + GaugeOps +} + +// TimingHistogramOpts is the parameters of the TimingHistogram constructor +type TimingHistogramOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels prometheus.Labels + + // Buckets defines the buckets into which observations are + // accumulated. Each element in the slice is the upper + // inclusive bound of a bucket. The values must be sorted in + // strictly increasing order. There is no need to add a + // highest bucket with +Inf bound. The default value is + // prometheus.DefBuckets. + Buckets []float64 + + // The initial value of the variable. + InitialValue float64 +} + +// NewTimingHistogram creates a new TimingHistogram +func NewTimingHistogram(opts TimingHistogramOpts) (TimingHistogram, error) { + return NewTestableTimingHistogram(time.Now, opts) +} + +// NewTestableTimingHistogram creates a TimingHistogram that uses a mockable clock +func NewTestableTimingHistogram(nowFunc func() time.Time, opts TimingHistogramOpts) (TimingHistogram, error) { + desc := prometheus.NewDesc( + prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + wrapTimingHelp(opts.Help), + nil, + opts.ConstLabels, + ) + return newTimingHistogram(nowFunc, desc, opts) +} + +func wrapTimingHelp(given string) string { + return "EXPERIMENTAL: " + given +} + +func newTimingHistogram(nowFunc func() time.Time, desc *prometheus.Desc, opts TimingHistogramOpts, variableLabelValues ...string) (TimingHistogram, error) { + allLabelsM := prometheus.Labels{} + allLabelsS := prometheus.MakeLabelPairs(desc, variableLabelValues) + for _, pair := range allLabelsS { + if pair == nil || pair.Name == nil || pair.Value == nil { + return nil, errors.New("prometheus.MakeLabelPairs returned a nil") + } + allLabelsM[*pair.Name] = *pair.Value + } + weighted, err := newWeightedHistogram(desc, WeightedHistogramOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: opts.Name, + Help: opts.Help, + ConstLabels: allLabelsM, + Buckets: opts.Buckets, + }, variableLabelValues...) 
+ if err != nil { + return nil, err + } + return &timingHistogram{ + nowFunc: nowFunc, + weighted: weighted, + lastSetTime: nowFunc(), + value: opts.InitialValue, + }, nil +} + +type timingHistogram struct { + nowFunc func() time.Time + weighted *weightedHistogram + + // The following fields must only be accessed with weighted's lock held + + lastSetTime time.Time // identifies when value was last set + value float64 +} + +var _ TimingHistogram = &timingHistogram{} + +func (th *timingHistogram) Set(newValue float64) { + th.update(func(float64) float64 { return newValue }) +} + +func (th *timingHistogram) Inc() { + th.update(func(oldValue float64) float64 { return oldValue + 1 }) +} + +func (th *timingHistogram) Dec() { + th.update(func(oldValue float64) float64 { return oldValue - 1 }) +} + +func (th *timingHistogram) Add(delta float64) { + th.update(func(oldValue float64) float64 { return oldValue + delta }) +} + +func (th *timingHistogram) Sub(delta float64) { + th.update(func(oldValue float64) float64 { return oldValue - delta }) +} + +func (th *timingHistogram) SetToCurrentTime() { + th.update(func(oldValue float64) float64 { return th.nowFunc().Sub(time.Unix(0, 0)).Seconds() }) +} + +func (th *timingHistogram) update(updateFn func(float64) float64) { + th.weighted.lock.Lock() + defer th.weighted.lock.Unlock() + now := th.nowFunc() + delta := now.Sub(th.lastSetTime) + value := th.value + if delta > 0 { + th.weighted.observeWithWeightLocked(value, uint64(delta)) + th.lastSetTime = now + } + th.value = updateFn(value) +} + +func (th *timingHistogram) Desc() *prometheus.Desc { + return th.weighted.Desc() +} + +func (th *timingHistogram) Write(dest *dto.Metric) error { + th.Add(0) // account for time since last update + return th.weighted.Write(dest) +} + +func (th *timingHistogram) Describe(ch chan<- *prometheus.Desc) { + ch <- th.weighted.Desc() +} + +func (th *timingHistogram) Collect(ch chan<- prometheus.Metric) { + ch <- th +} diff --git a/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram_vec.go b/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram_vec.go new file mode 100644 index 000000000..7af1a4586 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram_vec.go @@ -0,0 +1,111 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package prometheusextension + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// GaugeVecOps is a bunch of Gauge that have the same +// Desc and are distinguished by the values for their variable labels. 
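// ----------------------------------------------------------------------------
// Editorial note, not part of the vendored upstream sources: a minimal sketch
// of how the TimingHistogram defined above can be used, assuming a consumer
// package that imports it; the metric name and buckets are illustrative.
package example // hypothetical consumer package

import (
	promext "k8s.io/component-base/metrics/prometheusextension"
)

func exampleTimingHistogram() {
	th, err := promext.NewTimingHistogram(promext.TimingHistogramOpts{
		Namespace:    "example",
		Name:         "queue_length",
		Help:         "Time-weighted distribution of queue length.",
		Buckets:      []float64{0, 1, 2, 4, 8},
		InitialValue: 0,
	})
	if err != nil {
		panic(err)
	}
	// Each update first credits the previous value with the nanoseconds it was
	// in effect (as a weighted observation) and then applies the change, so the
	// histogram's sum is the integral of the value over time.
	th.Inc()  // queue grew to 1
	th.Add(3) // three more items arrived
	th.Set(0) // queue drained
}
// ----------------------------------------------------------------------------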
+type GaugeVecOps interface { + GetMetricWith(prometheus.Labels) (GaugeOps, error) + GetMetricWithLabelValues(lvs ...string) (GaugeOps, error) + With(prometheus.Labels) GaugeOps + WithLabelValues(...string) GaugeOps + CurryWith(prometheus.Labels) (GaugeVecOps, error) + MustCurryWith(prometheus.Labels) GaugeVecOps +} + +type TimingHistogramVec struct { + *prometheus.MetricVec +} + +var _ GaugeVecOps = &TimingHistogramVec{} +var _ prometheus.Collector = &TimingHistogramVec{} + +func NewTimingHistogramVec(opts TimingHistogramOpts, labelNames ...string) *TimingHistogramVec { + return NewTestableTimingHistogramVec(time.Now, opts, labelNames...) +} + +func NewTestableTimingHistogramVec(nowFunc func() time.Time, opts TimingHistogramOpts, labelNames ...string) *TimingHistogramVec { + desc := prometheus.NewDesc( + prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + wrapTimingHelp(opts.Help), + labelNames, + opts.ConstLabels, + ) + return &TimingHistogramVec{ + MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric { + metric, err := newTimingHistogram(nowFunc, desc, opts, lvs...) + if err != nil { + panic(err) // like in prometheus.newHistogram + } + return metric + }), + } +} + +func (hv *TimingHistogramVec) GetMetricWith(labels prometheus.Labels) (GaugeOps, error) { + metric, err := hv.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(GaugeOps), err + } + return nil, err +} + +func (hv *TimingHistogramVec) GetMetricWithLabelValues(lvs ...string) (GaugeOps, error) { + metric, err := hv.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(GaugeOps), err + } + return nil, err +} + +func (hv *TimingHistogramVec) With(labels prometheus.Labels) GaugeOps { + h, err := hv.GetMetricWith(labels) + if err != nil { + panic(err) + } + return h +} + +func (hv *TimingHistogramVec) WithLabelValues(lvs ...string) GaugeOps { + h, err := hv.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return h +} + +func (hv *TimingHistogramVec) CurryWith(labels prometheus.Labels) (GaugeVecOps, error) { + vec, err := hv.MetricVec.CurryWith(labels) + if vec != nil { + return &TimingHistogramVec{MetricVec: vec}, err + } + return nil, err +} + +func (hv *TimingHistogramVec) MustCurryWith(labels prometheus.Labels) GaugeVecOps { + vec, err := hv.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} diff --git a/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram.go b/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram.go new file mode 100644 index 000000000..a060019b2 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram.go @@ -0,0 +1,203 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package prometheusextension + +import ( + "fmt" + "math" + "sort" + "sync" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +// WeightedHistogram generalizes Histogram: each observation has +// an associated _weight_. For a given `x` and `N`, +// `1` call on `ObserveWithWeight(x, N)` has the same meaning as +// `N` calls on `ObserveWithWeight(x, 1)`. +// The weighted sum might differ slightly due to the use of +// floating point, although the implementation takes some steps +// to mitigate that. +// If every weight were 1, +// this would be the same as the existing Histogram abstraction. +type WeightedHistogram interface { + prometheus.Metric + prometheus.Collector + WeightedObserver +} + +// WeightedObserver generalizes the Observer interface. +type WeightedObserver interface { + // Set the variable to the given value with the given weight. + ObserveWithWeight(value float64, weight uint64) +} + +// WeightedHistogramOpts is the same as for an ordinary Histogram +type WeightedHistogramOpts = prometheus.HistogramOpts + +// NewWeightedHistogram creates a new WeightedHistogram +func NewWeightedHistogram(opts WeightedHistogramOpts) (WeightedHistogram, error) { + desc := prometheus.NewDesc( + prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + wrapWeightedHelp(opts.Help), + nil, + opts.ConstLabels, + ) + return newWeightedHistogram(desc, opts) +} + +func wrapWeightedHelp(given string) string { + return "EXPERIMENTAL: " + given +} + +func newWeightedHistogram(desc *prometheus.Desc, opts WeightedHistogramOpts, variableLabelValues ...string) (*weightedHistogram, error) { + if len(opts.Buckets) == 0 { + opts.Buckets = prometheus.DefBuckets + } + + for i, upperBound := range opts.Buckets { + if i < len(opts.Buckets)-1 { + if upperBound >= opts.Buckets[i+1] { + return nil, fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, opts.Buckets[i+1], + ) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + opts.Buckets = opts.Buckets[:i] + } + } + } + upperBounds := make([]float64, len(opts.Buckets)) + copy(upperBounds, opts.Buckets) + + return &weightedHistogram{ + desc: desc, + variableLabelValues: variableLabelValues, + upperBounds: upperBounds, + buckets: make([]uint64, len(upperBounds)+1), + hotCount: initialHotCount, + }, nil +} + +type weightedHistogram struct { + desc *prometheus.Desc + variableLabelValues []string + upperBounds []float64 // exclusive of +Inf + + lock sync.Mutex // applies to all the following + + // buckets is longer by one than upperBounds. + // For 0 <= idx < len(upperBounds), buckets[idx] holds the + // accumulated time.Duration that value has been <= + // upperBounds[idx] but not <= upperBounds[idx-1]. + // buckets[len(upperBounds)] holds the accumulated + // time.Duration when value fit in no other bucket. + buckets []uint64 + + // sumHot + sumCold is the weighted sum of value. + // Rather than risk loss of precision in one + // float64, we do this sum hierarchically. Many successive + // increments are added into sumHot; once in a while + // the magnitude of sumHot is compared to the magnitude + // of sumCold and, if the ratio is high enough, + // sumHot is transferred into sumCold. + sumHot float64 + sumCold float64 + + transferThreshold float64 // = math.Abs(sumCold) / 2^26 (that's about half of the bits of precision in a float64) + + // hotCount is used to decide when to consider dumping sumHot into sumCold. 
+ // hotCount counts upward from initialHotCount to zero. + hotCount int +} + +// initialHotCount is the negative of the number of terms +// that are summed into sumHot before considering whether +// to transfer to sumCold. This only has to be big enough +// to make the extra floating point operations occur in a +// distinct minority of cases. +const initialHotCount = -15 + +var _ WeightedHistogram = &weightedHistogram{} +var _ prometheus.Metric = &weightedHistogram{} +var _ prometheus.Collector = &weightedHistogram{} + +func (sh *weightedHistogram) ObserveWithWeight(value float64, weight uint64) { + idx := sort.SearchFloat64s(sh.upperBounds, value) + sh.lock.Lock() + defer sh.lock.Unlock() + sh.updateLocked(idx, value, weight) +} + +func (sh *weightedHistogram) observeWithWeightLocked(value float64, weight uint64) { + idx := sort.SearchFloat64s(sh.upperBounds, value) + sh.updateLocked(idx, value, weight) +} + +func (sh *weightedHistogram) updateLocked(idx int, value float64, weight uint64) { + sh.buckets[idx] += weight + newSumHot := sh.sumHot + float64(weight)*value + sh.hotCount++ + if sh.hotCount >= 0 { + sh.hotCount = initialHotCount + if math.Abs(newSumHot) > sh.transferThreshold { + newSumCold := sh.sumCold + newSumHot + sh.sumCold = newSumCold + sh.transferThreshold = math.Abs(newSumCold / 67108864) + sh.sumHot = 0 + return + } + } + sh.sumHot = newSumHot +} + +func (sh *weightedHistogram) Desc() *prometheus.Desc { + return sh.desc +} + +func (sh *weightedHistogram) Write(dest *dto.Metric) error { + count, sum, buckets := func() (uint64, float64, map[float64]uint64) { + sh.lock.Lock() + defer sh.lock.Unlock() + nBounds := len(sh.upperBounds) + buckets := make(map[float64]uint64, nBounds) + var count uint64 + for idx, upperBound := range sh.upperBounds { + count += sh.buckets[idx] + buckets[upperBound] = count + } + count += sh.buckets[nBounds] + return count, sh.sumHot + sh.sumCold, buckets + }() + metric, err := prometheus.NewConstHistogram(sh.desc, count, sum, buckets, sh.variableLabelValues...) + if err != nil { + return err + } + return metric.Write(dest) +} + +func (sh *weightedHistogram) Describe(ch chan<- *prometheus.Desc) { + ch <- sh.desc +} + +func (sh *weightedHistogram) Collect(ch chan<- prometheus.Metric) { + ch <- sh +} diff --git a/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram_vec.go b/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram_vec.go new file mode 100644 index 000000000..2ca95f0a7 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram_vec.go @@ -0,0 +1,106 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package prometheusextension + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +// WeightedObserverVec is a bunch of WeightedObservers that have the same +// Desc and are distinguished by the values for their variable labels. 
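// ----------------------------------------------------------------------------
// Editorial note, not part of the vendored upstream sources: a minimal sketch
// of the WeightedHistogram defined above, assuming a consumer package that
// imports it; the metric name, buckets, and values are illustrative.
package example // hypothetical consumer package

import (
	promext "k8s.io/component-base/metrics/prometheusextension"
)

func exampleWeightedHistogram() {
	wh, err := promext.NewWeightedHistogram(promext.WeightedHistogramOpts{
		Namespace: "example",
		Name:      "request_cost",
		Help:      "Distribution of request cost, weighted by number of requests.",
		Buckets:   []float64{1, 2, 4, 8},
	})
	if err != nil {
		panic(err)
	}
	// One call with weight 3 has the same meaning as three calls with weight 1.
	wh.ObserveWithWeight(2.5, 3)
	// wh implements prometheus.Collector and can be registered with any
	// prometheus.Registerer.
}
// ----------------------------------------------------------------------------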
+type WeightedObserverVec interface { + GetMetricWith(prometheus.Labels) (WeightedObserver, error) + GetMetricWithLabelValues(lvs ...string) (WeightedObserver, error) + With(prometheus.Labels) WeightedObserver + WithLabelValues(...string) WeightedObserver + CurryWith(prometheus.Labels) (WeightedObserverVec, error) + MustCurryWith(prometheus.Labels) WeightedObserverVec +} + +// WeightedHistogramVec implements WeightedObserverVec +type WeightedHistogramVec struct { + *prometheus.MetricVec +} + +var _ WeightedObserverVec = &WeightedHistogramVec{} +var _ prometheus.Collector = &WeightedHistogramVec{} + +func NewWeightedHistogramVec(opts WeightedHistogramOpts, labelNames ...string) *WeightedHistogramVec { + desc := prometheus.NewDesc( + prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + wrapWeightedHelp(opts.Help), + labelNames, + opts.ConstLabels, + ) + return &WeightedHistogramVec{ + MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric { + metric, err := newWeightedHistogram(desc, opts, lvs...) + if err != nil { + panic(err) // like in prometheus.newHistogram + } + return metric + }), + } +} + +func (hv *WeightedHistogramVec) GetMetricWith(labels prometheus.Labels) (WeightedObserver, error) { + metric, err := hv.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(WeightedObserver), err + } + return nil, err +} + +func (hv *WeightedHistogramVec) GetMetricWithLabelValues(lvs ...string) (WeightedObserver, error) { + metric, err := hv.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(WeightedObserver), err + } + return nil, err +} + +func (hv *WeightedHistogramVec) With(labels prometheus.Labels) WeightedObserver { + h, err := hv.GetMetricWith(labels) + if err != nil { + panic(err) + } + return h +} + +func (hv *WeightedHistogramVec) WithLabelValues(lvs ...string) WeightedObserver { + h, err := hv.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return h +} + +func (hv *WeightedHistogramVec) CurryWith(labels prometheus.Labels) (WeightedObserverVec, error) { + vec, err := hv.MetricVec.CurryWith(labels) + if vec != nil { + return &WeightedHistogramVec{MetricVec: vec}, err + } + return nil, err +} + +func (hv *WeightedHistogramVec) MustCurryWith(labels prometheus.Labels) WeightedObserverVec { + vec, err := hv.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} diff --git a/vendor/k8s.io/component-base/metrics/registry.go b/vendor/k8s.io/component-base/metrics/registry.go new file mode 100644 index 000000000..1942f9958 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/registry.go @@ -0,0 +1,385 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "fmt" + "sync" + "sync/atomic" + + "github.com/blang/semver/v4" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + + apimachineryversion "k8s.io/apimachinery/pkg/version" + "k8s.io/component-base/version" +) + +var ( + showHiddenOnce sync.Once + disabledMetricsLock sync.RWMutex + showHidden atomic.Bool + registries []*kubeRegistry // stores all registries created by NewKubeRegistry() + registriesLock sync.RWMutex + disabledMetrics = map[string]struct{}{} + + registeredMetrics = NewCounterVec( + &CounterOpts{ + Name: "registered_metrics_total", + Help: "The count of registered metrics broken by stability level and deprecation version.", + StabilityLevel: BETA, + }, + []string{"stability_level", "deprecated_version"}, + ) + + disabledMetricsTotal = NewCounter( + &CounterOpts{ + Name: "disabled_metrics_total", + Help: "The count of disabled metrics.", + StabilityLevel: BETA, + }, + ) + + hiddenMetricsTotal = NewCounter( + &CounterOpts{ + Name: "hidden_metrics_total", + Help: "The count of hidden metrics.", + StabilityLevel: BETA, + }, + ) +) + +// shouldHide be used to check if a specific metric with deprecated version should be hidden +// according to metrics deprecation lifecycle. +func shouldHide(currentVersion *semver.Version, deprecatedVersion *semver.Version) bool { + guardVersion, err := semver.Make(fmt.Sprintf("%d.%d.0", currentVersion.Major, currentVersion.Minor)) + if err != nil { + panic("failed to make version from current version") + } + + if deprecatedVersion.LT(guardVersion) { + return true + } + + return false +} + +// ValidateShowHiddenMetricsVersion checks invalid version for which show hidden metrics. +func ValidateShowHiddenMetricsVersion(v string) []error { + err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), v) + if err != nil { + return []error{err} + } + + return nil +} + +func SetDisabledMetric(name string) { + disabledMetricsLock.Lock() + defer disabledMetricsLock.Unlock() + disabledMetrics[name] = struct{}{} + disabledMetricsTotal.Inc() +} + +// SetShowHidden will enable showing hidden metrics. This will no-opt +// after the initial call +func SetShowHidden() { + showHiddenOnce.Do(func() { + showHidden.Store(true) + + // re-register collectors that has been hidden in phase of last registry. + for _, r := range registries { + r.enableHiddenCollectors() + r.enableHiddenStableCollectors() + } + }) +} + +// ShouldShowHidden returns whether showing hidden deprecated metrics +// is enabled. While the primary usecase for this is internal (to determine +// registration behavior) this can also be used to introspect +func ShouldShowHidden() bool { + return showHidden.Load() +} + +// Registerable is an interface for a collector metric which we +// will register with KubeRegistry. +type Registerable interface { + prometheus.Collector + + // Create will mark deprecated state for the collector + Create(version *semver.Version) bool + + // ClearState will clear all the states marked by Create. + ClearState() + + // FQName returns the fully-qualified metric name of the collector. 
+ FQName() string +} + +type resettable interface { + Reset() +} + +// KubeRegistry is an interface which implements a subset of prometheus.Registerer and +// prometheus.Gatherer interfaces +type KubeRegistry interface { + // Deprecated + RawMustRegister(...prometheus.Collector) + // CustomRegister is our internal variant of Prometheus registry.Register + CustomRegister(c StableCollector) error + // CustomMustRegister is our internal variant of Prometheus registry.MustRegister + CustomMustRegister(cs ...StableCollector) + // Register conforms to Prometheus registry.Register + Register(Registerable) error + // MustRegister conforms to Prometheus registry.MustRegister + MustRegister(...Registerable) + // Unregister conforms to Prometheus registry.Unregister + Unregister(collector Collector) bool + // Gather conforms to Prometheus gatherer.Gather + Gather() ([]*dto.MetricFamily, error) + // Reset invokes the Reset() function on all items in the registry + // which are added as resettables. + Reset() + // RegisterMetaMetrics registers metrics about the number of registered metrics. + RegisterMetaMetrics() + // Registerer exposes the underlying prometheus registerer + Registerer() prometheus.Registerer + // Gatherer exposes the underlying prometheus gatherer + Gatherer() prometheus.Gatherer +} + +// kubeRegistry is a wrapper around a prometheus registry-type object. Upon initialization +// the kubernetes binary version information is loaded into the registry object, so that +// automatic behavior can be configured for metric versioning. +type kubeRegistry struct { + PromRegistry + version semver.Version + hiddenCollectors map[string]Registerable // stores all collectors that has been hidden + stableCollectors []StableCollector // stores all stable collector + hiddenCollectorsLock sync.RWMutex + stableCollectorsLock sync.RWMutex + resetLock sync.RWMutex + resettables []resettable +} + +// Register registers a new Collector to be included in metrics +// collection. It returns an error if the descriptors provided by the +// Collector are invalid or if they — in combination with descriptors of +// already registered Collectors — do not fulfill the consistency and +// uniqueness criteria described in the documentation of metric.Desc. +func (kr *kubeRegistry) Register(c Registerable) error { + if c.Create(&kr.version) { + defer kr.addResettable(c) + return kr.PromRegistry.Register(c) + } + + kr.trackHiddenCollector(c) + return nil +} + +// Registerer exposes the underlying prometheus.Registerer +func (kr *kubeRegistry) Registerer() prometheus.Registerer { + return kr.PromRegistry +} + +// Gatherer exposes the underlying prometheus.Gatherer +func (kr *kubeRegistry) Gatherer() prometheus.Gatherer { + return kr.PromRegistry +} + +// MustRegister works like Register but registers any number of +// Collectors and panics upon the first registration that causes an +// error. +func (kr *kubeRegistry) MustRegister(cs ...Registerable) { + metrics := make([]prometheus.Collector, 0, len(cs)) + for _, c := range cs { + if c.Create(&kr.version) { + metrics = append(metrics, c) + kr.addResettable(c) + } else { + kr.trackHiddenCollector(c) + } + } + kr.PromRegistry.MustRegister(metrics...) +} + +// CustomRegister registers a new custom collector. 
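// ----------------------------------------------------------------------------
// Editorial note, not part of the vendored upstream sources: a minimal sketch
// of registering and gathering through the KubeRegistry described above,
// assuming a consumer package importing k8s.io/component-base/metrics; the
// counter name is illustrative.
package example // hypothetical consumer package

import (
	"k8s.io/component-base/metrics"
)

func exampleKubeRegistry() {
	r := metrics.NewKubeRegistry()
	c := metrics.NewCounter(&metrics.CounterOpts{
		Name:           "example_operations_total",
		Help:           "Number of example operations performed.",
		StabilityLevel: metrics.ALPHA,
	})
	// MustRegister checks the metric's hidden/deprecated state against the
	// binary version; hidden metrics are tracked instead of being registered.
	r.MustRegister(c)
	c.Inc()
	if _, err := r.Gather(); err != nil {
		panic(err)
	}
}
// ----------------------------------------------------------------------------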
+func (kr *kubeRegistry) CustomRegister(c StableCollector) error { + kr.trackStableCollectors(c) + defer kr.addResettable(c) + if c.Create(&kr.version, c) { + return kr.PromRegistry.Register(c) + } + return nil +} + +// CustomMustRegister works like CustomRegister but registers any number of +// StableCollectors and panics upon the first registration that causes an +// error. +func (kr *kubeRegistry) CustomMustRegister(cs ...StableCollector) { + kr.trackStableCollectors(cs...) + collectors := make([]prometheus.Collector, 0, len(cs)) + for _, c := range cs { + if c.Create(&kr.version, c) { + kr.addResettable(c) + collectors = append(collectors, c) + } + } + kr.PromRegistry.MustRegister(collectors...) +} + +// RawMustRegister takes a native prometheus.Collector and registers the collector +// to the registry. This bypasses metrics safety checks, so should only be used +// to register custom prometheus collectors. +// +// Deprecated +func (kr *kubeRegistry) RawMustRegister(cs ...prometheus.Collector) { + kr.PromRegistry.MustRegister(cs...) + for _, c := range cs { + kr.addResettable(c) + } +} + +// addResettable will automatically add our metric to our reset +// list if it satisfies the interface +func (kr *kubeRegistry) addResettable(i interface{}) { + kr.resetLock.Lock() + defer kr.resetLock.Unlock() + if resettable, ok := i.(resettable); ok { + kr.resettables = append(kr.resettables, resettable) + } +} + +// Unregister unregisters the Collector that equals the Collector passed +// in as an argument. (Two Collectors are considered equal if their +// Describe method yields the same set of descriptors.) The function +// returns whether a Collector was unregistered. Note that an unchecked +// Collector cannot be unregistered (as its Describe method does not +// yield any descriptor). +func (kr *kubeRegistry) Unregister(collector Collector) bool { + return kr.PromRegistry.Unregister(collector) +} + +// Gather calls the Collect method of the registered Collectors and then +// gathers the collected metrics into a lexicographically sorted slice +// of uniquely named MetricFamily protobufs. Gather ensures that the +// returned slice is valid and self-consistent so that it can be used +// for valid exposition. As an exception to the strict consistency +// requirements described for metric.Desc, Gather will tolerate +// different sets of label names for metrics of the same metric family. +func (kr *kubeRegistry) Gather() ([]*dto.MetricFamily, error) { + return kr.PromRegistry.Gather() +} + +// trackHiddenCollector stores all hidden collectors. +func (kr *kubeRegistry) trackHiddenCollector(c Registerable) { + kr.hiddenCollectorsLock.Lock() + defer kr.hiddenCollectorsLock.Unlock() + + kr.hiddenCollectors[c.FQName()] = c + hiddenMetricsTotal.Inc() +} + +// trackStableCollectors stores all custom collectors. +func (kr *kubeRegistry) trackStableCollectors(cs ...StableCollector) { + kr.stableCollectorsLock.Lock() + defer kr.stableCollectorsLock.Unlock() + + kr.stableCollectors = append(kr.stableCollectors, cs...) +} + +// enableHiddenCollectors will re-register all of the hidden collectors. +func (kr *kubeRegistry) enableHiddenCollectors() { + if len(kr.hiddenCollectors) == 0 { + return + } + + kr.hiddenCollectorsLock.Lock() + cs := make([]Registerable, 0, len(kr.hiddenCollectors)) + + for _, c := range kr.hiddenCollectors { + c.ClearState() + cs = append(cs, c) + } + + kr.hiddenCollectors = make(map[string]Registerable) + kr.hiddenCollectorsLock.Unlock() + kr.MustRegister(cs...) 
+} + +// enableHiddenStableCollectors will re-register the stable collectors if there is one or more hidden metrics in it. +// Since we can not register a metrics twice, so we have to unregister first then register again. +func (kr *kubeRegistry) enableHiddenStableCollectors() { + if len(kr.stableCollectors) == 0 { + return + } + + kr.stableCollectorsLock.Lock() + + cs := make([]StableCollector, 0, len(kr.stableCollectors)) + for _, c := range kr.stableCollectors { + if len(c.HiddenMetrics()) > 0 { + kr.Unregister(c) // unregister must happens before clear state, otherwise no metrics would be unregister + c.ClearState() + cs = append(cs, c) + } + } + + kr.stableCollectors = nil + kr.stableCollectorsLock.Unlock() + kr.CustomMustRegister(cs...) +} + +// Reset invokes Reset on all metrics that are resettable. +func (kr *kubeRegistry) Reset() { + kr.resetLock.RLock() + defer kr.resetLock.RUnlock() + for _, r := range kr.resettables { + r.Reset() + } +} + +// BuildVersion is a helper function that can be easily mocked. +var BuildVersion = version.Get + +func newKubeRegistry(v apimachineryversion.Info) *kubeRegistry { + r := &kubeRegistry{ + PromRegistry: prometheus.NewRegistry(), + version: parseVersion(v), + hiddenCollectors: make(map[string]Registerable), + resettables: make([]resettable, 0), + } + + registriesLock.Lock() + defer registriesLock.Unlock() + registries = append(registries, r) + + return r +} + +// NewKubeRegistry creates a new vanilla Registry +func NewKubeRegistry() KubeRegistry { + r := newKubeRegistry(BuildVersion()) + return r +} + +func (r *kubeRegistry) RegisterMetaMetrics() { + r.MustRegister(registeredMetrics) + r.MustRegister(disabledMetricsTotal) + r.MustRegister(hiddenMetricsTotal) +} diff --git a/vendor/k8s.io/component-base/metrics/summary.go b/vendor/k8s.io/component-base/metrics/summary.go new file mode 100644 index 000000000..d40421645 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/summary.go @@ -0,0 +1,226 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + + "github.com/blang/semver/v4" + "github.com/prometheus/client_golang/prometheus" +) + +const ( + DefAgeBuckets = prometheus.DefAgeBuckets + DefBufCap = prometheus.DefBufCap + DefMaxAge = prometheus.DefMaxAge +) + +// Summary is our internal representation for our wrapping struct around prometheus +// summaries. Summary implements both kubeCollector and ObserverMetric +// +// DEPRECATED: as per the metrics overhaul KEP +type Summary struct { + ObserverMetric + *SummaryOpts + lazyMetric + selfCollector +} + +// NewSummary returns an object which is Summary-like. However, nothing +// will be measured until the summary is registered somewhere. 
+// +// DEPRECATED: as per the metrics overhaul KEP +func NewSummary(opts *SummaryOpts) *Summary { + opts.StabilityLevel.setDefaults() + + s := &Summary{ + SummaryOpts: opts, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + s.setPrometheusSummary(noopMetric{}) + s.lazyInit(s, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return s +} + +// setPrometheusSummary sets the underlying KubeGauge object, i.e. the thing that does the measurement. +func (s *Summary) setPrometheusSummary(summary prometheus.Summary) { + s.ObserverMetric = summary + s.initSelfCollection(summary) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (s *Summary) DeprecatedVersion() *semver.Version { + return parseSemver(s.SummaryOpts.DeprecatedVersion) +} + +// initializeMetric invokes the actual prometheus.Summary object instantiation +// and stores a reference to it +func (s *Summary) initializeMetric() { + s.SummaryOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus gauge. + s.setPrometheusSummary(prometheus.NewSummary(s.SummaryOpts.toPromSummaryOpts())) +} + +// initializeDeprecatedMetric invokes the actual prometheus.Summary object instantiation +// but modifies the Help description prior to object instantiation. +func (s *Summary) initializeDeprecatedMetric() { + s.SummaryOpts.markDeprecated() + s.initializeMetric() +} + +// WithContext allows the normal Summary metric to pass in context. The context is no-op now. +func (s *Summary) WithContext(ctx context.Context) ObserverMetric { + return s.ObserverMetric +} + +// SummaryVec is the internal representation of our wrapping struct around prometheus +// summaryVecs. +// +// DEPRECATED: as per the metrics overhaul KEP +type SummaryVec struct { + *prometheus.SummaryVec + *SummaryOpts + lazyMetric + originalLabels []string +} + +// NewSummaryVec returns an object which satisfies kubeCollector and wraps the +// prometheus.SummaryVec object. However, the object returned will not measure +// anything unless the collector is first registered, since the metric is lazily instantiated, +// and only members extracted after +// registration will actually measure anything. +// +// DEPRECATED: as per the metrics overhaul KEP +func NewSummaryVec(opts *SummaryOpts, labels []string) *SummaryVec { + opts.StabilityLevel.setDefaults() + + fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[fqName]; ok { + opts.LabelValueAllowLists = allowList + } + allowListLock.RUnlock() + + v := &SummaryVec{ + SummaryOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + v.lazyInit(v, fqName) + return v +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *SummaryVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.SummaryOpts.DeprecatedVersion) +} + +func (v *SummaryVec) initializeMetric() { + v.SummaryOpts.annotateStabilityLevel() + v.SummaryVec = prometheus.NewSummaryVec(v.SummaryOpts.toPromSummaryOpts(), v.originalLabels) +} + +func (v *SummaryVec) initializeDeprecatedMetric() { + v.SummaryOpts.markDeprecated() + v.initializeMetric() +} + +// Default Prometheus Vec behavior is that member extraction results in creation of a new element +// if one with the unique label values is not found in the underlying stored metricMap. 
+// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/histogram.go#L460-L470 +// +// In contrast, the Vec behavior in this package is that member extraction before registration +// returns a permanent noop object. + +// WithLabelValues returns the ObserverMetric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new ObserverMetric is created IFF the summaryVec +// has been registered to a metrics registry. +func (v *SummaryVec) WithLabelValues(lvs ...string) ObserverMetric { + if !v.IsCreated() { + return noop + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } + return v.SummaryVec.WithLabelValues(lvs...) +} + +// With returns the ObserverMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new ObserverMetric is created IFF the summaryVec has +// been registered to a metrics registry. +func (v *SummaryVec) With(labels map[string]string) ObserverMetric { + if !v.IsCreated() { + return noop + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainLabelMap(labels) + } + return v.SummaryVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *SummaryVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.SummaryVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *SummaryVec) Reset() { + if !v.IsCreated() { + return + } + + v.SummaryVec.Reset() +} + +// WithContext returns wrapped SummaryVec with context +func (v *SummaryVec) WithContext(ctx context.Context) *SummaryVecWithContext { + return &SummaryVecWithContext{ + ctx: ctx, + SummaryVec: v, + } +} + +// SummaryVecWithContext is the wrapper of SummaryVec with context. +type SummaryVecWithContext struct { + *SummaryVec + ctx context.Context +} + +// WithLabelValues is the wrapper of SummaryVec.WithLabelValues. +func (vc *SummaryVecWithContext) WithLabelValues(lvs ...string) ObserverMetric { + return vc.SummaryVec.WithLabelValues(lvs...) +} + +// With is the wrapper of SummaryVec.With. +func (vc *SummaryVecWithContext) With(labels map[string]string) ObserverMetric { + return vc.SummaryVec.With(labels) +} diff --git a/vendor/k8s.io/component-base/metrics/timing_histogram.go b/vendor/k8s.io/component-base/metrics/timing_histogram.go new file mode 100644 index 000000000..a0f0b253c --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/timing_histogram.go @@ -0,0 +1,270 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + "time" + + "github.com/blang/semver/v4" + promext "k8s.io/component-base/metrics/prometheusextension" +) + +// PrometheusTimingHistogram is the abstraction of the underlying histogram +// that we want to promote from the wrapper. +type PrometheusTimingHistogram interface { + GaugeMetric +} + +// TimingHistogram is our internal representation for our wrapping struct around +// timing histograms. It implements both kubeCollector and GaugeMetric +type TimingHistogram struct { + PrometheusTimingHistogram + *TimingHistogramOpts + nowFunc func() time.Time + lazyMetric + selfCollector +} + +var _ GaugeMetric = &TimingHistogram{} +var _ Registerable = &TimingHistogram{} +var _ kubeCollector = &TimingHistogram{} + +// NewTimingHistogram returns an object which is TimingHistogram-like. However, nothing +// will be measured until the histogram is registered somewhere. +func NewTimingHistogram(opts *TimingHistogramOpts) *TimingHistogram { + return NewTestableTimingHistogram(time.Now, opts) +} + +// NewTestableTimingHistogram adds injection of the clock +func NewTestableTimingHistogram(nowFunc func() time.Time, opts *TimingHistogramOpts) *TimingHistogram { + opts.StabilityLevel.setDefaults() + + h := &TimingHistogram{ + TimingHistogramOpts: opts, + nowFunc: nowFunc, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + h.setPrometheusHistogram(noopMetric{}) + h.lazyInit(h, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return h +} + +// setPrometheusHistogram sets the underlying KubeGauge object, i.e. the thing that does the measurement. +func (h *TimingHistogram) setPrometheusHistogram(histogram promext.TimingHistogram) { + h.PrometheusTimingHistogram = histogram + h.initSelfCollection(histogram) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (h *TimingHistogram) DeprecatedVersion() *semver.Version { + return parseSemver(h.TimingHistogramOpts.DeprecatedVersion) +} + +// initializeMetric invokes the actual prometheus.Histogram object instantiation +// and stores a reference to it +func (h *TimingHistogram) initializeMetric() { + h.TimingHistogramOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus gauge. + histogram, err := promext.NewTestableTimingHistogram(h.nowFunc, h.TimingHistogramOpts.toPromHistogramOpts()) + if err != nil { + panic(err) // handle as for regular histograms + } + h.setPrometheusHistogram(histogram) +} + +// initializeDeprecatedMetric invokes the actual prometheus.Histogram object instantiation +// but modifies the Help description prior to object instantiation. +func (h *TimingHistogram) initializeDeprecatedMetric() { + h.TimingHistogramOpts.markDeprecated() + h.initializeMetric() +} + +// WithContext allows the normal TimingHistogram metric to pass in context. The context is no-op now. +func (h *TimingHistogram) WithContext(ctx context.Context) GaugeMetric { + return h.PrometheusTimingHistogram +} + +// TimingHistogramVec is the internal representation of our wrapping struct around prometheus +// TimingHistogramVecs. 
+type TimingHistogramVec struct { + *promext.TimingHistogramVec + *TimingHistogramOpts + nowFunc func() time.Time + lazyMetric + originalLabels []string +} + +var _ GaugeVecMetric = &TimingHistogramVec{} +var _ Registerable = &TimingHistogramVec{} +var _ kubeCollector = &TimingHistogramVec{} + +// NewTimingHistogramVec returns an object which satisfies the kubeCollector, Registerable, and GaugeVecMetric interfaces +// and wraps an underlying promext.TimingHistogramVec object. Note well the way that +// behavior depends on registration and whether this is hidden. +func NewTimingHistogramVec(opts *TimingHistogramOpts, labels []string) *TimingHistogramVec { + return NewTestableTimingHistogramVec(time.Now, opts, labels) +} + +// NewTestableTimingHistogramVec adds injection of the clock. +func NewTestableTimingHistogramVec(nowFunc func() time.Time, opts *TimingHistogramOpts, labels []string) *TimingHistogramVec { + opts.StabilityLevel.setDefaults() + + fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[fqName]; ok { + opts.LabelValueAllowLists = allowList + } + allowListLock.RUnlock() + + v := &TimingHistogramVec{ + TimingHistogramVec: noopTimingHistogramVec, + TimingHistogramOpts: opts, + nowFunc: nowFunc, + originalLabels: labels, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + v.lazyInit(v, fqName) + return v +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *TimingHistogramVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.TimingHistogramOpts.DeprecatedVersion) +} + +func (v *TimingHistogramVec) initializeMetric() { + v.TimingHistogramOpts.annotateStabilityLevel() + v.TimingHistogramVec = promext.NewTestableTimingHistogramVec(v.nowFunc, v.TimingHistogramOpts.toPromHistogramOpts(), v.originalLabels...) +} + +func (v *TimingHistogramVec) initializeDeprecatedMetric() { + v.TimingHistogramOpts.markDeprecated() + v.initializeMetric() +} + +// WithLabelValuesChecked, if called before this vector has been registered in +// at least one registry, will return a noop gauge and +// an error that passes ErrIsNotRegistered. +// If called on a hidden vector, +// will return a noop gauge and a nil error. +// If called with a syntactic problem in the labels, will +// return a noop gauge and an error about the labels. +// If none of the above apply, this method will return +// the appropriate vector member and a nil error. +func (v *TimingHistogramVec) WithLabelValuesChecked(lvs ...string) (GaugeMetric, error) { + if !v.IsCreated() { + if v.IsHidden() { + return noop, nil + } + return noop, errNotRegistered + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } + ops, err := v.TimingHistogramVec.GetMetricWithLabelValues(lvs...) + if err != nil { + return noop, err + } + return ops.(GaugeMetric), err +} + +// WithLabelValues calls WithLabelValuesChecked +// and handles errors as follows. +// An error that passes ErrIsNotRegistered is ignored +// and the noop gauge is returned; +// all other errors cause a panic. +func (v *TimingHistogramVec) WithLabelValues(lvs ...string) GaugeMetric { + ans, err := v.WithLabelValuesChecked(lvs...) + if err == nil || ErrIsNotRegistered(err) { + return ans + } + panic(err) +} + +// WithChecked, if called before this vector has been registered in +// at least one registry, will return a noop gauge and +// an error that passes ErrIsNotRegistered. 
+// If called on a hidden vector, +// will return a noop gauge and a nil error. +// If called with a syntactic problem in the labels, will +// return a noop gauge and an error about the labels. +// If none of the above apply, this method will return +// the appropriate vector member and a nil error. +func (v *TimingHistogramVec) WithChecked(labels map[string]string) (GaugeMetric, error) { + if !v.IsCreated() { + if v.IsHidden() { + return noop, nil + } + return noop, errNotRegistered + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainLabelMap(labels) + } + ops, err := v.TimingHistogramVec.GetMetricWith(labels) + return ops.(GaugeMetric), err +} + +// With calls WithChecked and handles errors as follows. +// An error that passes ErrIsNotRegistered is ignored +// and the noop gauge is returned; +// all other errors cause a panic. +func (v *TimingHistogramVec) With(labels map[string]string) GaugeMetric { + ans, err := v.WithChecked(labels) + if err == nil || ErrIsNotRegistered(err) { + return ans + } + panic(err) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *TimingHistogramVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.TimingHistogramVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *TimingHistogramVec) Reset() { + if !v.IsCreated() { + return + } + + v.TimingHistogramVec.Reset() +} + +// WithContext returns wrapped TimingHistogramVec with context +func (v *TimingHistogramVec) InterfaceWithContext(ctx context.Context) GaugeVecMetric { + return &TimingHistogramVecWithContext{ + ctx: ctx, + TimingHistogramVec: v, + } +} + +// TimingHistogramVecWithContext is the wrapper of TimingHistogramVec with context. +// Currently the context is ignored. +type TimingHistogramVecWithContext struct { + *TimingHistogramVec + ctx context.Context +} diff --git a/vendor/k8s.io/component-base/metrics/value.go b/vendor/k8s.io/component-base/metrics/value.go new file mode 100644 index 000000000..4a405048c --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/value.go @@ -0,0 +1,70 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. 
+const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +func (vt *ValueType) toPromValueType() prometheus.ValueType { + return prometheus.ValueType(*vt) +} + +// NewLazyConstMetric is a helper of MustNewConstMetric. +// +// Note: If the metrics described by the desc is hidden, the metrics will not be created. +func NewLazyConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + if desc.IsHidden() { + return nil + } + return prometheus.MustNewConstMetric(desc.toPrometheusDesc(), valueType.toPromValueType(), value, labelValues...) +} + +// NewConstMetric is a helper of NewConstMetric. +// +// Note: If the metrics described by the desc is hidden, the metrics will not be created. +func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if desc.IsHidden() { + return nil, nil + } + return prometheus.NewConstMetric(desc.toPrometheusDesc(), valueType.toPromValueType(), value, labelValues...) +} + +// NewLazyMetricWithTimestamp is a helper of NewMetricWithTimestamp. +// +// Warning: the Metric 'm' must be the one created by NewLazyConstMetric(), +// otherwise, no stability guarantees would be offered. +func NewLazyMetricWithTimestamp(t time.Time, m Metric) Metric { + if m == nil { + return nil + } + + return prometheus.NewMetricWithTimestamp(t, m) +} diff --git a/vendor/k8s.io/component-base/metrics/version.go b/vendor/k8s.io/component-base/metrics/version.go new file mode 100644 index 000000000..f963e205e --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/version.go @@ -0,0 +1,37 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import "k8s.io/component-base/version" + +var ( + buildInfo = NewGaugeVec( + &GaugeOpts{ + Name: "kubernetes_build_info", + Help: "A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running.", + StabilityLevel: ALPHA, + }, + []string{"major", "minor", "git_version", "git_commit", "git_tree_state", "build_date", "go_version", "compiler", "platform"}, + ) +) + +// RegisterBuildInfo registers the build and version info in a metadata metric in prometheus +func RegisterBuildInfo(r KubeRegistry) { + info := version.Get() + r.MustRegister(buildInfo) + buildInfo.WithLabelValues(info.Major, info.Minor, info.GitVersion, info.GitCommit, info.GitTreeState, info.BuildDate, info.GoVersion, info.Compiler, info.Platform).Set(1) +} diff --git a/vendor/k8s.io/component-base/metrics/version_parser.go b/vendor/k8s.io/component-base/metrics/version_parser.go new file mode 100644 index 000000000..102e108e2 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/version_parser.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "regexp" + + "github.com/blang/semver/v4" + + apimachineryversion "k8s.io/apimachinery/pkg/version" +) + +const ( + versionRegexpString = `^v(\d+\.\d+\.\d+)` +) + +var ( + versionRe = regexp.MustCompile(versionRegexpString) +) + +func parseSemver(s string) *semver.Version { + if s != "" { + sv := semver.MustParse(s) + return &sv + } + return nil +} +func parseVersion(ver apimachineryversion.Info) semver.Version { + matches := versionRe.FindAllStringSubmatch(ver.String(), -1) + + if len(matches) != 1 { + panic(fmt.Sprintf("version string \"%v\" doesn't match expected regular expression: \"%v\"", ver.String(), versionRe.String())) + } + return semver.MustParse(matches[0][1]) +} diff --git a/vendor/k8s.io/component-base/metrics/wrappers.go b/vendor/k8s.io/component-base/metrics/wrappers.go new file mode 100644 index 000000000..679590aad --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/wrappers.go @@ -0,0 +1,167 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "errors" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +// This file contains a series of interfaces which we explicitly define for +// integrating with prometheus. We redefine the interfaces explicitly here +// so that we can prevent breakage if methods are ever added to prometheus +// variants of them. + +// Collector defines a subset of prometheus.Collector interface methods +type Collector interface { + Describe(chan<- *prometheus.Desc) + Collect(chan<- prometheus.Metric) +} + +// Metric defines a subset of prometheus.Metric interface methods +type Metric interface { + Desc() *prometheus.Desc + Write(*dto.Metric) error +} + +// CounterMetric is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. + +// CounterMetric is an interface which defines a subset of the interface provided by prometheus.Counter +type CounterMetric interface { + Inc() + Add(float64) +} + +// CounterVecMetric is an interface which prometheus.CounterVec satisfies. 
+type CounterVecMetric interface { + WithLabelValues(...string) CounterMetric + With(prometheus.Labels) CounterMetric +} + +// GaugeMetric is an interface which defines a subset of the interface provided by prometheus.Gauge +type GaugeMetric interface { + Set(float64) + Inc() + Dec() + Add(float64) + Write(out *dto.Metric) error + SetToCurrentTime() +} + +// GaugeVecMetric is a collection of Gauges that differ only in label values. +type GaugeVecMetric interface { + // Default Prometheus Vec behavior is that member extraction results in creation of a new element + // if one with the unique label values is not found in the underlying stored metricMap. + // This means that if this function is called but the underlying metric is not registered + // (which means it will never be exposed externally nor consumed), the metric would exist in memory + // for perpetuity (i.e. throughout application lifecycle). + // + // For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/gauge.go#L190-L208 + // + // In contrast, the Vec behavior in this package is that member extraction before registration + // returns a permanent noop object. + + // WithLabelValuesChecked, if called before this vector has been registered in + // at least one registry, will return a noop gauge and + // an error that passes ErrIsNotRegistered. + // If called on a hidden vector, + // will return a noop gauge and a nil error. + // If called with a syntactic problem in the labels, will + // return a noop gauge and an error about the labels. + // If none of the above apply, this method will return + // the appropriate vector member and a nil error. + WithLabelValuesChecked(labelValues ...string) (GaugeMetric, error) + + // WithLabelValues calls WithLabelValuesChecked + // and handles errors as follows. + // An error that passes ErrIsNotRegistered is ignored + // and the noop gauge is returned; + // all other errors cause a panic. + WithLabelValues(labelValues ...string) GaugeMetric + + // WithChecked, if called before this vector has been registered in + // at least one registry, will return a noop gauge and + // an error that passes ErrIsNotRegistered. + // If called on a hidden vector, + // will return a noop gauge and a nil error. + // If called with a syntactic problem in the labels, will + // return a noop gauge and an error about the labels. + // If none of the above apply, this method will return + // the appropriate vector member and a nil error. + WithChecked(labels map[string]string) (GaugeMetric, error) + + // With calls WithChecked and handles errors as follows. + // An error that passes ErrIsNotRegistered is ignored + // and the noop gauge is returned; + // all other errors cause a panic. + With(labels map[string]string) GaugeMetric + + // Delete asserts that the vec should have no member for the given label set. + // The returned bool indicates whether there was a change. + // The return will certainly be `false` if the given label set has the wrong + // set of label names. + Delete(map[string]string) bool + + // Reset removes all the members + Reset() +} + +// ObserverMetric captures individual observations. 
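// ----------------------------------------------------------------------------
// Editorial note, not part of the vendored upstream sources: a minimal sketch
// of the registration-dependent behavior documented on GaugeVecMetric above,
// using the TimingHistogramVec added earlier in this patch. The Name and Help
// fields of TimingHistogramOpts are assumed from the usual opts pattern and
// the label values are illustrative.
package example // hypothetical consumer package

import (
	"k8s.io/component-base/metrics"
)

func exampleVecBeforeRegistration(r metrics.KubeRegistry) {
	vec := metrics.NewTimingHistogramVec(
		&metrics.TimingHistogramOpts{Name: "example_queue_depth", Help: "Time-weighted queue depth."},
		[]string{"queue"},
	)
	// Before the vector is registered anywhere, member extraction yields a
	// noop gauge and an error that passes ErrIsNotRegistered.
	if _, err := vec.WithLabelValuesChecked("q1"); metrics.ErrIsNotRegistered(err) {
		// expected: the vector is not registered yet
	}
	r.MustRegister(vec)
	// After registration, members are backed by real timing histograms.
	vec.WithLabelValues("q1").Set(3)
}
// ----------------------------------------------------------------------------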
+type ObserverMetric interface { + Observe(float64) +} + +// PromRegistry is an interface which implements a subset of prometheus.Registerer and +// prometheus.Gatherer interfaces +type PromRegistry interface { + Register(prometheus.Collector) error + MustRegister(...prometheus.Collector) + Unregister(prometheus.Collector) bool + Gather() ([]*dto.MetricFamily, error) +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. +type Gatherer interface { + prometheus.Gatherer +} + +// Registerer is the interface for the part of a registry in charge of registering +// the collected metrics. +type Registerer interface { + prometheus.Registerer +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +func ErrIsNotRegistered(err error) bool { + return err == errNotRegistered +} + +var errNotRegistered = errors.New("metric vec is not registered yet") diff --git a/vendor/modules.txt b/vendor/modules.txt index 0559552a1..057d97f05 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -20,6 +20,9 @@ github.com/ahmetb/gen-crd-api-reference-docs # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile +# github.com/blang/semver/v4 v4.0.0 +## explicit; go 1.14 +github.com/blang/semver/v4 # github.com/bronze1man/yaml2json v0.0.0-20211227013850-8972abeaea25 ## explicit github.com/bronze1man/yaml2json @@ -37,6 +40,9 @@ github.com/davecgh/go-spew/spew ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log +# github.com/evanphx/json-patch v5.6.0+incompatible +## explicit +github.com/evanphx/json-patch # github.com/evanphx/json-patch/v5 v5.6.0 ## explicit; go 1.12 github.com/evanphx/json-patch/v5 @@ -114,6 +120,7 @@ github.com/gardener/gardener/pkg/extensions github.com/gardener/gardener/pkg/gardenlet/apis/config github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1 github.com/gardener/gardener/pkg/logger +github.com/gardener/gardener/pkg/mock/controller-runtime/client github.com/gardener/gardener/pkg/resourcemanager/controller/garbagecollector/references github.com/gardener/gardener/pkg/utils github.com/gardener/gardener/pkg/utils/context @@ -125,6 +132,8 @@ github.com/gardener/gardener/pkg/utils/kubernetes/health github.com/gardener/gardener/pkg/utils/kubernetes/unstructured github.com/gardener/gardener/pkg/utils/retry github.com/gardener/gardener/pkg/utils/secrets +github.com/gardener/gardener/pkg/utils/test +github.com/gardener/gardener/pkg/utils/test/matchers github.com/gardener/gardener/pkg/utils/timewindow github.com/gardener/gardener/pkg/utils/validation/kubernetesversion github.com/gardener/gardener/pkg/utils/version @@ -332,6 +341,8 @@ github.com/onsi/ginkgo/v2/types ## explicit; go 1.18 github.com/onsi/gomega github.com/onsi/gomega/format +github.com/onsi/gomega/gstruct +github.com/onsi/gomega/gstruct/errors github.com/onsi/gomega/internal github.com/onsi/gomega/internal/gutil github.com/onsi/gomega/matchers @@ -378,6 +389,7 @@ github.com/spf13/cobra github.com/spf13/pflag # go.uber.org/mock v0.2.0 ## explicit; go 1.19 +go.uber.org/mock/gomock go.uber.org/mock/mockgen go.uber.org/mock/mockgen/model # go.uber.org/multierr v1.11.0 @@ -805,6 +817,7 @@ k8s.io/client-go/plugin/pkg/client/auth/exec k8s.io/client-go/rest k8s.io/client-go/rest/watch k8s.io/client-go/restmapper 
+k8s.io/client-go/testing k8s.io/client-go/tools/auth k8s.io/client-go/tools/cache k8s.io/client-go/tools/cache/synctrack @@ -873,6 +886,11 @@ k8s.io/code-generator/third_party/forked/golang/reflect ## explicit; go 1.20 k8s.io/component-base/config k8s.io/component-base/config/v1alpha1 +k8s.io/component-base/featuregate +k8s.io/component-base/metrics +k8s.io/component-base/metrics/legacyregistry +k8s.io/component-base/metrics/prometheus/feature +k8s.io/component-base/metrics/prometheusextension k8s.io/component-base/version # k8s.io/gengo v0.0.0-20220902162205-c0856e24416d ## explicit; go 1.13 @@ -961,6 +979,8 @@ sigs.k8s.io/controller-runtime/pkg/certwatcher sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics sigs.k8s.io/controller-runtime/pkg/client sigs.k8s.io/controller-runtime/pkg/client/apiutil +sigs.k8s.io/controller-runtime/pkg/client/fake +sigs.k8s.io/controller-runtime/pkg/client/interceptor sigs.k8s.io/controller-runtime/pkg/cluster sigs.k8s.io/controller-runtime/pkg/config sigs.k8s.io/controller-runtime/pkg/config/v1alpha1 @@ -974,6 +994,7 @@ sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics sigs.k8s.io/controller-runtime/pkg/internal/field/selector sigs.k8s.io/controller-runtime/pkg/internal/httpserver sigs.k8s.io/controller-runtime/pkg/internal/log +sigs.k8s.io/controller-runtime/pkg/internal/objectutil sigs.k8s.io/controller-runtime/pkg/internal/recorder sigs.k8s.io/controller-runtime/pkg/internal/source sigs.k8s.io/controller-runtime/pkg/leaderelection diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go new file mode 100644 index 000000000..9deb6756c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go @@ -0,0 +1,1260 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "reflect" + "runtime/debug" + "strconv" + "strings" + "sync" + "time" + + // Using v4 to match upstream + jsonpatch "github.com/evanphx/json-patch" + corev1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/testing" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" + "sigs.k8s.io/controller-runtime/pkg/internal/field/selector" + "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" +) + +type versionedTracker struct { + testing.ObjectTracker + scheme *runtime.Scheme + withStatusSubresource sets.Set[schema.GroupVersionKind] +} + +type fakeClient struct { + tracker versionedTracker + scheme *runtime.Scheme + restMapper meta.RESTMapper + withStatusSubresource sets.Set[schema.GroupVersionKind] + + // indexes maps each GroupVersionKind (GVK) to the indexes registered for that GVK. + // The inner map maps from index name to IndexerFunc. + indexes map[schema.GroupVersionKind]map[string]client.IndexerFunc + + schemeWriteLock sync.Mutex +} + +var _ client.WithWatch = &fakeClient{} + +const ( + maxNameLength = 63 + randomLength = 5 + maxGeneratedNameLength = maxNameLength - randomLength +) + +// NewFakeClient creates a new fake client for testing. +// You can choose to initialize it with a slice of runtime.Object. +func NewFakeClient(initObjs ...runtime.Object) client.WithWatch { + return NewClientBuilder().WithRuntimeObjects(initObjs...).Build() +} + +// NewClientBuilder returns a new builder to create a fake client. +func NewClientBuilder() *ClientBuilder { + return &ClientBuilder{} +} + +// ClientBuilder builds a fake client. +type ClientBuilder struct { + scheme *runtime.Scheme + restMapper meta.RESTMapper + initObject []client.Object + initLists []client.ObjectList + initRuntimeObjects []runtime.Object + withStatusSubresource []client.Object + objectTracker testing.ObjectTracker + interceptorFuncs *interceptor.Funcs + + // indexes maps each GroupVersionKind (GVK) to the indexes registered for that GVK. + // The inner map maps from index name to IndexerFunc. + indexes map[schema.GroupVersionKind]map[string]client.IndexerFunc +} + +// WithScheme sets this builder's internal scheme. +// If not set, defaults to client-go's global scheme.Scheme. +func (f *ClientBuilder) WithScheme(scheme *runtime.Scheme) *ClientBuilder { + f.scheme = scheme + return f +} + +// WithRESTMapper sets this builder's restMapper. +// The restMapper is directly set as mapper in the Client. This can be used for example +// with a meta.DefaultRESTMapper to provide a static rest mapping. +// If not set, defaults to an empty meta.DefaultRESTMapper. 
+func (f *ClientBuilder) WithRESTMapper(restMapper meta.RESTMapper) *ClientBuilder { + f.restMapper = restMapper + return f +} + +// WithObjects can be optionally used to initialize this fake client with client.Object(s). +func (f *ClientBuilder) WithObjects(initObjs ...client.Object) *ClientBuilder { + f.initObject = append(f.initObject, initObjs...) + return f +} + +// WithLists can be optionally used to initialize this fake client with client.ObjectList(s). +func (f *ClientBuilder) WithLists(initLists ...client.ObjectList) *ClientBuilder { + f.initLists = append(f.initLists, initLists...) + return f +} + +// WithRuntimeObjects can be optionally used to initialize this fake client with runtime.Object(s). +func (f *ClientBuilder) WithRuntimeObjects(initRuntimeObjs ...runtime.Object) *ClientBuilder { + f.initRuntimeObjects = append(f.initRuntimeObjects, initRuntimeObjs...) + return f +} + +// WithObjectTracker can be optionally used to initialize this fake client with testing.ObjectTracker. +func (f *ClientBuilder) WithObjectTracker(ot testing.ObjectTracker) *ClientBuilder { + f.objectTracker = ot + return f +} + +// WithIndex can be optionally used to register an index with name `field` and indexer `extractValue` +// for API objects of the same GroupVersionKind (GVK) as `obj` in the fake client. +// It can be invoked multiple times, both with objects of the same GVK or different ones. +// Invoking WithIndex twice with the same `field` and GVK (via `obj`) arguments will panic. +// WithIndex retrieves the GVK of `obj` using the scheme registered via WithScheme if +// WithScheme was previously invoked, the default scheme otherwise. +func (f *ClientBuilder) WithIndex(obj runtime.Object, field string, extractValue client.IndexerFunc) *ClientBuilder { + objScheme := f.scheme + if objScheme == nil { + objScheme = scheme.Scheme + } + + gvk, err := apiutil.GVKForObject(obj, objScheme) + if err != nil { + panic(err) + } + + // If this is the first index being registered, we initialize the map storing all the indexes. + if f.indexes == nil { + f.indexes = make(map[schema.GroupVersionKind]map[string]client.IndexerFunc) + } + + // If this is the first index being registered for the GroupVersionKind of `obj`, we initialize + // the map storing the indexes for that GroupVersionKind. + if f.indexes[gvk] == nil { + f.indexes[gvk] = make(map[string]client.IndexerFunc) + } + + if _, fieldAlreadyIndexed := f.indexes[gvk][field]; fieldAlreadyIndexed { + panic(fmt.Errorf("indexer conflict: field %s for GroupVersionKind %v is already indexed", + field, gvk)) + } + + f.indexes[gvk][field] = extractValue + + return f +} + +// WithStatusSubresource configures the passed object with a status subresource, which means +// calls to Update and Patch will not alter its status. +func (f *ClientBuilder) WithStatusSubresource(o ...client.Object) *ClientBuilder { + f.withStatusSubresource = append(f.withStatusSubresource, o...) + return f +} + +// WithInterceptorFuncs configures the client methods to be intercepted using the provided interceptor.Funcs. +func (f *ClientBuilder) WithInterceptorFuncs(interceptorFuncs interceptor.Funcs) *ClientBuilder { + f.interceptorFuncs = &interceptorFuncs + return f +} + +// Build builds and returns a new fake client. 
+func (f *ClientBuilder) Build() client.WithWatch { + if f.scheme == nil { + f.scheme = scheme.Scheme + } + if f.restMapper == nil { + f.restMapper = meta.NewDefaultRESTMapper([]schema.GroupVersion{}) + } + + var tracker versionedTracker + + withStatusSubResource := sets.New(inTreeResourcesWithStatus()...) + for _, o := range f.withStatusSubresource { + gvk, err := apiutil.GVKForObject(o, f.scheme) + if err != nil { + panic(fmt.Errorf("failed to get gvk for object %T: %w", withStatusSubResource, err)) + } + withStatusSubResource.Insert(gvk) + } + + if f.objectTracker == nil { + tracker = versionedTracker{ObjectTracker: testing.NewObjectTracker(f.scheme, scheme.Codecs.UniversalDecoder()), scheme: f.scheme, withStatusSubresource: withStatusSubResource} + } else { + tracker = versionedTracker{ObjectTracker: f.objectTracker, scheme: f.scheme, withStatusSubresource: withStatusSubResource} + } + + for _, obj := range f.initObject { + if err := tracker.Add(obj); err != nil { + panic(fmt.Errorf("failed to add object %v to fake client: %w", obj, err)) + } + } + for _, obj := range f.initLists { + if err := tracker.Add(obj); err != nil { + panic(fmt.Errorf("failed to add list %v to fake client: %w", obj, err)) + } + } + for _, obj := range f.initRuntimeObjects { + if err := tracker.Add(obj); err != nil { + panic(fmt.Errorf("failed to add runtime object %v to fake client: %w", obj, err)) + } + } + + var result client.WithWatch = &fakeClient{ + tracker: tracker, + scheme: f.scheme, + restMapper: f.restMapper, + indexes: f.indexes, + withStatusSubresource: withStatusSubResource, + } + + if f.interceptorFuncs != nil { + result = interceptor.NewClient(result, *f.interceptorFuncs) + } + + return result +} + +const trackerAddResourceVersion = "999" + +func (t versionedTracker) Add(obj runtime.Object) error { + var objects []runtime.Object + if meta.IsListType(obj) { + var err error + objects, err = meta.ExtractList(obj) + if err != nil { + return err + } + } else { + objects = []runtime.Object{obj} + } + for _, obj := range objects { + accessor, err := meta.Accessor(obj) + if err != nil { + return fmt.Errorf("failed to get accessor for object: %w", err) + } + if accessor.GetDeletionTimestamp() != nil && len(accessor.GetFinalizers()) == 0 { + return fmt.Errorf("refusing to create obj %s with metadata.deletionTimestamp but no finalizers", accessor.GetName()) + } + if accessor.GetResourceVersion() == "" { + // We use a "magic" value of 999 here because this field + // is parsed as uint and and 0 is already used in Update. 
+ // As we can't go lower, go very high instead so this can + // be recognized + accessor.SetResourceVersion(trackerAddResourceVersion) + } + + obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj) + if err != nil { + return err + } + if err := t.ObjectTracker.Add(obj); err != nil { + return err + } + } + + return nil +} + +func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + accessor, err := meta.Accessor(obj) + if err != nil { + return fmt.Errorf("failed to get accessor for object: %w", err) + } + if accessor.GetName() == "" { + return apierrors.NewInvalid( + obj.GetObjectKind().GroupVersionKind().GroupKind(), + accessor.GetName(), + field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")}) + } + if accessor.GetResourceVersion() != "" { + return apierrors.NewBadRequest("resourceVersion can not be set for Create requests") + } + accessor.SetResourceVersion("1") + obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj) + if err != nil { + return err + } + if err := t.ObjectTracker.Create(gvr, obj, ns); err != nil { + accessor.SetResourceVersion("") + return err + } + + return nil +} + +// convertFromUnstructuredIfNecessary will convert runtime.Unstructured for a GVK that is recognized +// by the schema into the whatever the schema produces with New() for said GVK. +// This is required because the tracker unconditionally saves on manipulations, but its List() implementation +// tries to assign whatever it finds into a ListType it gets from schema.New() - Thus we have to ensure +// we save as the very same type, otherwise subsequent List requests will fail. +func convertFromUnstructuredIfNecessary(s *runtime.Scheme, o runtime.Object) (runtime.Object, error) { + gvk := o.GetObjectKind().GroupVersionKind() + + u, isUnstructured := o.(runtime.Unstructured) + if !isUnstructured || !s.Recognizes(gvk) { + return o, nil + } + + typed, err := s.New(gvk) + if err != nil { + return nil, fmt.Errorf("scheme recognizes %s but failed to produce an object for it: %w", gvk, err) + } + + unstructuredSerialized, err := json.Marshal(u) + if err != nil { + return nil, fmt.Errorf("failed to serialize %T: %w", unstructuredSerialized, err) + } + if err := json.Unmarshal(unstructuredSerialized, typed); err != nil { + return nil, fmt.Errorf("failed to unmarshal the content of %T into %T: %w", u, typed, err) + } + + return typed, nil +} + +func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + isStatus := false + // We apply patches using a client-go reaction that ends up calling the trackers Update. As we can't change + // that reaction, we use the callstack to figure out if this originated from the status client. 
+ if bytes.Contains(debug.Stack(), []byte("sigs.k8s.io/controller-runtime/pkg/client/fake.(*fakeSubResourceClient).statusPatch")) { + isStatus = true + } + return t.update(gvr, obj, ns, isStatus, false) +} + +func (t versionedTracker) update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, isStatus bool, deleting bool) error { + accessor, err := meta.Accessor(obj) + if err != nil { + return fmt.Errorf("failed to get accessor for object: %w", err) + } + + if accessor.GetName() == "" { + return apierrors.NewInvalid( + obj.GetObjectKind().GroupVersionKind().GroupKind(), + accessor.GetName(), + field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")}) + } + + gvk := obj.GetObjectKind().GroupVersionKind() + if gvk.Empty() { + gvk, err = apiutil.GVKForObject(obj, t.scheme) + if err != nil { + return err + } + } + + oldObject, err := t.ObjectTracker.Get(gvr, ns, accessor.GetName()) + if err != nil { + // If the resource is not found and the resource allows create on update, issue a + // create instead. + if apierrors.IsNotFound(err) && allowsCreateOnUpdate(gvk) { + return t.Create(gvr, obj, ns) + } + return err + } + + if t.withStatusSubresource.Has(gvk) { + if isStatus { // copy everything but status and metadata.ResourceVersion from original object + if err := copyStatusFrom(obj, oldObject); err != nil { + return fmt.Errorf("failed to copy non-status field for object with status subresouce: %w", err) + } + passedRV := accessor.GetResourceVersion() + if err := copyFrom(oldObject, obj); err != nil { + return fmt.Errorf("failed to restore non-status fields: %w", err) + } + accessor.SetResourceVersion(passedRV) + } else { // copy status from original object + if err := copyStatusFrom(oldObject, obj); err != nil { + return fmt.Errorf("failed to copy the status for object with status subresource: %w", err) + } + } + } else if isStatus { + return apierrors.NewNotFound(gvr.GroupResource(), accessor.GetName()) + } + + oldAccessor, err := meta.Accessor(oldObject) + if err != nil { + return err + } + + // If the new object does not have the resource version set and it allows unconditional update, + // default it to the resource version of the existing resource + if accessor.GetResourceVersion() == "" && allowsUnconditionalUpdate(gvk) { + accessor.SetResourceVersion(oldAccessor.GetResourceVersion()) + } + if accessor.GetResourceVersion() != oldAccessor.GetResourceVersion() { + return apierrors.NewConflict(gvr.GroupResource(), accessor.GetName(), errors.New("object was modified")) + } + if oldAccessor.GetResourceVersion() == "" { + oldAccessor.SetResourceVersion("0") + } + intResourceVersion, err := strconv.ParseUint(oldAccessor.GetResourceVersion(), 10, 64) + if err != nil { + return fmt.Errorf("can not convert resourceVersion %q to int: %w", oldAccessor.GetResourceVersion(), err) + } + intResourceVersion++ + accessor.SetResourceVersion(strconv.FormatUint(intResourceVersion, 10)) + + if !deleting && !deletionTimestampEqual(accessor, oldAccessor) { + return fmt.Errorf("error: Unable to edit %s: metadata.deletionTimestamp field is immutable", accessor.GetName()) + } + + if !accessor.GetDeletionTimestamp().IsZero() && len(accessor.GetFinalizers()) == 0 { + return t.ObjectTracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName()) + } + obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj) + if err != nil { + return err + } + return t.ObjectTracker.Update(gvr, obj, ns) +} + +func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj 
client.Object, opts ...client.GetOption) error { + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + o, err := c.tracker.Get(gvr, key.Namespace, key.Name) + if err != nil { + return err + } + + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + ta, err := meta.TypeAccessor(o) + if err != nil { + return err + } + ta.SetKind(gvk.Kind) + ta.SetAPIVersion(gvk.GroupVersion().String()) + + j, err := json.Marshal(o) + if err != nil { + return err + } + decoder := scheme.Codecs.UniversalDecoder() + zero(obj) + _, _, err = decoder.Decode(j, nil, obj) + return err +} + +func (c *fakeClient) Watch(ctx context.Context, list client.ObjectList, opts ...client.ListOption) (watch.Interface, error) { + gvk, err := apiutil.GVKForObject(list, c.scheme) + if err != nil { + return nil, err + } + + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + listOpts := client.ListOptions{} + listOpts.ApplyOptions(opts) + + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + return c.tracker.Watch(gvr, listOpts.Namespace) +} + +func (c *fakeClient) List(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + + originalKind := gvk.Kind + + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + if _, isUnstructuredList := obj.(runtime.Unstructured); isUnstructuredList && !c.scheme.Recognizes(gvk) { + // We need to register the ListKind with UnstructuredList: + // https://github.com/kubernetes/kubernetes/blob/7b2776b89fb1be28d4e9203bdeec079be903c103/staging/src/k8s.io/client-go/dynamic/fake/simple.go#L44-L51 + c.schemeWriteLock.Lock() + c.scheme.AddKnownTypeWithName(gvk.GroupVersion().WithKind(gvk.Kind+"List"), &unstructured.UnstructuredList{}) + c.schemeWriteLock.Unlock() + } + + listOpts := client.ListOptions{} + listOpts.ApplyOptions(opts) + + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + o, err := c.tracker.List(gvr, gvk, listOpts.Namespace) + if err != nil { + return err + } + + ta, err := meta.TypeAccessor(o) + if err != nil { + return err + } + ta.SetKind(originalKind) + ta.SetAPIVersion(gvk.GroupVersion().String()) + + j, err := json.Marshal(o) + if err != nil { + return err + } + decoder := scheme.Codecs.UniversalDecoder() + zero(obj) + _, _, err = decoder.Decode(j, nil, obj) + if err != nil { + return err + } + + if listOpts.LabelSelector == nil && listOpts.FieldSelector == nil { + return nil + } + + // If we're here, either a label or field selector are specified (or both), so before we return + // the list we must filter it. If both selectors are set, they are ANDed. 
+ objs, err := meta.ExtractList(obj) + if err != nil { + return err + } + + filteredList, err := c.filterList(objs, gvk, listOpts.LabelSelector, listOpts.FieldSelector) + if err != nil { + return err + } + + return meta.SetList(obj, filteredList) +} + +func (c *fakeClient) filterList(list []runtime.Object, gvk schema.GroupVersionKind, ls labels.Selector, fs fields.Selector) ([]runtime.Object, error) { + // Filter the objects with the label selector + filteredList := list + if ls != nil { + objsFilteredByLabel, err := objectutil.FilterWithLabels(list, ls) + if err != nil { + return nil, err + } + filteredList = objsFilteredByLabel + } + + // Filter the result of the previous pass with the field selector + if fs != nil { + objsFilteredByField, err := c.filterWithFields(filteredList, gvk, fs) + if err != nil { + return nil, err + } + filteredList = objsFilteredByField + } + + return filteredList, nil +} + +func (c *fakeClient) filterWithFields(list []runtime.Object, gvk schema.GroupVersionKind, fs fields.Selector) ([]runtime.Object, error) { + // We only allow filtering on the basis of a single field to ensure consistency with the + // behavior of the cache reader (which we're faking here). + fieldKey, fieldVal, requiresExact := selector.RequiresExactMatch(fs) + if !requiresExact { + return nil, fmt.Errorf("field selector %s is not in one of the two supported forms \"key==val\" or \"key=val\"", + fs) + } + + // Field selection is mimicked via indexes, so there's no sane answer this function can give + // if there are no indexes registered for the GroupVersionKind of the objects in the list. + indexes := c.indexes[gvk] + if len(indexes) == 0 || indexes[fieldKey] == nil { + return nil, fmt.Errorf("List on GroupVersionKind %v specifies selector on field %s, but no "+ + "index with name %s has been registered for GroupVersionKind %v", gvk, fieldKey, fieldKey, gvk) + } + + indexExtractor := indexes[fieldKey] + filteredList := make([]runtime.Object, 0, len(list)) + for _, obj := range list { + if c.objMatchesFieldSelector(obj, indexExtractor, fieldVal) { + filteredList = append(filteredList, obj) + } + } + return filteredList, nil +} + +func (c *fakeClient) objMatchesFieldSelector(o runtime.Object, extractIndex client.IndexerFunc, val string) bool { + obj, isClientObject := o.(client.Object) + if !isClientObject { + panic(fmt.Errorf("expected object %v to be of type client.Object, but it's not", o)) + } + + for _, extractedVal := range extractIndex(obj) { + if extractedVal == val { + return true + } + } + + return false +} + +func (c *fakeClient) Scheme() *runtime.Scheme { + return c.scheme +} + +func (c *fakeClient) RESTMapper() meta.RESTMapper { + return c.restMapper +} + +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (c *fakeClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return apiutil.GVKForObject(obj, c.scheme) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. 
+func (c *fakeClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return apiutil.IsObjectNamespaced(obj, c.scheme, c.restMapper) +} + +func (c *fakeClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + createOptions := &client.CreateOptions{} + createOptions.ApplyOptions(opts) + + for _, dryRunOpt := range createOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + + if accessor.GetName() == "" && accessor.GetGenerateName() != "" { + base := accessor.GetGenerateName() + if len(base) > maxGeneratedNameLength { + base = base[:maxGeneratedNameLength] + } + accessor.SetName(fmt.Sprintf("%s%s", base, utilrand.String(randomLength))) + } + // Ignore attempts to set deletion timestamp + if !accessor.GetDeletionTimestamp().IsZero() { + accessor.SetDeletionTimestamp(nil) + } + + return c.tracker.Create(gvr, obj, accessor.GetNamespace()) +} + +func (c *fakeClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + delOptions := client.DeleteOptions{} + delOptions.ApplyOptions(opts) + + for _, dryRunOpt := range delOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + // Check the ResourceVersion if that Precondition was specified. + if delOptions.Preconditions != nil && delOptions.Preconditions.ResourceVersion != nil { + name := accessor.GetName() + dbObj, err := c.tracker.Get(gvr, accessor.GetNamespace(), name) + if err != nil { + return err + } + oldAccessor, err := meta.Accessor(dbObj) + if err != nil { + return err + } + actualRV := oldAccessor.GetResourceVersion() + expectRV := *delOptions.Preconditions.ResourceVersion + if actualRV != expectRV { + msg := fmt.Sprintf( + "the ResourceVersion in the precondition (%s) does not match the ResourceVersion in record (%s). "+ + "The object might have been modified", + expectRV, actualRV) + return apierrors.NewConflict(gvr.GroupResource(), name, errors.New(msg)) + } + } + + return c.deleteObject(gvr, accessor) +} + +func (c *fakeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + + dcOptions := client.DeleteAllOfOptions{} + dcOptions.ApplyOptions(opts) + + for _, dryRunOpt := range dcOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + o, err := c.tracker.List(gvr, gvk, dcOptions.Namespace) + if err != nil { + return err + } + + objs, err := meta.ExtractList(o) + if err != nil { + return err + } + filteredObjs, err := objectutil.FilterWithLabels(objs, dcOptions.LabelSelector) + if err != nil { + return err + } + for _, o := range filteredObjs { + accessor, err := meta.Accessor(o) + if err != nil { + return err + } + err = c.deleteObject(gvr, accessor) + if err != nil { + return err + } + } + return nil +} + +func (c *fakeClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + return c.update(obj, false, opts...) 
+} + +func (c *fakeClient) update(obj client.Object, isStatus bool, opts ...client.UpdateOption) error { + updateOptions := &client.UpdateOptions{} + updateOptions.ApplyOptions(opts) + + for _, dryRunOpt := range updateOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + return c.tracker.update(gvr, obj, accessor.GetNamespace(), isStatus, false) +} + +func (c *fakeClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + return c.patch(obj, patch, opts...) +} + +func (c *fakeClient) patch(obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + patchOptions := &client.PatchOptions{} + patchOptions.ApplyOptions(opts) + + for _, dryRunOpt := range patchOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + data, err := patch.Data(obj) + if err != nil { + return err + } + + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + + oldObj, err := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName()) + if err != nil { + return err + } + oldAccessor, err := meta.Accessor(oldObj) + if err != nil { + return err + } + + // Apply patch without updating object. + // To remain in accordance with the behavior of k8s api behavior, + // a patch must not allow for changes to the deletionTimestamp of an object. + // The reaction() function applies the patch to the object and calls Update(), + // whereas dryPatch() replicates this behavior but skips the call to Update(). + // This ensures that the patch may be rejected if a deletionTimestamp is modified, prior + // to updating the object. + action := testing.NewPatchAction(gvr, accessor.GetNamespace(), accessor.GetName(), patch.Type(), data) + o, err := dryPatch(action, c.tracker) + if err != nil { + return err + } + newObj, err := meta.Accessor(o) + if err != nil { + return err + } + + // Validate that deletionTimestamp has not been changed + if !deletionTimestampEqual(newObj, oldAccessor) { + return fmt.Errorf("rejected patch, metadata.deletionTimestamp immutable") + } + + reaction := testing.ObjectReaction(c.tracker) + handled, o, err := reaction(action) + if err != nil { + return err + } + if !handled { + panic("tracker could not handle patch method") + } + ta, err := meta.TypeAccessor(o) + if err != nil { + return err + } + ta.SetKind(gvk.Kind) + ta.SetAPIVersion(gvk.GroupVersion().String()) + + j, err := json.Marshal(o) + if err != nil { + return err + } + decoder := scheme.Codecs.UniversalDecoder() + zero(obj) + _, _, err = decoder.Decode(j, nil, obj) + return err +} + +// Applying a patch results in a deletionTimestamp that is truncated to the nearest second. +// Check that the diff between a new and old deletion timestamp is within a reasonable threshold +// to be considered unchanged. 
+func deletionTimestampEqual(newObj metav1.Object, obj metav1.Object) bool { + newTime := newObj.GetDeletionTimestamp() + oldTime := obj.GetDeletionTimestamp() + + if newTime == nil || oldTime == nil { + return newTime == oldTime + } + return newTime.Time.Sub(oldTime.Time).Abs() < time.Second +} + +// The behavior of applying the patch is pulled out into dryPatch(), +// which applies the patch and returns an object, but does not Update() the object. +// This function returns a patched runtime object that may then be validated before a call to Update() is executed. +// This results in some code duplication, but was found to be a cleaner alternative than unmarshalling and introspecting the patch data +// and easier than refactoring the k8s client-go method upstream. +// Duplicate of upstream: https://github.com/kubernetes/client-go/blob/783d0d33626e59d55d52bfd7696b775851f92107/testing/fixture.go#L146-L194 +func dryPatch(action testing.PatchActionImpl, tracker testing.ObjectTracker) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + + obj, err := tracker.Get(gvr, ns, action.GetName()) + if err != nil { + return nil, err + } + + old, err := json.Marshal(obj) + if err != nil { + return nil, err + } + + // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields + // in obj that are removed by patch are cleared + value := reflect.ValueOf(obj) + value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) + + switch action.GetPatchType() { + case types.JSONPatchType: + patch, err := jsonpatch.DecodePatch(action.GetPatch()) + if err != nil { + return nil, err + } + modified, err := patch.Apply(old) + if err != nil { + return nil, err + } + + if err = json.Unmarshal(modified, obj); err != nil { + return nil, err + } + case types.MergePatchType: + modified, err := jsonpatch.MergePatch(old, action.GetPatch()) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(modified, obj); err != nil { + return nil, err + } + case types.StrategicMergePatchType, types.ApplyPatchType: + mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err != nil { + return nil, err + } + if err = json.Unmarshal(mergedByte, obj); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("PatchType is not supported") + } + return obj, nil +} + +// copyStatusFrom copies the status from old into new +func copyStatusFrom(old, new runtime.Object) error { + oldMapStringAny, err := toMapStringAny(old) + if err != nil { + return fmt.Errorf("failed to convert old to *unstructured.Unstructured: %w", err) + } + newMapStringAny, err := toMapStringAny(new) + if err != nil { + return fmt.Errorf("failed to convert new to *unststructured.Unstructured: %w", err) + } + + newMapStringAny["status"] = oldMapStringAny["status"] + + if err := fromMapStringAny(newMapStringAny, new); err != nil { + return fmt.Errorf("failed to convert back from map[string]any: %w", err) + } + + return nil +} + +// copyFrom copies from old into new +func copyFrom(old, new runtime.Object) error { + oldMapStringAny, err := toMapStringAny(old) + if err != nil { + return fmt.Errorf("failed to convert old to *unstructured.Unstructured: %w", err) + } + if err := fromMapStringAny(oldMapStringAny, new); err != nil { + return fmt.Errorf("failed to convert back from map[string]any: %w", err) + } + + return nil +} + +func toMapStringAny(obj runtime.Object) (map[string]any, error) { + if unstructured, isUnstructured := 
obj.(*unstructured.Unstructured); isUnstructured { + return unstructured.Object, nil + } + + serialized, err := json.Marshal(obj) + if err != nil { + return nil, err + } + + u := map[string]any{} + return u, json.Unmarshal(serialized, &u) +} + +func fromMapStringAny(u map[string]any, target runtime.Object) error { + if targetUnstructured, isUnstructured := target.(*unstructured.Unstructured); isUnstructured { + targetUnstructured.Object = u + return nil + } + + serialized, err := json.Marshal(u) + if err != nil { + return fmt.Errorf("failed to serialize: %w", err) + } + + zero(target) + if err := json.Unmarshal(serialized, &target); err != nil { + return fmt.Errorf("failed to deserialize: %w", err) + } + + return nil +} + +func (c *fakeClient) Status() client.SubResourceWriter { + return c.SubResource("status") +} + +func (c *fakeClient) SubResource(subResource string) client.SubResourceClient { + return &fakeSubResourceClient{client: c, subResource: subResource} +} + +func (c *fakeClient) deleteObject(gvr schema.GroupVersionResource, accessor metav1.Object) error { + old, err := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName()) + if err == nil { + oldAccessor, err := meta.Accessor(old) + if err == nil { + if len(oldAccessor.GetFinalizers()) > 0 { + now := metav1.Now() + oldAccessor.SetDeletionTimestamp(&now) + // Call update directly with mutability parameter set to true to allow + // changes to deletionTimestamp + return c.tracker.update(gvr, old, accessor.GetNamespace(), false, true) + } + } + } + + //TODO: implement propagation + return c.tracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName()) +} + +func getGVRFromObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionResource, error) { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return schema.GroupVersionResource{}, err + } + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + return gvr, nil +} + +type fakeSubResourceClient struct { + client *fakeClient + subResource string +} + +func (sw *fakeSubResourceClient) Get(ctx context.Context, obj, subResource client.Object, opts ...client.SubResourceGetOption) error { + panic("fakeSubResourceClient does not support get") +} + +func (sw *fakeSubResourceClient) Create(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error { + switch sw.subResource { + case "eviction": + _, isEviction := subResource.(*policyv1beta1.Eviction) + if !isEviction { + _, isEviction = subResource.(*policyv1.Eviction) + } + if !isEviction { + return apierrors.NewBadRequest(fmt.Sprintf("got invalid type %t, expected Eviction", subResource)) + } + if _, isPod := obj.(*corev1.Pod); !isPod { + return apierrors.NewNotFound(schema.GroupResource{}, "") + } + + return sw.client.Delete(ctx, obj) + default: + return fmt.Errorf("fakeSubResourceWriter does not support create for %s", sw.subResource) + } +} + +func (sw *fakeSubResourceClient) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + updateOptions := client.SubResourceUpdateOptions{} + updateOptions.ApplyOptions(opts) + + body := obj + if updateOptions.SubResourceBody != nil { + body = updateOptions.SubResourceBody + } + return sw.client.update(body, true, &updateOptions.UpdateOptions) +} + +func (sw *fakeSubResourceClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error { + patchOptions := client.SubResourcePatchOptions{} + 
patchOptions.ApplyOptions(opts) + + body := obj + if patchOptions.SubResourceBody != nil { + body = patchOptions.SubResourceBody + } + + // this is necessary to identify that last call was made for status patch, through stack trace. + if sw.subResource == "status" { + return sw.statusPatch(body, patch, patchOptions) + } + + return sw.client.patch(body, patch, &patchOptions.PatchOptions) +} + +func (sw *fakeSubResourceClient) statusPatch(body client.Object, patch client.Patch, patchOptions client.SubResourcePatchOptions) error { + return sw.client.patch(body, patch, &patchOptions.PatchOptions) +} + +func allowsUnconditionalUpdate(gvk schema.GroupVersionKind) bool { + switch gvk.Group { + case "apps": + switch gvk.Kind { + case "ControllerRevision", "DaemonSet", "Deployment", "ReplicaSet", "StatefulSet": + return true + } + case "autoscaling": + switch gvk.Kind { + case "HorizontalPodAutoscaler": + return true + } + case "batch": + switch gvk.Kind { + case "CronJob", "Job": + return true + } + case "certificates": + switch gvk.Kind { + case "Certificates": + return true + } + case "flowcontrol": + switch gvk.Kind { + case "FlowSchema", "PriorityLevelConfiguration": + return true + } + case "networking": + switch gvk.Kind { + case "Ingress", "IngressClass", "NetworkPolicy": + return true + } + case "policy": + switch gvk.Kind { + case "PodSecurityPolicy": + return true + } + case "rbac.authorization.k8s.io": + switch gvk.Kind { + case "ClusterRole", "ClusterRoleBinding", "Role", "RoleBinding": + return true + } + case "scheduling": + switch gvk.Kind { + case "PriorityClass": + return true + } + case "settings": + switch gvk.Kind { + case "PodPreset": + return true + } + case "storage": + switch gvk.Kind { + case "StorageClass": + return true + } + case "": + switch gvk.Kind { + case "ConfigMap", "Endpoint", "Event", "LimitRange", "Namespace", "Node", + "PersistentVolume", "PersistentVolumeClaim", "Pod", "PodTemplate", + "ReplicationController", "ResourceQuota", "Secret", "Service", + "ServiceAccount", "EndpointSlice": + return true + } + } + + return false +} + +func allowsCreateOnUpdate(gvk schema.GroupVersionKind) bool { + switch gvk.Group { + case "coordination": + switch gvk.Kind { + case "Lease": + return true + } + case "node": + switch gvk.Kind { + case "RuntimeClass": + return true + } + case "rbac": + switch gvk.Kind { + case "ClusterRole", "ClusterRoleBinding", "Role", "RoleBinding": + return true + } + case "": + switch gvk.Kind { + case "Endpoint", "Event", "LimitRange", "Service": + return true + } + } + + return false +} + +func inTreeResourcesWithStatus() []schema.GroupVersionKind { + return []schema.GroupVersionKind{ + {Version: "v1", Kind: "Namespace"}, + {Version: "v1", Kind: "Node"}, + {Version: "v1", Kind: "PersistentVolumeClaim"}, + {Version: "v1", Kind: "PersistentVolume"}, + {Version: "v1", Kind: "Pod"}, + {Version: "v1", Kind: "ReplicationController"}, + {Version: "v1", Kind: "Service"}, + + {Group: "apps", Version: "v1", Kind: "Deployment"}, + {Group: "apps", Version: "v1", Kind: "DaemonSet"}, + {Group: "apps", Version: "v1", Kind: "ReplicaSet"}, + {Group: "apps", Version: "v1", Kind: "StatefulSet"}, + + {Group: "autoscaling", Version: "v1", Kind: "HorizontalPodAutoscaler"}, + + {Group: "batch", Version: "v1", Kind: "CronJob"}, + {Group: "batch", Version: "v1", Kind: "Job"}, + + {Group: "certificates.k8s.io", Version: "v1", Kind: "CertificateSigningRequest"}, + + {Group: "networking.k8s.io", Version: "v1", Kind: "Ingress"}, + {Group: "networking.k8s.io", Version: 
"v1", Kind: "NetworkPolicy"}, + + {Group: "policy", Version: "v1", Kind: "PodDisruptionBudget"}, + + {Group: "storage.k8s.io", Version: "v1", Kind: "VolumeAttachment"}, + + {Group: "apiextensions.k8s.io", Version: "v1", Kind: "CustomResourceDefinition"}, + + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchema"}, + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfiguration"}, + } +} + +// zero zeros the value of a pointer. +func zero(x interface{}) { + if x == nil { + return + } + res := reflect.ValueOf(x).Elem() + res.Set(reflect.Zero(res.Type())) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go new file mode 100644 index 000000000..d42347a2e --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go @@ -0,0 +1,38 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package fake provides a fake client for testing. + +A fake client is backed by its simple object store indexed by GroupVersionResource. +You can create a fake client with optional objects. + + client := NewClientBuilder().WithScheme(scheme).WithObj(initObjs...).Build() + +You can invoke the methods defined in the Client interface. + +When in doubt, it's almost always better not to use this package and instead use +envtest.Environment with a real client and API server. + +WARNING: ⚠️ Current Limitations / Known Issues with the fake Client ⚠️ + - This client does not have a way to inject specific errors to test handled vs. unhandled errors. + - There is some support for sub resources which can cause issues with tests if you're trying to update + e.g. metadata and status in the same reconcile. + - No OpenAPI validation is performed when creating or updating objects. + - ObjectMeta's `Generation` and `ResourceVersion` don't behave properly, Patch or Update + operations that rely on these fields will fail, or give false positives. +*/ +package fake diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interceptor/intercept.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interceptor/intercept.go new file mode 100644 index 000000000..3d3f3cb01 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interceptor/intercept.go @@ -0,0 +1,166 @@ +package interceptor + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Funcs contains functions that are called instead of the underlying client's methods. 
+type Funcs struct { + Get func(ctx context.Context, client client.WithWatch, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error + List func(ctx context.Context, client client.WithWatch, list client.ObjectList, opts ...client.ListOption) error + Create func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.CreateOption) error + Delete func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.DeleteOption) error + DeleteAllOf func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.DeleteAllOfOption) error + Update func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.UpdateOption) error + Patch func(ctx context.Context, client client.WithWatch, obj client.Object, patch client.Patch, opts ...client.PatchOption) error + Watch func(ctx context.Context, client client.WithWatch, obj client.ObjectList, opts ...client.ListOption) (watch.Interface, error) + SubResource func(client client.WithWatch, subResource string) client.SubResourceClient + SubResourceGet func(ctx context.Context, client client.Client, subResourceName string, obj client.Object, subResource client.Object, opts ...client.SubResourceGetOption) error + SubResourceCreate func(ctx context.Context, client client.Client, subResourceName string, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error + SubResourceUpdate func(ctx context.Context, client client.Client, subResourceName string, obj client.Object, opts ...client.SubResourceUpdateOption) error + SubResourcePatch func(ctx context.Context, client client.Client, subResourceName string, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error +} + +// NewClient returns a new interceptor client that calls the functions in funcs instead of the underlying client's methods, if they are not nil. +func NewClient(interceptedClient client.WithWatch, funcs Funcs) client.WithWatch { + return interceptor{ + client: interceptedClient, + funcs: funcs, + } +} + +type interceptor struct { + client client.WithWatch + funcs Funcs +} + +var _ client.WithWatch = &interceptor{} + +func (c interceptor) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return c.client.GroupVersionKindFor(obj) +} + +func (c interceptor) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return c.client.IsObjectNamespaced(obj) +} + +func (c interceptor) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + if c.funcs.Get != nil { + return c.funcs.Get(ctx, c.client, key, obj, opts...) + } + return c.client.Get(ctx, key, obj, opts...) +} + +func (c interceptor) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + if c.funcs.List != nil { + return c.funcs.List(ctx, c.client, list, opts...) + } + return c.client.List(ctx, list, opts...) +} + +func (c interceptor) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + if c.funcs.Create != nil { + return c.funcs.Create(ctx, c.client, obj, opts...) + } + return c.client.Create(ctx, obj, opts...) +} + +func (c interceptor) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + if c.funcs.Delete != nil { + return c.funcs.Delete(ctx, c.client, obj, opts...) + } + return c.client.Delete(ctx, obj, opts...) 
+} + +func (c interceptor) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + if c.funcs.Update != nil { + return c.funcs.Update(ctx, c.client, obj, opts...) + } + return c.client.Update(ctx, obj, opts...) +} + +func (c interceptor) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + if c.funcs.Patch != nil { + return c.funcs.Patch(ctx, c.client, obj, patch, opts...) + } + return c.client.Patch(ctx, obj, patch, opts...) +} + +func (c interceptor) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { + if c.funcs.DeleteAllOf != nil { + return c.funcs.DeleteAllOf(ctx, c.client, obj, opts...) + } + return c.client.DeleteAllOf(ctx, obj, opts...) +} + +func (c interceptor) Status() client.SubResourceWriter { + return c.SubResource("status") +} + +func (c interceptor) SubResource(subResource string) client.SubResourceClient { + if c.funcs.SubResource != nil { + return c.funcs.SubResource(c.client, subResource) + } + return subResourceInterceptor{ + subResourceName: subResource, + client: c.client, + funcs: c.funcs, + } +} + +func (c interceptor) Scheme() *runtime.Scheme { + return c.client.Scheme() +} + +func (c interceptor) RESTMapper() meta.RESTMapper { + return c.client.RESTMapper() +} + +func (c interceptor) Watch(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) (watch.Interface, error) { + if c.funcs.Watch != nil { + return c.funcs.Watch(ctx, c.client, obj, opts...) + } + return c.client.Watch(ctx, obj, opts...) +} + +type subResourceInterceptor struct { + subResourceName string + client client.Client + funcs Funcs +} + +var _ client.SubResourceClient = &subResourceInterceptor{} + +func (s subResourceInterceptor) Get(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceGetOption) error { + if s.funcs.SubResourceGet != nil { + return s.funcs.SubResourceGet(ctx, s.client, s.subResourceName, obj, subResource, opts...) + } + return s.client.SubResource(s.subResourceName).Get(ctx, obj, subResource, opts...) +} + +func (s subResourceInterceptor) Create(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error { + if s.funcs.SubResourceCreate != nil { + return s.funcs.SubResourceCreate(ctx, s.client, s.subResourceName, obj, subResource, opts...) + } + return s.client.SubResource(s.subResourceName).Create(ctx, obj, subResource, opts...) +} + +func (s subResourceInterceptor) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + if s.funcs.SubResourceUpdate != nil { + return s.funcs.SubResourceUpdate(ctx, s.client, s.subResourceName, obj, opts...) + } + return s.client.SubResource(s.subResourceName).Update(ctx, obj, opts...) +} + +func (s subResourceInterceptor) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error { + if s.funcs.SubResourcePatch != nil { + return s.funcs.SubResourcePatch(ctx, s.client, s.subResourceName, obj, patch, opts...) + } + return s.client.SubResource(s.subResourceName).Patch(ctx, obj, patch, opts...) 
+} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go new file mode 100644 index 000000000..0189c0432 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go @@ -0,0 +1,42 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package objectutil + +import ( + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" +) + +// FilterWithLabels returns a copy of the items in objs matching labelSel. +func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) { + outItems := make([]runtime.Object, 0, len(objs)) + for _, obj := range objs { + meta, err := apimeta.Accessor(obj) + if err != nil { + return nil, err + } + if labelSel != nil { + lbls := labels.Set(meta.GetLabels()) + if !labelSel.Matches(lbls) { + continue + } + } + outItems = append(outItems, obj.DeepCopyObject()) + } + return outItems, nil +} From be4fced6804f6f5c2a2f217d284a63a54193eb11 Mon Sep 17 00:00:00 2001 From: Rafael Franzke Date: Wed, 11 Oct 2023 16:21:07 +0200 Subject: [PATCH 4/8] Rework main entrypoint - no longer depend on `oscommon` package --- .../app/app.go | 25 ++++++++----------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/cmd/gardener-extension-os-gardenlinux/app/app.go b/cmd/gardener-extension-os-gardenlinux/app/app.go index 9d39e4224..5f2befbd2 100644 --- a/cmd/gardener-extension-os-gardenlinux/app/app.go +++ b/cmd/gardener-extension-os-gardenlinux/app/app.go @@ -23,19 +23,17 @@ import ( controllercmd "github.com/gardener/gardener/extensions/pkg/controller/cmd" "github.com/gardener/gardener/extensions/pkg/controller/heartbeat" heartbeatcmd "github.com/gardener/gardener/extensions/pkg/controller/heartbeat/cmd" - "github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon" - oscommoncmd "github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/cmd" + osccontroller "github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig" "github.com/gardener/gardener/extensions/pkg/util" "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" componentbaseconfig "k8s.io/component-base/config" "sigs.k8s.io/controller-runtime/pkg/client" - runtimelog "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" - "github.com/gardener/gardener-extension-os-gardenlinux/pkg/generator" - "github.com/gardener/gardener-extension-os-gardenlinux/pkg/generator/gardenlinux" - "github.com/gardener/gardener-extension-os-gardenlinux/pkg/generator/memoryone" + "github.com/gardener/gardener-extension-os-gardenlinux/pkg/controller/operatingsystemconfig" + "github.com/gardener/gardener-extension-os-gardenlinux/pkg/gardenlinux" + "github.com/gardener/gardener-extension-os-gardenlinux/pkg/memoryone" ) var ( @@ -45,12 +43,6 @@ var ( // NewControllerCommand returns a new 
Command with a new Generator func NewControllerCommand(ctx context.Context) *cobra.Command { - gardenLinuxGenerator := generator.CloudInitGenerator() - if gardenLinuxGenerator == nil { - runtimelog.Log.Error(fmt.Errorf("generator is nil"), "Error executing the main controller command") - os.Exit(1) - } - var ( generalOpts = &controllercmd.GeneralOptions{} restOpts = &controllercmd.RESTOptions{} @@ -71,7 +63,10 @@ func NewControllerCommand(ctx context.Context) *cobra.Command { reconcileOpts = &controllercmd.ReconcilerOptions{} - controllerSwitches = oscommoncmd.SwitchOptions(ctrlName, osTypes, gardenLinuxGenerator) + controllerSwitches = controllercmd.NewSwitchOptions( + controllercmd.Switch(osccontroller.ControllerName, operatingsystemconfig.AddToManager), + controllercmd.Switch(heartbeat.ControllerName, heartbeat.AddToManager), + ) aggOption = controllercmd.NewOptionAggregator( generalOpts, @@ -120,10 +115,10 @@ func NewControllerCommand(ctx context.Context) *cobra.Command { return fmt.Errorf("could not update manager scheme: %w", err) } - ctrlOpts.Completed().Apply(&oscommon.DefaultAddOptions.Controller) + ctrlOpts.Completed().Apply(&operatingsystemconfig.DefaultAddOptions.Controller) heartbeatCtrlOpts.Completed().Apply(&heartbeat.DefaultAddOptions) - reconcileOpts.Completed().Apply(&oscommon.DefaultAddOptions.IgnoreOperationAnnotation) + reconcileOpts.Completed().Apply(&operatingsystemconfig.DefaultAddOptions.IgnoreOperationAnnotation) if err := controllerSwitches.Completed().AddToManager(ctx, mgr); err != nil { return fmt.Errorf("could not add controller to manager: %w", err) From 490ca4e3ccc12536c01dd4e256382f6874191524 Mon Sep 17 00:00:00 2001 From: Rafael Franzke Date: Wed, 11 Oct 2023 16:21:51 +0200 Subject: [PATCH 5/8] [make revendor] --- .../operatingsystemconfig/oscommon/README.md | 82 ------------------- .../operatingsystemconfig/oscommon/add.go | 59 ------------- .../oscommon/cmd/options.go | 37 --------- .../gardener/gardener/pkg/utils/test/test.go | 8 +- .../gardener/pkg/utils/test/test_resources.go | 2 - vendor/modules.txt | 2 - 6 files changed, 4 insertions(+), 186 deletions(-) delete mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/README.md delete mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/add.go delete mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/cmd/options.go diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/README.md b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/README.md deleted file mode 100644 index bd092dfb8..000000000 --- a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/README.md +++ /dev/null @@ -1,82 +0,0 @@ -# [Gardener Extension for OS Configurations](https://gardener.cloud) - -[![Go Report Card](https://goreportcard.com/badge/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon)](https://goreportcard.com/report/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon) - -**⚠️This package is deprecated and will be removed as soon as the [`UseGardenerNodeAgent` feature gate](../../../../../docs/deployment/feature_gates.md) has been promoted to GA.** - ---- - -Project Gardener implements the automated management and operation of [Kubernetes](https://kubernetes.io/) clusters as 
a service. Its main principle is to leverage Kubernetes concepts for all of its tasks. - -Recently, most of the vendor specific logic has been developed [in-tree](https://github.com/gardener/gardener). However, the project has grown to a size where it is very hard to extend, maintain, and test. With [GEP-1](https://github.com/gardener/gardener/blob/master/docs/proposals/01-extensibility.md) we have proposed how the architecture can be changed in a way to support external controllers that contain their very own vendor specifics. This way, we can keep Gardener core clean and independent. - -The `oscommon` offers a generic controller that operates on the `OperatingSystemConfig` resource in the `extensions.gardener.cloud/v1alpha1` API group. It manages those objects that are requesting for an specific operating system. - - -```yaml ---- -apiVersion: extensions.gardener.cloud/v1alpha1 -kind: OperatingSystemConfig -metadata: - name: pool-01-original - namespace: default -spec: - type: - units: - ... - files: - ... -``` - -Please find [a concrete example](example/operatingsystemconfig.yaml) in the `example` folder. - -After reconciliation the resulting data will be stored in a secret within the same namespace (as the config itself might contain confidential data). The name of the secret will be written into the resource's `.status` field: - -```yaml -... -status: - ... - cloudConfig: - secretRef: - name: osc-result-pool-01-original - namespace: default - command: - units: - - docker-monitor.service - - kubelet-monitor.service - - kubelet.service -``` -The secret has one data key `cloud_config` that stores the generation. - -The generation of this operating system representation is executed by a [`Generator`](generator/generator.go). A default implementation for the `generator` based on [go templates](https://golang.org/pkg/text/template/) is provided in [`template`](template). - -In addition, `oscommon` provides set of basic [`tests`](generator/test/README.md) which can be used to test the operating system specific generator. - -Please find more information regarding the extensibility concepts and a detailed proposal [here](https://github.com/gardener/gardener/blob/master/docs/proposals/01-extensibility.md). - ----- - -## How to use oscommon in a new operating system configuration controller - -When implementing a controller for a specific operating system, it is necessary to provide: -* A command line application for launching the controller -* A template for translating the `cloud-config` to the format requried by the operating system. -* Alternatively, a new generator can also be provided, in case the transformations required by -the operating system requires more complex logic than provided by go templates. -* A test that uses the test description provided in [`pkg/generator/test`] -* A directory with test files -* The [`helm`](https://github.com/helm/helm) Chart for operator registration and installation - -Please refer to the [`os-suse-chost controller`](https://github.com/gardener/gardener-extension-os-suse-chost) for a concrete example. - -## Feedback and Support - -Feedback and contributions are always welcome. Please report bugs or suggestions as [GitHub issues](https://github.com/gardener/gardener/issues) or join our [Slack channel #gardener](https://kubernetes.slack.com/messages/gardener) (please invite yourself to the Kubernetes workspace [here](http://slack.k8s.io)). - -## Learn more! 
- -Please find further resources about out project here: - -* [Our landing page gardener.cloud](https://gardener.cloud/) -* ["Gardener, the Kubernetes Botanist" blog on kubernetes.io](https://kubernetes.io/blog/2018/05/17/gardener/) -* [GEP-1 (Gardener Enhancement Proposal) on extensibility](https://github.com/gardener/gardener/blob/master/docs/proposals/01-extensibility.md) diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/add.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/add.go deleted file mode 100644 index 816f6a465..000000000 --- a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/add.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package oscommon - -import ( - "context" - - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/manager" - - "github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig" - "github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/actuator" - "github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/generator" -) - -// DefaultAddOptions are the default AddOptions for AddToManager. -var DefaultAddOptions = AddOptions{} - -// AddOptions are options to apply when adding the OSC controller to the manager. -type AddOptions struct { - // Controller are the controller.Options. - Controller controller.Options - // IgnoreOperationAnnotation specifies whether to ignore the operation annotation or not. - IgnoreOperationAnnotation bool -} - -// AddToManagerWithOptions adds a controller with the given Options to the given manager. -// The opts.Reconciler is being set with a newly instantiated actuator. -// Deprecated: The `oscommon` package is deprecated and will be removed as soon as the UseGardenerNodeAgent feature gate -// has been promoted to GA. -// TODO(rfranzke): Remove the `oscommon` package after the UseGardenerNodeAgent feature gate has been promoted to GA. -func AddToManagerWithOptions(ctx context.Context, mgr manager.Manager, ctrlName string, osTypes []string, generator generator.Generator, opts AddOptions) error { - return operatingsystemconfig.Add(mgr, operatingsystemconfig.AddArgs{ - Actuator: actuator.NewActuator(mgr, ctrlName, generator), - Predicates: operatingsystemconfig.DefaultPredicates(ctx, mgr, opts.IgnoreOperationAnnotation), - Types: osTypes, - ControllerOptions: opts.Controller, - }) -} - -// AddToManager adds a controller with the default Options. -// Deprecated: The `oscommon` package is deprecated and will be removed as soon as the UseGardenerNodeAgent feature gate -// has been promoted to GA. 
-// TODO(rfranzke): Remove the `oscommon` package after the UseGardenerNodeAgent feature gate has been promoted to GA. -func AddToManager(ctx context.Context, mgr manager.Manager, ctrlName string, osTypes []string, generator generator.Generator) error { - return AddToManagerWithOptions(ctx, mgr, ctrlName, osTypes, generator, DefaultAddOptions) -} diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/cmd/options.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/cmd/options.go deleted file mode 100644 index d74d02dca..000000000 --- a/vendor/github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/cmd/options.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cmd - -import ( - "context" - - "sigs.k8s.io/controller-runtime/pkg/manager" - - "github.com/gardener/gardener/extensions/pkg/controller/cmd" - extensionsheartbeatcontroller "github.com/gardener/gardener/extensions/pkg/controller/heartbeat" - "github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig" - "github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon" - "github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/generator" -) - -// SwitchOptions are the cmd.SwitchOptions for the provider controllers. 
-func SwitchOptions(ctrlName string, osTypes []string, generator generator.Generator) *cmd.SwitchOptions { - return cmd.NewSwitchOptions( - cmd.Switch(operatingsystemconfig.ControllerName, func(ctx context.Context, mgr manager.Manager) error { - return oscommon.AddToManager(ctx, mgr, ctrlName, osTypes, generator) - }), - cmd.Switch(extensionsheartbeatcontroller.ControllerName, extensionsheartbeatcontroller.AddToManager), - ) -} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/test/test.go b/vendor/github.com/gardener/gardener/pkg/utils/test/test.go index a9588cb64..3fa21f56e 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/test/test.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/test/test.go @@ -231,7 +231,7 @@ func EXPECTPatchWithOptimisticLock(ctx interface{}, c *mockclient.MockClient, ex func expectPatch(ctx interface{}, c *mockclient.MockClient, expectedObj client.Object, expectedPatch client.Patch, rets ...interface{}) *gomock.Call { expectedData, expectedErr := expectedPatch.Data(expectedObj) - Expect(expectedErr).To(BeNil()) + Expect(expectedErr).NotTo(HaveOccurred()) if rets == nil { rets = []interface{}{nil} @@ -250,7 +250,7 @@ func expectPatch(ctx interface{}, c *mockclient.MockClient, expectedObj client.O Expect(obj).To(DeepEqual(expectedObj)) data, err := patch.Data(obj) - Expect(err).To(BeNil()) + Expect(err).NotTo(HaveOccurred()) Expect(patch.Type()).To(Equal(expectedPatch.Type())) Expect(string(data)).To(Equal(string(expectedData))) return nil @@ -260,7 +260,7 @@ func expectPatch(ctx interface{}, c *mockclient.MockClient, expectedObj client.O func expectStatusPatch(ctx interface{}, c *mockclient.MockStatusWriter, expectedObj client.Object, expectedPatch client.Patch, rets ...interface{}) *gomock.Call { expectedData, expectedErr := expectedPatch.Data(expectedObj) - Expect(expectedErr).To(BeNil()) + Expect(expectedErr).NotTo(HaveOccurred()) if rets == nil { rets = []interface{}{nil} @@ -279,7 +279,7 @@ func expectStatusPatch(ctx interface{}, c *mockclient.MockStatusWriter, expected Expect(obj).To(DeepEqual(expectedObj)) data, err := patch.Data(obj) - Expect(err).To(BeNil()) + Expect(err).NotTo(HaveOccurred()) Expect(patch.Type()).To(Equal(expectedPatch.Type())) Expect(string(data)).To(Equal(string(expectedData))) return nil diff --git a/vendor/github.com/gardener/gardener/pkg/utils/test/test_resources.go b/vendor/github.com/gardener/gardener/pkg/utils/test/test_resources.go index 820bfb610..c58948612 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/test/test_resources.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/test/test_resources.go @@ -76,7 +76,6 @@ func ReadTestResources(scheme *runtime.Scheme, namespaceName, path string) ([]cl var objects []client.Object for _, file := range files { - if file.IsDir() { continue } @@ -108,7 +107,6 @@ func ReadTestResources(scheme *runtime.Scheme, namespaceName, path string) ([]cl } } return objects, nil - } // readDocuments reads documents from file diff --git a/vendor/modules.txt b/vendor/modules.txt index 057d97f05..c6e27e36f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -76,9 +76,7 @@ github.com/gardener/gardener/extensions/pkg/controller/cmd github.com/gardener/gardener/extensions/pkg/controller/heartbeat github.com/gardener/gardener/extensions/pkg/controller/heartbeat/cmd github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig -github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon 
github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/actuator -github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/cmd github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/generator github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/generator/test github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/template From e9ab9eeda3623b13715959f4b103bd1af0c9041f Mon Sep 17 00:00:00 2001 From: Rafael Franzke Date: Thu, 26 Oct 2023 14:06:36 +0200 Subject: [PATCH 6/8] Set `UseGardenerNodeAgent` option based on gardenlet feature gate --- .../templates/deployment.yaml | 3 +++ charts/gardener-extension-os-gardenlinux/values.yaml | 2 ++ cmd/gardener-extension-os-gardenlinux/app/app.go | 3 +++ example/controller-registration.yaml | 2 +- 4 files changed, 9 insertions(+), 1 deletion(-) diff --git a/charts/gardener-extension-os-gardenlinux/templates/deployment.yaml b/charts/gardener-extension-os-gardenlinux/templates/deployment.yaml index dc927a853..90fc7ed87 100644 --- a/charts/gardener-extension-os-gardenlinux/templates/deployment.yaml +++ b/charts/gardener-extension-os-gardenlinux/templates/deployment.yaml @@ -45,6 +45,9 @@ spec: - --ignore-operation-annotation={{ .Values.controllers.ignoreOperationAnnotation }} - --gardener-version={{ .Values.gardener.version }} - --metrics-bind-address=:{{ .Values.metrics.port }} + {{- if .Values.gardener.gardenlet.featureGates.UseGardenerNodeAgent }} + - --gardenlet-uses-gardener-node-agent={{ .Values.gardener.gardenlet.featureGates.UseGardenerNodeAgent }} + {{- end }} env: - name: LEADER_ELECTION_NAMESPACE valueFrom: diff --git a/charts/gardener-extension-os-gardenlinux/values.yaml b/charts/gardener-extension-os-gardenlinux/values.yaml index 9e5f02865..b491c5ba5 100644 --- a/charts/gardener-extension-os-gardenlinux/values.yaml +++ b/charts/gardener-extension-os-gardenlinux/values.yaml @@ -23,6 +23,8 @@ disableControllers: [] gardener: version: "" + gardenlet: + featureGates: {} # settings for metrics, e.g. scraping by seed-prometheus metrics: diff --git a/cmd/gardener-extension-os-gardenlinux/app/app.go b/cmd/gardener-extension-os-gardenlinux/app/app.go index 5f2befbd2..1344885cd 100644 --- a/cmd/gardener-extension-os-gardenlinux/app/app.go +++ b/cmd/gardener-extension-os-gardenlinux/app/app.go @@ -119,6 +119,9 @@ func NewControllerCommand(ctx context.Context) *cobra.Command { heartbeatCtrlOpts.Completed().Apply(&heartbeat.DefaultAddOptions) reconcileOpts.Completed().Apply(&operatingsystemconfig.DefaultAddOptions.IgnoreOperationAnnotation) + // TODO(rfranzke): Remove the UseGardenerNodeAgent fields as soon as the general options no longer support + // the GardenletUsesGardenerNodeAgent field. 
+ operatingsystemconfig.DefaultAddOptions.UseGardenerNodeAgent = generalOpts.Completed().GardenletUsesGardenerNodeAgent if err := controllerSwitches.Completed().AddToManager(ctx, mgr); err != nil { return fmt.Errorf("could not add controller to manager: %w", err) diff --git a/example/controller-registration.yaml b/example/controller-registration.yaml index 62403622d..144654923 100644 --- a/example/controller-registration.yaml +++ b/example/controller-registration.yaml @@ -5,7 +5,7 @@ metadata: name: os-gardenlinux type: helm providerConfig: - chart: H4sIAAAAAAAAA+0ca3PbNjKf+Sv2mOsk6ZikJL96uulMVdltPefaHstNp9PpZCASotiQBA8Apahp7rffAuBLkmVJdurUrTAZiwKxwGIX+8QqIeEBTSl36DtJUxGx1GHCCXVvHKX5O+/Zg1sL2/Hhof7Etvipn9v7B+3OYefoSPW3j/bb7Wdw+PCl17dcSMIBnnHG5F3j1r1/oi1cy393TOMkClPG6T3XUAw+OjhYyX9k+zz/O62Dg6Nn0PqoO13R/ub8fw5XRErKUwGSgWEzTMc0hWEexUGUhpAR/y0JqXCt53AzjgSIPMsYl/iARyOGMGZDSIj0xzh6DziNiYwmFOHkuNFP0gAnSGmIb1kKLzNOR9E7GsA0wnH/eOXCZRrPgKUaUqEEGeWAh5C6lnsyeDOQiBtO0WdJghO87g8giLiw3DCSnv5r0Lfc4W/c03/LjnHoqT/lVzFJvXqiIe4vz2AUxVRYn7timuHfIXmLf2WCz//Doa8Jj1gu4OzkFBfMOPuV+tJyo4ASz4zDLsudCJ8F1LM+NVc3b+vlvz8mXLozksT3XWOd/Hf2Dxflv7O/k/9HaSSLXlOu+N6FSdsiWVZ9tdtuy7YCKnweZVJ39eA7NAbgqyMBI8ZBjil8Wxyh4gHO1bGBywFUR8pKSUK7sPasWZNy7ZaLiz8hMXqybb38B8x3Q/aQNdbIf6d9sL/g/x0ftA538v8YzfPQDGYztJRjCS/9V4C6twWD3hUMTgEFnKT6CxmheYyIpOCzJCPpzIUemn4NJtDkC8onNHCNf6AsKeBnHPl4pNDC52lAja7ooTOBHwM2klOCnsa5GbIHExc6qDB8mkkgAlImEY4hCJ9GAmdLNfj5Wf/0AhFTK1ieh//KGW5ZpJq70GjQcVvwUg2wi1f2q3+rKWYsRz9lphaFHBeT1SYKhHB1tW0kQOpT46/IegFXzfFTMQcbSoLDCQJk+G3UHAhEFkjrNpYy63redDp1icbYZTz0CqIJr9irg1gXUD+k6KEoav83jzjueDgD1NcIQIaIa0ymmmEhp/hOOXMpTDk6Rcr5EgXB1TRBJCSPhrmcI1qJI269OQDJhkfA7g3gbGDD173B2WBPTfLj2c13lz/cwI+96+vexc3Z6QAur6F/eXFydnN2eYHfvoHexU/wn7OLkz2gkeIkkhOdPtwBohkpcuKJUXMNKJ1DoTQsIqN+NIp83Foa5uiCQsjQQqTaKaU8iYRiq9CeJU4TR0kktXMplvflWjgkZN1QKTt1jsUYHB9s1/Xw34SmAeMeupDjfOgir71SL9YPY3QTvRLc8VkqOYtj1JychopgemUXp53Xn+DCP1/6RIJZ6fXp9QDp86r4St8RJAT1Vk2n3C64zNSauOvBTEia9Fk6isJuc43bRyQ0YXyGDnUTH1sRAp1+7dMXlpym6gQJmKOPcfI1M4pORXdFUp9xju4v1EjDHNJW1px9Z8X/zG29/cfzlGFEhxrpvmtsnf/ptPZbx7v8z2O0bfj/BsN9VDTCldlWseC6+O/gcDH/0znaxX+P096/dwACOopSdIuiBLW2Dc6HDxaAehONYEzElc7UgC3GpHN41LXBfU3inApXj3clCaGCyHiUyhHYn4mvPhOLIznNmIgkGqW7pqAx+gC3TNi994RpoL40HvVzueuAZjGbJTSVRfhpKICulfAwJC7BVN+n5tbHb9vIf02o7dJBa+T/YH8p/7O/f9Tayf9jtGb+pzzxb6M06MJJxW0roZIERJIuCtSmqRwzUqAriMPfvwf3msaUoCN+UXYbAY3JEAVezQwKAfdtPkQXn0olyszbfDWMp2icoP/taa9zU6DlJaMUT0R6G9YGYVwIo16HTEiEuGNULGcqGkCHG9WRYDn3cZ5ycdePWR54cpbhfLW/bKnQRu2Z00mkUPsOvWfUYucqhOlCS7/RkZ0waBTKrejsszyVBhuB+PkIagios+3nDYo+lKb3IVCpMAqUGmdHtcKsYMxW7QqH8MgXrglDBj7HQ5mGJbURhRQjcxPYlbMA2gWGcGOaN/Zk34KQvQJCqGUUjOQ5rQc9V2aB5LGEAiul+zOGNshkAeowDVkQJXTF7Cp0KvBZ2KQOqmq0GkbJdMRz3Hs4/+53xFXD4VPGURmES8eZldt3kFU698NRfcQxm9JgM/gAeTkHUUqEoWXEOMpVPyZCXMxvV+gI1/lXq1UMVstjjN/zfSUVF1sRR/GToBPAK4I7Wyg407TDoSkZpX6cB7Uf5Tapqfuu8ji+YijCszmxNj5LVr1swvksSVBY6gPhgLc5cg44TkLeKQXl5xi1p9LB0B2/qMuuLxso1CdbPxeDB7MUJaCBjZpvTFG/DimRTqXhv1yl4GEFJE5Opw5KFTKPxI5QOAUrEargXA13VoANDNTSMkEklCJp5GjmZi5e9+u38Dv8iiIO9p69uFlzZegwk2BBQtfKaBW2BuSyhOhVAItzV2ws/M7mhJXEFO8WYQt14gzRVjskCFRe7cvuan1TAdN00jxL5rSfn/ZOTq/fnJ6f9lX27s1F7/vTwVWvf1qNBJioeb9BLddtdAKMIhoH13Q031v0XxE57lYmwK3Oi1VYgdquFYaziWnV2cXhINlPKhe2DPE7pEgDPKzQbi24+Z/auXoCbRv/P2MByg7P9WXgMA9CulkgsC7+PzpauP/Bh+Nd/P8oren/Z9r41BHAFQtOKn5/rfn9REKBbV2e0vlAU/lDWnj2MY5t/xk97I/K/23knw+Jf59CkDXyv985XpT/Tlvlf3fy/8c3x3Gspg7QPCa5HKMD/pu5znn7hT6UlVrox0gzyq9ZTO+pDp5YzM/zWDkhDgJG33KWZxp1p65vWYz3rTnnRQ31Dc2E/sLK2zoTy5jkwV2vPMRO5neOGEUpiaPfzBLoMw6LhZXOVp9xJMzDVCkw/ZRVT3mGPKTLG7Tt5Z2gq86pFM3n+yyOgGrJxuo1ShvhQSfo8i2sV0y6EbwhXEKyjzVH+U4btWL/9cF
z8EgF6mAqY6JKom4l1HQtS3zGeIDkbkjmMl76+N66L6j4AfWC8NBltt66c5ukVlEe3E6ckiQP0lhfYwcKz99EceGGi9Cs5O8d9MJRywp+c+qIfKiKYrWiNPMM5jIzH91h/NSm8y/RtvH/ikzb1i7guvq//cPF+5/O0cHu/udR2kL975zg/qHB3lJSv5EuXnmPMuIscUisEoY0cEwK30H+oYkQTpFNdvRvE7rw4uf3tnq078iK7dkZZ5L5LLa79k3/yv7wy4uNkan26ZQhYrFqI0bExe15jT2XDMNVzaT2h21WzljgaFNUrVynYpEMkbo1qp1TPe1zuLk8ueyaAk0Nq6wJZwTNvyr5oxmnPlEVf+pmJmUQszSkHNGhAXYW9ZejXOacunBNEzbRRXuJqtYUTJUJiup0VAnqryZt9/hQ1RDAkNJU/TZEnYXA3Y7jJjHhLG9bXyHcdhjuNf/HO0BPzE8o0x/mmrIw/2dXS8mPx03KABhGWHWeuqC+ns5csa3JeJfcwYPfv9p5DMvtHvafGH9uczdgXf73sLOY/znotHf2/1HaXfa/dNyfRs73UTQlxk0sUUSZJ9ENe0uRgCMSC/q0tMw28j/JyL1+B7hG/lHu2wvy3z7e1X89Tlu4iFUsNmU4gS6BrJWDrY6+8NG7RHeqSrAMUTV07EJt4FgZ4YgrFvSKwZTfR3s4iMaGGqRyXbTfV91DNzFvVmbcUutZVWkslb1pH3JjDWTSU0V1h4Y1Pd+zgM75KYrGzcFuPa6uWV3gSem/NutD5vvMklVBi+6MaKOupXplSmRefP6iumlPorRnfN/mLb75+YhGvfrJkb2MjFtDuwZEBRQlhH3HNm6BXF2z+6kF5S/a1uv/iWHfA34Avk7/txZ//9lpHRx3dvr/MZqpXtPapCyl7wLN3dDnSsNXx6P4Lw/qn8PV+QVvSRVKEnZB+wxKiWaNorez0QWTV+r3oqhfrfo+Ad5/sCzUEAqTwv50QVVmrtBzyxqr1FZHB99Ha7WxreyTbVmNsi01aKHwrQuH2LmynKv095TrWdxcdE0h2m1Val3Yb4FlLRefdeHnXyyrpGvX3H0UNhdRfI4xuNT3ffqneEWQuwfUDV0QZa3scAY6C1JXoVrFSDXh84KmVU1rCVdRuyy6rYhuousvWl/sfoW/a7u2a7u2a7v2l2v/B33C7aoAUAAA + chart: H4sIAAAAAAAAA+0ca3PbNjKf+Sv2mOsk6ZjUw7bc001nqshu6znX9lhOOp1OJwOREMWGIngAKEVNc7/9FgBfelkPu07dCpOxKBCLXexiF7uLVQLCfRpT7tAPksYiZLHDhBPo3iiM0w+1Z/dudWwnx8f6E9v8p35uHB41msfNVkv1N1qHjcYzOL4/6vUtFZJwgGecMXnXuHXvn2gL1srfHdJoFAYx43RHHErAraOjlfJHsc/Kv1k/Omo9g/qDrnRF+5vL/zlcEykpjwVIBkbMMBnSGPppGPlhHEBCvPckoMK1nsPtMBQg0iRhXOIDbo0Igoj1YUSkN8TRB8BpRGQ4pggnh5V+Evs4QUwDfMtieJlwOgg/UB8mIY77xysXruJoCizWkIokSCgH3ITUtdzT3rueRNpwii4bjXCCt90e+CEXlhuEsqb/GvItt/8br+m/eccwqKk/+VcxjmvlRH1cX5rAIIyosL50xSTBv33yHv/KET7/D4e+JTxkqYDz0zNEmHD2K/Wk5YY+JTUzDrssdyw85tOa9bmlunlbr//dIeHSnZJRtCuOdfrfPDye1//m4V7/H6WRJHxLuZJ7G8YNiyRJ8dVuuHXb8qnweJhI3dWB7/EwAE9tCRgwDnJI4btsC2UPcKG2DVz1oNhSVkxGtA1r95o1znHXXUT+hNToybb1+u8zzw3YfXCs0f9m4+hwzv87Oaof7/X/MVqthsdgMsWTcijhpfcK0PbWode5ht4ZoIKTWH8hAzweQyIpeGyUkHjqQgePfg0m8MgXlI+p7xr/QJ2kgJ9R6OGWwhM+jX1qbEUHnQn86LGBnBD0NC7MkAMYu9BEg+HRRAIREDOJcAxB+CQUOFuswS/Ou2eXSJjCYNVq+C+fYQmSYu7MokHTrcNLNcDOXtmv/q2mmLIU/ZSpQgopIpPFIjKCELtaNjIg9qjxV2SJwFVz/JTNwfqS4HCCAAl+G1QHApEZ0boNpUzatdpkMnGJpthlPKhlTBO1bK0OUp1BvYnRQ1Hc/m8aclxxfwporxGA9JHWiEy0wAJO8Z1y5mKYcHSKlPMlMoarafxQSB72UznDtJxGXHp1ALINt4Dd6cF5z4bXnd5570BN8uP57fdXb27hx87NTefy9vysB1c30L26PD2/Pb+6xG/fQufyJ/jP+eXpAdBQSRLZiU4frgDJDBU7cceouXqUzpCQHywioV44CD1cWhyk6IJCwPCEiLVTSvkoFEqsQnuWOE0UjkKpnUuxuC7XwiEBawfK2Kl9LIbgeGC7bg3/jWnsM15DF3KY9l2UdS23i+XDEN3EWg7ueCyWnEURWk5OA8UwjdnFaWftJ7jwz5cekWAwvT276SF/XmVf6QeCjKC1VdMptwuuEoUTV92bCklHXRYPwqBdxbF8xIiOGJ+iQ12lx1aMQKdf+/TZSU5jtYMEzPDHOPlaGFmn4rtiqcc4R/cXSqJhhmgrqc6+P8X/zG39+Y/7KcGIDi3Srji2zv8064f1k33+5zHaNvJ/h+E+GhrhymSrWHBd/Hd0PJ//abb28d/jtI8fHQCfDsIY3aJwhFbbBufTJwtAvQkHMCTiWmdqwBZD0jxutW1w35IopcLV411JAiggEh7GcgD2F+KbL8T8SE4TJkKJh9JdU9AIfYAlE7Z3njD21ZfKo37OV+3TJGLTEY1lFn4aDqBrJWoYEudgqu9zS+vh2zb6XzJqu3TQGv0/arUac/p/eNhq7vX/MVo1/5Pv+Pdh7LfhtJC2NaKS+ESSNirUpqkcM1KgK4jDP34E94ZGlKAjfpl3GwWNSB8VXs0MigD3fdpHF59Kpcqstjk2jKdoNEL/u6a9zk2BFlGGMe6IeBnVhmBEhFGvQ8YkRNoxKpZTFQ2gw43mSLCUezhPjtz1Ipb6NTlNcL7SX7ZUaKPWzOk4VKR9j94zWrELFcK0oa7f6MhOGDIy45Z1dlkaS0ONQPo8BDUM1Nn2iwpH78vTXRiUG4yMpMreUS07VjBmK1aFQ3joCdeEIT2P46aMg5zbSEKMkbkJ7PJZAM8FhnBDmlbWZC8hyF4BIRQaBSN5SstBz9WxQNJIQkaVsv0JwzPIZAHKMA1FEI7oitlV6JTRM7dIHVSVZFUOJdMRzUjv/vLbbYurhsMnjKMxCBa2M8uX76CodO6Ho/mIIjah/mbwPspyBiLXCMPLkHHUq25EhLicXa7QEa7zr3o9G6zQY4zf8TylFZdbMUfJk6ATwAuGO1sYONO0w6E5GcZelPqlH+VWuan7rtMoumaowtMZtTY+S1K8rMJ5bDRCZSk3hAO1zYlzwHFG5IMyUF6KUXssHQzd8Yu67Pq6QkK5s/VzNrg3jVEDKtSo
+YYU7WufEukUFv7rVQYeVkDi5HTioFah8EjkCEWTv5KgAs7VcOcZWM9ALaDxQ6EMSSVHMzNz9rpbvoXf4VdUcbAP7PnFmitDh5kECzK6NEarqDUgVzlEpwCYn7sQY+Z3VicsNCZ7Nw+bmROnj2e1Q3xf5dW+bq+2NwVwZn8X8GS7h0p3gIxOOf1O+XzuG0Hz+51L5tNOQGO5fB0I6qSCinJZMQI4REEsXdmOGBcsJuDXcVU/jAZfnHVOz27enV2cdVVG8t1l54ez3nWne1aMBBgrkr5Fy92udAIMQhr5N3Qw25v1XxM5bBfHmlvogDXH2cIZqFJadLZxOEj2k8rvLUL8DjHKVS28UZ8LXR7Y/9vG/0+Yj7rDU30Z2E/9gG4WCKyL/1utufsffDjZx/+P0qr+f6IPnzICuGb+aSHv11reTyQU2NblyZ0PPCrfxJlnH+HYxp/Rw35Q+W+j/7xPvF0KQdbo/2HzZF7/mw2V/93r/x/fHMexqjZAy5ikcogO+G/mOuf9V3pTFmahGyHPKL9hEd3RHDyxmJ+nkTqwHQQMv+MsTTTpTlnfMh/vWzMHvRrqGZ4J/YXlt3UmljHJg7te1ZA6md45YhDGJAp/MyjQZ+xniJXNVp9RKMzDRBkw/ZQUT2mCMqSLC7TtxZWgq86pFNXnXZAjoEJZwV6StBEddIzu0Ry+bNKN4A3jRiR5qDnyd/pQy9ZfbjwHt5SvNqY6TFRJ1FJGTdaKxGOM+8juimYu0qW379J1QSEPKBHCfdFsvXRnmaYWUR4sZ07OkntZrNfYgcrzNzFcuOAsjMnlewe/cNSigd+cOyLtq6JYbSjNPL2ZzMyDO4yf++j8S7Rt/L8s07a1C7iu/u/weL7+t9k6qu/9v8doc/W/M4r7hwZ7C0n9Srp45T3KgLORQyKVMKS+Y1L4DsoPjwjhZNlkR/82oQ0vfv5oq0f7jqzYgZ1wJpnHIrtt33av7U+/vNiYmGKdTh4iZlgrMSIit2ct9kziCLGaSe1P22BOmO/oo6jAXKZikQ2hujUqnVM97XO4vTq9apsCTQ2rThPOCB7/quSPJpx6RFX8qZuZmEHE4oByJIf62JnVXw5Sladz4YaO2FgX7Y1UtaZgqkxQFLujSFB/M264J8eqhgD6lMbqtyFqL/judhI3iQlncdn6CmHZZthp/ofbQE/MT8jTH+aaMjv+z68Xkh+Pm5QBMIKwypxuxn09nbliW5PxzqWDG797vfcYFtsO5z8x/tzmbsC6/O9xcz7/c9Rs7M//R2l3nf+54/40cr6PYikxbmIjxZRZFt2y9xQZOCCRoE/Lymyj/+OE7PQ7wDX6j3o/X//VOGnt9f9R2tylpRKxKcPxdQlkaRxstfWFh94lulNFgqWPpqFpZ2YDx8oQR1wzv5MNpnwX6+EgGRtakMJ10X5fcWdbpbxambGk1rOo0lgoe9M+5MYWyKSnsuoODWt6fmA+nfFTFI+rg91yXFmzOieT3H+t1ofM9hmURUGL7gxppa6leGVKZF58+aK4lR6Fccf4vtUbb/PzEU168ZMje5EYt4R2DYgKKHII+45lLIFcXbP7uRXlL9rW2/+xEd89fgC+zv436gu//z46aezt/2M0U72mrUleSt8GmrqBx5WFL7ZH9l8elD+HK/MLtQVTKEnQBu0zKCOaVIrezgeXTF6r34uifbXK+wT4+Mmy0EIoSrLzpw2qMnOFnVu0WLm1ah39EK61xrY6n2zLqpRtqUFzhW9tOMbOleVcub+nXM/s5qJtCtGWVam14bAOlrVYfNaGn3+xrJyvbXP3kZ256s6pKJMyy6jWShm2PccoXeobQf1jvSwMPgDqBi6IvJq2PwWdJynrVK1spJr3ecb1ouo1hyvkkZflFmIx8fdX9a/2v9Pft33bt33bt317Yu3/Ixz+lwBQAAA= values: image: tag: v0.22.0-dev From 2c728401f7288b492a97c6ac585885f528283811 Mon Sep 17 00:00:00 2001 From: Rafael Franzke Date: Fri, 24 Nov 2023 13:06:45 +0100 Subject: [PATCH 7/8] Address PR review feedback --- pkg/controller/operatingsystemconfig/actuator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/operatingsystemconfig/actuator.go b/pkg/controller/operatingsystemconfig/actuator.go index c931dfcf6..1466e2823 100644 --- a/pkg/controller/operatingsystemconfig/actuator.go +++ b/pkg/controller/operatingsystemconfig/actuator.go @@ -201,7 +201,7 @@ ExecStartPre=` + filePathKubeletCGroupDriverScript + ` FilePaths: []string{filePathFunctionsHelperScript, filePathKubeletCGroupDriverScript}, }) - // add scripts and dropins for containerd if activated + // add scripts and dropins for containerd filePathContainerdCGroupDriverScript := filepath.Join(gardenlinux.ScriptLocation, "containerd_cgroup_driver.sh") extensionFiles = append(extensionFiles, extensionsv1alpha1.File{ Path: filePathContainerdCGroupDriverScript, From 710b423065312274b665a3db5715f71cd933226c Mon Sep 17 00:00:00 2001 From: Rafael Franzke Date: Fri, 24 Nov 2023 14:29:10 +0100 Subject: [PATCH 8/8] Address PR review feedback --- pkg/controller/operatingsystemconfig/actuator.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/controller/operatingsystemconfig/actuator.go b/pkg/controller/operatingsystemconfig/actuator.go index 1466e2823..d98cbac5c 100644 --- a/pkg/controller/operatingsystemconfig/actuator.go +++ b/pkg/controller/operatingsystemconfig/actuator.go @@ -62,6 +62,7 @@ func (a *actuator) 
Reconcile(ctx context.Context, log logr.Logger, osc *extensio case extensionsv1alpha1.OperatingSystemConfigPurposeReconcile: extensionUnits, extensionFiles, err := a.handleReconcileOSC(osc) + // TODO(rfranzke): Change this to `return nil, nil, nil, nil, extensionUnits, extensionFiles, err` after UseGardenerNodeAgent feature gate has been removed. return cloudConfig, command, oscommonactuator.OperatingSystemConfigUnitNames(osc), oscommonactuator.OperatingSystemConfigFilePaths(osc), extensionUnits, extensionFiles, err default: