diff --git a/Makefile b/Makefile index 258f31e7c8..a376952709 100644 --- a/Makefile +++ b/Makefile @@ -74,6 +74,7 @@ verify: ## Verify code. Includes codegen, dependencies, linting, formatting, etc hack/validation/requirements.sh hack/validation/labels.sh hack/validation/resources.sh + hack/mutation/nodepool.sh hack/dependabot.sh @# Use perl instead of sed due to https://stackoverflow.com/questions/4247068/sed-command-with-i-option-failing-on-mac-but-works-on-linux @# We need to do this "sed replace" until controller-tools fixes this parameterized types issue: https://github.com/kubernetes-sigs/controller-tools/issues/756 diff --git a/go.mod b/go.mod index a6b2e6a03e..0e9ac5bf1a 100644 --- a/go.mod +++ b/go.mod @@ -1,39 +1,44 @@ module sigs.k8s.io/karpenter -go 1.21 +go 1.22.2 + +toolchain go1.22.5 require ( github.com/Pallinder/go-randomdata v1.2.0 github.com/avast/retry-go v3.0.0+incompatible + github.com/awslabs/operatorpkg v0.0.0-20240730231251-0fad555c25c5 github.com/docker/docker v25.0.2+incompatible - github.com/go-logr/logr v1.4.1 + github.com/go-logr/logr v1.4.2 github.com/go-logr/zapr v1.3.0 github.com/imdario/mergo v0.3.16 github.com/mitchellh/hashstructure/v2 v2.0.2 - github.com/onsi/ginkgo/v2 v2.15.0 - github.com/onsi/gomega v1.31.1 + github.com/onsi/ginkgo/v2 v2.19.1 + github.com/onsi/gomega v1.34.0 github.com/patrickmn/go-cache v2.1.0+incompatible - github.com/prometheus/client_golang v1.18.0 - github.com/prometheus/client_model v0.5.0 - github.com/samber/lo v1.39.0 + github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_model v0.6.1 + github.com/samber/lo v1.46.0 go.uber.org/multierr v1.11.0 - go.uber.org/zap v1.26.0 - golang.org/x/sync v0.6.0 - golang.org/x/text v0.14.0 + go.uber.org/zap v1.27.0 + golang.org/x/sync v0.7.0 + golang.org/x/text v0.16.0 golang.org/x/time v0.5.0 - k8s.io/api v0.29.1 - k8s.io/apiextensions-apiserver v0.29.1 - k8s.io/apimachinery v0.29.1 - k8s.io/client-go v0.29.1 + k8s.io/api v0.30.3 + k8s.io/apiextensions-apiserver v0.30.1 + k8s.io/apimachinery v0.30.3 + k8s.io/client-go v0.30.3 k8s.io/cloud-provider v0.29.1 - k8s.io/component-base v0.29.1 + k8s.io/component-base v0.30.1 k8s.io/csi-translation-lib v0.29.1 - k8s.io/klog/v2 v2.120.1 - k8s.io/utils v0.0.0-20230726121419-3b25d923346b + k8s.io/klog/v2 v2.130.1 + k8s.io/utils v0.0.0-20240102154912-e7106e64919e knative.dev/pkg v0.0.0-20230712131115-7051d301e7f4 - sigs.k8s.io/controller-runtime v0.17.0 + sigs.k8s.io/controller-runtime v0.18.4 ) +require github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + require ( contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect contrib.go.opencensus.io/exporter/prometheus v0.4.0 // indirect @@ -44,24 +49,23 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch v4.12.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.8.0 // indirect + github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gobuffalo/flect v0.2.4 // 
indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -69,12 +73,11 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/kelseyhightower/envconfig v1.4.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/common v0.53.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/prometheus/statsd_exporter v0.21.0 // indirect github.com/robfig/cron/v3 v3.0.1 @@ -84,22 +87,22 @@ require ( go.uber.org/atomic v1.10.0 // indirect go.uber.org/automaxprocs v1.4.0 // indirect golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/net v0.19.0 // indirect - golang.org/x/oauth2 v0.12.0 // indirect - golang.org/x/sys v0.16.0 // indirect - golang.org/x/term v0.15.0 // indirect - golang.org/x/tools v0.16.1 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/term v0.20.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/api v0.124.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/grpc v1.58.3 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/go.sum b/go.sum index d4eeef7fa6..0d33897e67 100644 --- a/go.sum +++ b/go.sum @@ -47,6 +47,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= +github.com/awslabs/operatorpkg v0.0.0-20240730231251-0fad555c25c5 
h1:UxZRNmmwMmXZSm0oHXFS4L7JxshlawiWX6UmQcZ2Fvc= +github.com/awslabs/operatorpkg v0.0.0-20240730231251-0fad555c25c5/go.mod h1:NmFIDk+owvhQnz7hsFgdShl1CXywHQaVCQwhLS+CIhY= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -79,10 +81,10 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= -github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -99,8 +101,8 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= @@ -110,8 +112,8 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 
h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gobuffalo/flect v0.2.4 h1:BSYA8+T60cdyq+vynaSUjqSVI9mDEg9ZfQUXKmfjo4I= github.com/gobuffalo/flect v0.2.4/go.mod h1:1ZyCLIbg0YD7sDkzvFdPoOydPtD8y9JQnrOROolUcM8= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -146,9 +148,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= @@ -178,12 +179,12 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= @@ -194,7 +195,6 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -229,8 +229,6 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -244,10 +242,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= -github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= -github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= -github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= +github.com/onsi/ginkgo/v2 v2.19.1 h1:QXgq3Z8Crl5EL1WBAC98A5sEBHARrAJNzAmMxzLcRF0= +github.com/onsi/ginkgo/v2 v2.19.1/go.mod h1:O3DtEWQkPa/F7fBMgmZQKKsluAy8pd3rEQdrjkPb9zA= +github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= +github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -260,20 +258,20 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod 
h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= +github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= @@ -289,8 +287,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA= -github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= +github.com/samber/lo v1.46.0 h1:w8G+oaCPgz1PoCJztqymCFaKwXt+5cCXn51uPxExFfQ= +github.com/samber/lo v1.46.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -335,8 +333,8 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= 
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -375,8 +373,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -409,16 +407,16 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= -golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -429,8 +427,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -467,19 +465,19 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -527,8 +525,8 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= 
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -625,9 +623,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -656,33 +653,33 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= -k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= -k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw= -k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU= -k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= -k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= -k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= +k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ= +k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04= +k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws= +k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4= +k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc= +k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= +k8s.io/client-go v0.30.3/go.mod 
h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U=
k8s.io/cloud-provider v0.29.1 h1:bDLpOSpysWrtU2PCkvyP2sUTwRBa6MGCmxt68CRRW/8=
k8s.io/cloud-provider v0.29.1/go.mod h1:u50Drm6AbuoKpsVbAstNiFHGgbSVHuJV4TWN5imdM2w=
-k8s.io/component-base v0.29.1 h1:MUimqJPCRnnHsskTTjKD+IC1EHBbRCVyi37IoFBrkYw=
-k8s.io/component-base v0.29.1/go.mod h1:fP9GFjxYrLERq1GcWWZAE3bqbNcDKDytn2srWuHTtKc=
+k8s.io/component-base v0.30.1 h1:bvAtlPh1UrdaZL20D9+sWxsJljMi0QZ3Lmw+kmZAaxQ=
+k8s.io/component-base v0.30.1/go.mod h1:e/X9kDiOebwlI41AvBHuWdqFriSRrX50CdwA9TFaHLI=
k8s.io/csi-translation-lib v0.29.1 h1:b2tYZnnHyrQVHG6GYel7egmVvKeIlX/xbTNm9ynBSUg=
k8s.io/csi-translation-lib v0.29.1/go.mod h1:Zglui6PgFSew8ux50djwZ3PFK6eNrWktid66D7pHDDo=
-k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
-k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
-k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
+k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
+k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ=
+k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
knative.dev/pkg v0.0.0-20230712131115-7051d301e7f4 h1:oO/BQJpVCFTSTMHF/S6u+nPtIvbHDTsvbPZvdCZAFjs=
knative.dev/pkg v0.0.0-20230712131115-7051d301e7f4/go.mod h1:eXobTqst4aI7CNa6W7sG73VhEsHGWPSrkefeMTb++a0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/controller-runtime v0.17.0 h1:fjJQf8Ukya+VjogLO6/bNX9HE6Y2xpsO5+fyS26ur/s=
-sigs.k8s.io/controller-runtime v0.17.0/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s=
+sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw=
+sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
diff --git a/hack/mutation/nodepool.sh b/hack/mutation/nodepool.sh
new file mode 100755
index 0000000000..c893fa34d5
--- /dev/null
+++ b/hack/mutation/nodepool.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+# Remove the resources field from spec.template.spec in the v1 NodePool API
+yq eval 'del(.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources)' -i pkg/apis/crds/karpenter.sh_nodepools.yaml
\ No newline at end of file
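The mutation script above strips the resources stanza from the v1 schema only (versions[0]); the v1beta1 schema at versions[1] keeps the field, which is why resources.sh below now pins its maxProperties rule to index 1. A quick sanity check after running the script (a sketch, assuming yq v4, the mikefarah build, and the generated CRD at the path used above):

    ./hack/mutation/nodepool.sh
    # v1 (index 0) should no longer carry the field:
    yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties | has("resources")' pkg/apis/crds/karpenter.sh_nodepools.yaml   # expect: false
    # v1beta1 (index 1) still should:
    yq eval '.spec.versions[1].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties | has("resources")' pkg/apis/crds/karpenter.sh_nodepools.yaml   # expect: true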
diff --git a/hack/validation/kubelet.sh b/hack/validation/kubelet.sh
index ca31142ee3..5b8633e406 100755
--- a/hack/validation/kubelet.sh
+++ b/hack/validation/kubelet.sh
@@ -2,10 +2,10 @@
# The regular expression adds a check that kubelet.evictionHard and kubelet.evictionSoft values are either a percentage or a quantity
# Adding validation for nodeclaim
-yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.kubelet.properties.evictionHard.additionalProperties.pattern = "^((\d{1,2}(\.\d{1,2})?|100(\.0{1,2})?)%||(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?)$"' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml
-yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.kubelet.properties.evictionSoft.additionalProperties.pattern = "^((\d{1,2}(\.\d{1,2})?|100(\.0{1,2})?)%||(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?)$"' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml
+yq eval '.spec.versions[1].schema.openAPIV3Schema.properties.spec.properties.kubelet.properties.evictionHard.additionalProperties.pattern = "^((\d{1,2}(\.\d{1,2})?|100(\.0{1,2})?)%||(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?)$"' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml
+yq eval '.spec.versions[1].schema.openAPIV3Schema.properties.spec.properties.kubelet.properties.evictionSoft.additionalProperties.pattern = "^((\d{1,2}(\.\d{1,2})?|100(\.0{1,2})?)%||(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?)$"' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml
# The regular expression adds a check that kubelet.evictionHard and kubelet.evictionSoft values are either a percentage or a quantity
# Adding validation for nodepool
-yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.kubelet.properties.evictionHard.additionalProperties.pattern = "^((\d{1,2}(\.\d{1,2})?|100(\.0{1,2})?)%||(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?)$"' -i pkg/apis/crds/karpenter.sh_nodepools.yaml
-yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.kubelet.properties.evictionSoft.additionalProperties.pattern = "^((\d{1,2}(\.\d{1,2})?|100(\.0{1,2})?)%||(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?)$"' -i pkg/apis/crds/karpenter.sh_nodepools.yaml
+yq eval '.spec.versions[1].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.kubelet.properties.evictionHard.additionalProperties.pattern = "^((\d{1,2}(\.\d{1,2})?|100(\.0{1,2})?)%||(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?)$"' -i pkg/apis/crds/karpenter.sh_nodepools.yaml
+yq eval '.spec.versions[1].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.kubelet.properties.evictionSoft.additionalProperties.pattern = "^((\d{1,2}(\.\d{1,2})?|100(\.0{1,2})?)%||(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?)$"' -i pkg/apis/crds/karpenter.sh_nodepools.yaml
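Note the index shift from versions[0] to versions[1] in kubelet.sh: per the version map spelled out in the loops below (index 0 is v1, index 1 is v1beta1), these kubelet eviction rules now apply only to the v1beta1 schema. Two quick spot-checks, as a sketch (assuming yq v4, and GNU grep since the pattern uses \d and therefore needs PCRE via -P):

    # confirm the index-to-version ordering the scripts rely on
    yq eval '.spec.versions[].name' pkg/apis/crds/karpenter.sh_nodeclaims.yaml   # expected: v1, then v1beta1

    # exercise the eviction threshold pattern against sample values
    pattern='^((\d{1,2}(\.\d{1,2})?|100(\.0{1,2})?)%||(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?)$'
    for v in 10% 100% 1.5Gi 100m 101% ten; do
      printf '%s' "$v" | grep -Pq "$pattern" && echo "$v: ok" || echo "$v: rejected"
    done
    # 10%, 100%, 1.5Gi and 100m should pass; 101% and ten should be rejected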
diff --git a/hack/validation/kwok-requirements.sh b/hack/validation/kwok-requirements.sh
index 5012d6e4fc..f178c1cc41 100755
--- a/hack/validation/kwok-requirements.sh
+++ b/hack/validation/kwok-requirements.sh
@@ -3,11 +3,11 @@
# Adding validation for nodeclaim
## checking for restricted labels while filtering out well-known labels
-yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.requirements.items.properties.key.x-kubernetes-validations += [
+yq eval '.spec.versions[1].schema.openAPIV3Schema.properties.spec.properties.requirements.items.properties.key.x-kubernetes-validations += [
 {"message": "label domain \"karpenter.kwok.sh\" is restricted", "rule": "self in [\"karpenter.kwok.sh/instance-cpu\", \"karpenter.kwok.sh/instance-memory\", \"karpenter.kwok.sh/instance-family\", \"karpenter.kwok.sh/instance-size\"] || !self.find(\"^([^/]+)\").endsWith(\"karpenter.kwok.sh\")"}]' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml
# Adding validation for nodepool
## checking for restricted labels while filtering out well-known labels
-yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.requirements.items.properties.key.x-kubernetes-validations += [
- {"message": "label domain \"karpenter.kwok.sh\" is restricted", "rule": "self in [\"karpenter.kwok.sh/instance-cpu\", \"karpenter.kwok.sh/instance-memory\", \"karpenter.kwok.sh/instance-family\", \"karpenter.kwok.sh/instance-size\"] || !self.find(\"^([^/]+)\").endsWith(\"karpenter.kwok.sh\")"}]' -i pkg/apis/crds/karpenter.sh_nodepools.yaml
+yq eval '.spec.versions[1].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.requirements.items.properties.key.x-kubernetes-validations += [
+ {"message": "label domain \"karpenter.kwok.sh\" is restricted", "rule": "self in [\"karpenter.kwok.sh/instance-cpu\", \"karpenter.kwok.sh/instance-memory\", \"karpenter.kwok.sh/instance-family\", \"karpenter.kwok.sh/instance-size\"] || !self.find(\"^([^/]+)\").endsWith(\"karpenter.kwok.sh\")"}]' -i pkg/apis/crds/karpenter.sh_nodepools.yaml
\ No newline at end of file
diff --git a/hack/validation/labels.sh b/hack/validation/labels.sh
index 1fdc33c212..12a5f0a658 100755
--- a/hack/validation/labels.sh
+++ b/hack/validation/labels.sh
@@ -1,14 +1,19 @@
# Labels Validation
-# Adding validation for nodepool
-## checking for restricted labels while filtering out well-known labels
-yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.metadata.properties.labels.maxProperties = 100' -i pkg/apis/crds/karpenter.sh_nodepools.yaml
-yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.metadata.properties.labels.x-kubernetes-validations += [
- {"message": "label domain \"kubernetes.io\" is restricted", "rule": "self.all(x, x in [\"beta.kubernetes.io/instance-type\", \"failure-domain.beta.kubernetes.io/region\", \"beta.kubernetes.io/os\", \"beta.kubernetes.io/arch\", \"failure-domain.beta.kubernetes.io/zone\", \"topology.kubernetes.io/zone\", \"topology.kubernetes.io/region\", \"kubernetes.io/arch\", \"kubernetes.io/os\", \"node.kubernetes.io/windows-build\"] || x.find(\"^([^/]+)\").endsWith(\"node.kubernetes.io\") || x.find(\"^([^/]+)\").endsWith(\"node-restriction.kubernetes.io\") || !x.find(\"^([^/]+)\").endsWith(\"kubernetes.io\"))"},
- {"message": "label domain \"k8s.io\" is restricted", "rule": "self.all(x, x.find(\"^([^/]+)\").endsWith(\"kops.k8s.io\") || !x.find(\"^([^/]+)\").endsWith(\"k8s.io\"))"},
- {"message": "label domain \"karpenter.sh\" is restricted", "rule": "self.all(x, x in [\"karpenter.sh/capacity-type\", \"karpenter.sh/nodepool\"] || !x.find(\"^([^/]+)\").endsWith(\"karpenter.sh\"))"},
{"message": "label \"karpenter.sh/nodepool\" is restricted", "rule": "self.all(x, x != \"karpenter.sh/nodepool\")"}, - {"message": "label \"kubernetes.io/hostname\" is restricted", "rule": "self.all(x, x != \"kubernetes.io/hostname\")"}]' -i pkg/apis/crds/karpenter.sh_nodepools.yaml -# # ## Vaild requirement value check -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.metadata.properties.labels.additionalProperties.maxLength = 63' -i pkg/apis/crds/karpenter.sh_nodepools.yaml -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.metadata.properties.labels.additionalProperties.pattern = "^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$" ' -i pkg/apis/crds/karpenter.sh_nodepools.yaml \ No newline at end of file +# Adding validation to both v1 and v1beta1 APIs +# Version = 0 // v1 API +# Version = 1 // v1beta1 API +for Version in $(seq 0 1); do + # NodePool Validation: + # checking for restricted labels while filtering out well-known labels + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.template.properties.metadata.properties.labels.maxProperties = 100' -i pkg/apis/crds/karpenter.sh_nodepools.yaml + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.template.properties.metadata.properties.labels.x-kubernetes-validations += [ + {"message": "label domain \"kubernetes.io\" is restricted", "rule": "self.all(x, x in [\"beta.kubernetes.io/instance-type\", \"failure-domain.beta.kubernetes.io/region\", \"beta.kubernetes.io/os\", \"beta.kubernetes.io/arch\", \"failure-domain.beta.kubernetes.io/zone\", \"topology.kubernetes.io/zone\", \"topology.kubernetes.io/region\", \"kubernetes.io/arch\", \"kubernetes.io/os\", \"node.kubernetes.io/windows-build\"] || x.find(\"^([^/]+)\").endsWith(\"node.kubernetes.io\") || x.find(\"^([^/]+)\").endsWith(\"node-restriction.kubernetes.io\") || !x.find(\"^([^/]+)\").endsWith(\"kubernetes.io\"))"}, + {"message": "label domain \"k8s.io\" is restricted", "rule": "self.all(x, x.find(\"^([^/]+)\").endsWith(\"kops.k8s.io\") || !x.find(\"^([^/]+)\").endsWith(\"k8s.io\"))"}, + {"message": "label domain \"karpenter.sh\" is restricted", "rule": "self.all(x, x in [\"karpenter.sh/capacity-type\", \"karpenter.sh/nodepool\"] || !x.find(\"^([^/]+)\").endsWith(\"karpenter.sh\"))"}, + {"message": "label \"karpenter.sh/nodepool\" is restricted", "rule": "self.all(x, x != \"karpenter.sh/nodepool\")"}, + {"message": "label \"kubernetes.io/hostname\" is restricted", "rule": "self.all(x, x != \"kubernetes.io/hostname\")"}]' -i pkg/apis/crds/karpenter.sh_nodepools.yaml + # Vaild requirement value check + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.template.properties.metadata.properties.labels.additionalProperties.maxLength = 63' -i pkg/apis/crds/karpenter.sh_nodepools.yaml + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.template.properties.metadata.properties.labels.additionalProperties.pattern = "^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$"' -i pkg/apis/crds/karpenter.sh_nodepools.yaml +done \ No newline at end of file diff --git a/hack/validation/requirements.sh b/hack/validation/requirements.sh index e0ca78e259..414a7e64dd 100755 --- a/hack/validation/requirements.sh +++ b/hack/validation/requirements.sh @@ -1,36 +1,36 @@ # Requirements 
diff --git a/hack/validation/requirements.sh b/hack/validation/requirements.sh
index e0ca78e259..414a7e64dd 100755
--- a/hack/validation/requirements.sh
+++ b/hack/validation/requirements.sh
@@ -1,36 +1,36 @@
# Requirements Validation
-# Adding validation for nodeclaim
+for Version in $(seq 0 1); do
+  # NodeClaim Validation:
+  ## Qualified name for requirement keys
+  yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.requirements.items.properties.key.maxLength = 316' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml
+  yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.requirements.items.properties.key.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml
+  ## checking for restricted labels while filtering out well-known labels
+  yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.requirements.items.properties.key.x-kubernetes-validations += [
+  {"message": "label domain \"kubernetes.io\" is restricted", "rule": "self in [\"beta.kubernetes.io/instance-type\", \"failure-domain.beta.kubernetes.io/region\", \"beta.kubernetes.io/os\", \"beta.kubernetes.io/arch\", \"failure-domain.beta.kubernetes.io/zone\", \"topology.kubernetes.io/zone\", \"topology.kubernetes.io/region\", \"node.kubernetes.io/instance-type\", \"kubernetes.io/arch\", \"kubernetes.io/os\", \"node.kubernetes.io/windows-build\"] || self.find(\"^([^/]+)\").endsWith(\"node.kubernetes.io\") || self.find(\"^([^/]+)\").endsWith(\"node-restriction.kubernetes.io\") || !self.find(\"^([^/]+)\").endsWith(\"kubernetes.io\")"},
+  {"message": "label domain \"k8s.io\" is restricted", "rule": "self.find(\"^([^/]+)\").endsWith(\"kops.k8s.io\") || !self.find(\"^([^/]+)\").endsWith(\"k8s.io\")"},
+  {"message": "label domain \"karpenter.sh\" is restricted", "rule": "self in [\"karpenter.sh/capacity-type\", \"karpenter.sh/nodepool\"] || !self.find(\"^([^/]+)\").endsWith(\"karpenter.sh\")"},
+  {"message": "label \"kubernetes.io/hostname\" is restricted", "rule": "self != \"kubernetes.io/hostname\""}]' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml
+  ## operator enum values
+  yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.requirements.items.properties.operator.enum += ["In","NotIn","Exists","DoesNotExist","Gt","Lt"]' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml
+  ## Valid requirement value check
+  yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.requirements.items.properties.values.maxLength = 63' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml
+  yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.requirements.items.properties.values.pattern = "^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$"' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml
-## Qualified name for requirement keys
-yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.requirements.items.properties.key.maxLength = 316' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml
-yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.requirements.items.properties.key.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml
-## checking for restricted labels while filtering out well-known labels
-yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.requirements.items.properties.key.x-kubernetes-validations += [
- {"message": "label domain \"kubernetes.io\" is restricted", "rule": "self in [\"beta.kubernetes.io/instance-type\", \"failure-domain.beta.kubernetes.io/region\", \"beta.kubernetes.io/os\", \"beta.kubernetes.io/arch\", \"failure-domain.beta.kubernetes.io/zone\", \"topology.kubernetes.io/zone\", \"topology.kubernetes.io/region\", \"node.kubernetes.io/instance-type\", \"kubernetes.io/arch\", \"kubernetes.io/os\", \"node.kubernetes.io/windows-build\"] || self.find(\"^([^/]+)\").endsWith(\"node.kubernetes.io\") || self.find(\"^([^/]+)\").endsWith(\"node-restriction.kubernetes.io\") || !self.find(\"^([^/]+)\").endsWith(\"kubernetes.io\")"},
- {"message": "label domain \"k8s.io\" is restricted", "rule": "self.find(\"^([^/]+)\").endsWith(\"kops.k8s.io\") || !self.find(\"^([^/]+)\").endsWith(\"k8s.io\")"},
- {"message": "label domain \"karpenter.sh\" is restricted", "rule": "self in [\"karpenter.sh/capacity-type\", \"karpenter.sh/nodepool\"] || !self.find(\"^([^/]+)\").endsWith(\"karpenter.sh\")"},
- {"message": "label \"kubernetes.io/hostname\" is restricted", "rule": "self != \"kubernetes.io/hostname\""}]' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml
-## operator enum values
-yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.requirements.items.properties.operator.enum += ["In","NotIn","Exists","DoesNotExist","Gt","Lt"]' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml
-## Vaild requirement value check
-yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.requirements.items.properties.values.maxLength = 63' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml
-yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.requirements.items.properties.values.pattern = "^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$" ' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml
-
-# Adding validation for nodepool
-
-## Qualified name for requirement keys
-yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.requirements.items.properties.key.maxLength = 316' -i pkg/apis/crds/karpenter.sh_nodepools.yaml
-yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.requirements.items.properties.key.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodepools.yaml
-## checking for restricted labels while filtering out well-known labels
-yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.requirements.items.properties.key.x-kubernetes-validations += [
- {"message": "label domain \"kubernetes.io\" is restricted", "rule": "self in [\"beta.kubernetes.io/instance-type\", \"failure-domain.beta.kubernetes.io/region\", \"beta.kubernetes.io/os\", \"beta.kubernetes.io/arch\", \"failure-domain.beta.kubernetes.io/zone\", \"topology.kubernetes.io/zone\", \"topology.kubernetes.io/region\", \"node.kubernetes.io/instance-type\", \"kubernetes.io/arch\", \"kubernetes.io/os\", \"node.kubernetes.io/windows-build\"] || self.find(\"^([^/]+)\").endsWith(\"node.kubernetes.io\") || self.find(\"^([^/]+)\").endsWith(\"node-restriction.kubernetes.io\") || !self.find(\"^([^/]+)\").endsWith(\"kubernetes.io\")"},
"self.find(\"^([^/]+)\").endsWith(\"kops.k8s.io\") || !self.find(\"^([^/]+)\").endsWith(\"k8s.io\")"}, - {"message": "label domain \"karpenter.sh\" is restricted", "rule": "self in [\"karpenter.sh/capacity-type\", \"karpenter.sh/nodepool\"] || !self.find(\"^([^/]+)\").endsWith(\"karpenter.sh\")"}, - {"message": "label \"karpenter.sh/nodepool\" is restricted", "rule": "self != \"karpenter.sh/nodepool\""}, - {"message": "label \"kubernetes.io/hostname\" is restricted", "rule": "self != \"kubernetes.io/hostname\""}]' -i pkg/apis/crds/karpenter.sh_nodepools.yaml -## operator enum values -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.requirements.items.properties.operator.enum += ["In","NotIn","Exists","DoesNotExist","Gt","Lt"]' -i pkg/apis/crds/karpenter.sh_nodepools.yaml -## Vaild requirement value check -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.requirements.items.properties.values.maxLength = 63' -i pkg/apis/crds/karpenter.sh_nodepools.yaml -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.requirements.items.properties.values.pattern = "^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$" ' -i pkg/apis/crds/karpenter.sh_nodepools.yaml \ No newline at end of file + # NodePool Validation: + ## Qualified name for requirement keys + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.requirements.items.properties.key.maxLength = 316' -i pkg/apis/crds/karpenter.sh_nodepools.yaml + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.requirements.items.properties.key.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodepools.yaml + ## checking for restricted labels while filtering out well-known labels + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.requirements.items.properties.key.x-kubernetes-validations += [ + {"message": "label domain \"kubernetes.io\" is restricted", "rule": "self in [\"beta.kubernetes.io/instance-type\", \"failure-domain.beta.kubernetes.io/region\", \"beta.kubernetes.io/os\", \"beta.kubernetes.io/arch\", \"failure-domain.beta.kubernetes.io/zone\", \"topology.kubernetes.io/zone\", \"topology.kubernetes.io/region\", \"node.kubernetes.io/instance-type\", \"kubernetes.io/arch\", \"kubernetes.io/os\", \"node.kubernetes.io/windows-build\"] || self.find(\"^([^/]+)\").endsWith(\"node.kubernetes.io\") || self.find(\"^([^/]+)\").endsWith(\"node-restriction.kubernetes.io\") || !self.find(\"^([^/]+)\").endsWith(\"kubernetes.io\")"}, + {"message": "label domain \"k8s.io\" is restricted", "rule": "self.find(\"^([^/]+)\").endsWith(\"kops.k8s.io\") || !self.find(\"^([^/]+)\").endsWith(\"k8s.io\")"}, + {"message": "label domain \"karpenter.sh\" is restricted", "rule": "self in [\"karpenter.sh/capacity-type\", \"karpenter.sh/nodepool\"] || !self.find(\"^([^/]+)\").endsWith(\"karpenter.sh\")"}, + {"message": "label \"karpenter.sh/nodepool\" is restricted", "rule": "self != \"karpenter.sh/nodepool\""}, + {"message": "label \"kubernetes.io/hostname\" is restricted", "rule": "self != 
\"kubernetes.io/hostname\""}]' -i pkg/apis/crds/karpenter.sh_nodepools.yaml + ## operator enum values + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.requirements.items.properties.operator.enum += ["In","NotIn","Exists","DoesNotExist","Gt","Lt"]' -i pkg/apis/crds/karpenter.sh_nodepools.yaml + ## Valid requirement value check + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.requirements.items.properties.values.maxLength = 63' -i pkg/apis/crds/karpenter.sh_nodepools.yaml + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.requirements.items.properties.values.pattern = "^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$"' -i pkg/apis/crds/karpenter.sh_nodepools.yaml +done \ No newline at end of file diff --git a/hack/validation/resources.sh b/hack/validation/resources.sh index ee1ba1e268..29621c79e4 100755 --- a/hack/validation/resources.sh +++ b/hack/validation/resources.sh @@ -1,2 +1,2 @@ # Adding validation for nodepool.spec.template.spec.resources -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.maxProperties = 0' -i pkg/apis/crds/karpenter.sh_nodepools.yaml +yq eval '.spec.versions[1].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.maxProperties = 0' -i pkg/apis/crds/karpenter.sh_nodepools.yaml diff --git a/hack/validation/taint.sh b/hack/validation/taint.sh index 6c574f730b..3f074f5d75 100755 --- a/hack/validation/taint.sh +++ b/hack/validation/taint.sh @@ -1,33 +1,32 @@ # Taints Validation -# Adding validation for nodepool - -## Taint -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.taints.items.properties.key.minLength = 1' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.taints.items.properties.key.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.taints.items.properties.value.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.taints.items.properties.effect.enum += ["NoSchedule","PreferNoSchedule","NoExecute"]' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml - - -## Startup-Taint -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.startupTaints.items.properties.key.minLength = 1' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.startupTaints.items.properties.key.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.startupTaints.items.properties.value.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i 
pkg/apis/crds/karpenter.sh_nodeclaims.yaml -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.startupTaints.items.properties.effect.enum += ["NoSchedule","PreferNoSchedule","NoExecute"]' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml - - -# Adding validation for nodepool - -## Taint -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.taints.items.properties.key.minLength = 1' -i pkg/apis/crds/karpenter.sh_nodepools.yaml -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.taints.items.properties.key.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodepools.yaml -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.taints.items.properties.value.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodepools.yaml -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.taints.items.properties.effect.enum += ["NoSchedule","PreferNoSchedule","NoExecute"]' -i pkg/apis/crds/karpenter.sh_nodepools.yaml - -## Startup-Taint -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.startupTaints.items.properties.key.minLength = 1' -i pkg/apis/crds/karpenter.sh_nodepools.yaml -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.startupTaints.items.properties.key.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodepools.yaml -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.startupTaints.items.properties.value.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodepools.yaml -yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.startupTaints.items.properties.effect.enum += ["NoSchedule","PreferNoSchedule","NoExecute"]' -i pkg/apis/crds/karpenter.sh_nodepools.yaml - - +# Adding validation to both v1 and v1beta1 APIs +# Version = 0 // v1 API +# Version = 1 // v1beta1 API +for Version in $(seq 0 1); do + # NodeClaim Validation: + ## Taint + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.taints.items.properties.key.minLength = 1' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.taints.items.properties.key.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.taints.items.properties.value.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml + yqVersion="$Version" yq eval 
'.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.taints.items.properties.effect.enum += ["NoSchedule","PreferNoSchedule","NoExecute"]' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml + + ## Startup-Taint + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.startupTaints.items.properties.key.minLength = 1' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.startupTaints.items.properties.key.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.startupTaints.items.properties.value.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.startupTaints.items.properties.effect.enum += ["NoSchedule","PreferNoSchedule","NoExecute"]' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml + + # Nodepool Validation: + ## Taint + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.taints.items.properties.key.minLength = 1' -i pkg/apis/crds/karpenter.sh_nodepools.yaml + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.taints.items.properties.key.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodepools.yaml + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.taints.items.properties.value.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodepools.yaml + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.taints.items.properties.effect.enum += ["NoSchedule","PreferNoSchedule","NoExecute"]' -i pkg/apis/crds/karpenter.sh_nodepools.yaml + + ## Startup-Taint + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.startupTaints.items.properties.key.minLength = 1' -i pkg/apis/crds/karpenter.sh_nodepools.yaml + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.startupTaints.items.properties.key.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodepools.yaml + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.startupTaints.items.properties.value.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i 
pkg/apis/crds/karpenter.sh_nodepools.yaml + yqVersion="$Version" yq eval '.spec.versions[env(yqVersion)].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.startupTaints.items.properties.effect.enum += ["NoSchedule","PreferNoSchedule","NoExecute"]' -i pkg/apis/crds/karpenter.sh_nodepools.yaml +done diff --git a/pkg/apis/apis.go b/pkg/apis/apis.go index 8da2729c7f..579fd08d23 100644 --- a/pkg/apis/apis.go +++ b/pkg/apis/apis.go @@ -20,9 +20,10 @@ import ( _ "embed" "github.com/samber/lo" - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" + v1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "sigs.k8s.io/karpenter/pkg/utils/functional" ) @@ -30,6 +31,7 @@ import ( var ( // Builder includes all types within the apis package Builder = runtime.NewSchemeBuilder( + v1.SchemeBuilder.AddToScheme, v1beta1.SchemeBuilder.AddToScheme, ) // AddToScheme may be used to add all resources defined in the project to a Scheme @@ -42,8 +44,8 @@ var ( NodePoolCRD []byte //go:embed crds/karpenter.sh_nodeclaims.yaml NodeClaimCRD []byte - CRDs = []*v1.CustomResourceDefinition{ - lo.Must(functional.Unmarshal[v1.CustomResourceDefinition](NodePoolCRD)), - lo.Must(functional.Unmarshal[v1.CustomResourceDefinition](NodeClaimCRD)), + CRDs = []*apiextensionsv1.CustomResourceDefinition{ + lo.Must(functional.Unmarshal[apiextensionsv1.CustomResourceDefinition](NodePoolCRD)), + lo.Must(functional.Unmarshal[apiextensionsv1.CustomResourceDefinition](NodeClaimCRD)), } ) diff --git a/pkg/apis/crds/karpenter.sh_nodeclaims.yaml b/pkg/apis/crds/karpenter.sh_nodeclaims.yaml index d0a19b000b..4b273f3a0e 100644 --- a/pkg/apis/crds/karpenter.sh_nodeclaims.yaml +++ b/pkg/apis/crds/karpenter.sh_nodeclaims.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.15.0 name: nodeclaims.karpenter.sh spec: group: karpenter.sh @@ -16,6 +16,379 @@ spec: singular: nodeclaim scope: Cluster versions: + - additionalPrinterColumns: + - jsonPath: .metadata.labels.node\.kubernetes\.io/instance-type + name: Type + type: string + - jsonPath: .metadata.labels.karpenter\.sh/capacity-type + name: Capacity + type: string + - jsonPath: .metadata.labels.topology\.kubernetes\.io/zone + name: Zone + type: string + - jsonPath: .status.nodeName + name: Node + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.providerID + name: ID + priority: 1 + type: string + - jsonPath: .metadata.labels.karpenter\.sh/nodepool + name: NodePool + priority: 1 + type: string + - jsonPath: .spec.nodeClassRef.name + name: NodeClass + priority: 1 + type: string + name: v1 + schema: + openAPIV3Schema: + description: NodeClaim is the Schema for the NodeClaims API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NodeClaimSpec describes the desired state of the NodeClaim + properties: + expireAfter: + default: 720h + description: |- + ExpireAfter is the duration the controller will wait + before terminating a node, measured from when the node is created. This + is useful to implement features like eventually consistent node upgrade, + memory leak protection, and disruption testing. + pattern: ^(([0-9]+(s|m|h))+)|(Never)$ + type: string + nodeClassRef: + description: NodeClassRef is a reference to an object that defines provider specific configuration + properties: + group: + description: API version of the referent + type: string + kind: + description: 'Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"' + type: string + name: + description: 'Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + required: + - group + - kind + - name + type: object + requirements: + description: Requirements are layered with GetLabels and applied to every node. + items: + description: |- + A node selector requirement with min values is a selector that contains values, a key, an operator that relates the key and values + and minValues that represent the requirement to have at least that many values. + properties: + key: + description: The label key that the selector applies to. + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ + x-kubernetes-validations: + - message: label domain "kubernetes.io" is restricted + rule: self in ["beta.kubernetes.io/instance-type", "failure-domain.beta.kubernetes.io/region", "beta.kubernetes.io/os", "beta.kubernetes.io/arch", "failure-domain.beta.kubernetes.io/zone", "topology.kubernetes.io/zone", "topology.kubernetes.io/region", "node.kubernetes.io/instance-type", "kubernetes.io/arch", "kubernetes.io/os", "node.kubernetes.io/windows-build"] || self.find("^([^/]+)").endsWith("node.kubernetes.io") || self.find("^([^/]+)").endsWith("node-restriction.kubernetes.io") || !self.find("^([^/]+)").endsWith("kubernetes.io") + - message: label domain "k8s.io" is restricted + rule: self.find("^([^/]+)").endsWith("kops.k8s.io") || !self.find("^([^/]+)").endsWith("k8s.io") + - message: label domain "karpenter.sh" is restricted + rule: self in ["karpenter.sh/capacity-type", "karpenter.sh/nodepool"] || !self.find("^([^/]+)").endsWith("karpenter.sh") + - message: label "kubernetes.io/hostname" is restricted + rule: self != "kubernetes.io/hostname" + minValues: + description: |- + This field is ALPHA and can be dropped or replaced at any time + MinValues is the minimum number of unique values required to define the flexibility of the specific requirement. + maximum: 50 + minimum: 1 + type: integer + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + enum: + - In + - NotIn + - Exists + - DoesNotExist + - Gt + - Lt + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + required: + - key + - operator + type: object + maxItems: 100 + type: array + x-kubernetes-validations: + - message: requirements with operator 'In' must have a value defined + rule: 'self.all(x, x.operator == ''In'' ? x.values.size() != 0 : true)' + - message: requirements operator 'Gt' or 'Lt' must have a single positive integer value + rule: 'self.all(x, (x.operator == ''Gt'' || x.operator == ''Lt'') ? (x.values.size() == 1 && int(x.values[0]) >= 0) : true)' + - message: requirements with 'minValues' must have at least that many values specified in the 'values' field + rule: 'self.all(x, (x.operator == ''In'' && has(x.minValues)) ? x.values.size() >= x.minValues : true)' + resources: + description: Resources models the resource requirements for the NodeClaim to launch + properties: + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum required resources for the NodeClaim to launch + type: object + type: object + startupTaints: + description: |- + StartupTaints are taints that are applied to nodes upon startup which are expected to be removed automatically + within a short period of time, typically by a DaemonSet that tolerates the taint. These are commonly used by + daemonsets to allow initialization and enforce startup ordering. StartupTaints are ignored for provisioning + purposes in that pods are not required to tolerate a StartupTaint in order to have nodes provisioned for them. + items: + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. + properties: + effect: + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + type: string + enum: + - NoSchedule + - PreferNoSchedule + - NoExecute + key: + description: Required. The taint key to be applied to a node. + type: string + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ + timeAdded: + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. + format: date-time + type: string + value: + description: The taint value corresponding to the taint key. + type: string + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ + required: + - effect + - key + type: object + type: array + taints: + description: Taints will be applied to the NodeClaim's node. + items: + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. + properties: + effect: + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. 
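The x-kubernetes-validations entries on requirements above encode three invariants: In needs at least one value, Gt/Lt need exactly one non-negative integer value, and minValues cannot exceed the number of values supplied. Below is a minimal Go sketch of the same checks for programmatic construction of NodeClaims; the trimmed-down `requirement` struct and `validateRequirement` helper are illustrative stand-ins, not part of this PR.

```go
package main

import (
	"fmt"
	"strconv"

	v1 "k8s.io/api/core/v1"
)

// Illustrative stand-in for the NodeSelectorRequirementWithMinValues type
// added later in this diff.
type requirement struct {
	v1.NodeSelectorRequirement
	MinValues *int
}

// validateRequirement mirrors the CEL rules client-side (hypothetical helper).
func validateRequirement(r requirement) error {
	switch r.Operator {
	case v1.NodeSelectorOpIn:
		if len(r.Values) == 0 {
			return fmt.Errorf("operator In must have a value defined")
		}
		if r.MinValues != nil && len(r.Values) < *r.MinValues {
			return fmt.Errorf("minValues %d exceeds the %d values provided", *r.MinValues, len(r.Values))
		}
	case v1.NodeSelectorOpGt, v1.NodeSelectorOpLt:
		if len(r.Values) != 1 {
			return fmt.Errorf("operator %s must have a single value", r.Operator)
		}
		if n, err := strconv.Atoi(r.Values[0]); err != nil || n < 0 {
			return fmt.Errorf("operator %s must have a single positive integer value", r.Operator)
		}
	}
	return nil
}

func main() {
	two := 2
	bad := requirement{
		NodeSelectorRequirement: v1.NodeSelectorRequirement{
			Key:      "topology.kubernetes.io/zone",
			Operator: v1.NodeSelectorOpIn,
			Values:   []string{"us-west-2a"},
		},
		MinValues: &two, // rejected: only one value supplied for minValues=2
	}
	fmt.Println(validateRequirement(bad))
}
```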
+ type: string + enum: + - NoSchedule + - PreferNoSchedule + - NoExecute + key: + description: Required. The taint key to be applied to a node. + type: string + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ + timeAdded: + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. + format: date-time + type: string + value: + description: The taint value corresponding to the taint key. + type: string + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ + required: + - effect + - key + type: object + type: array + terminationGracePeriod: + description: |- + TerminationGracePeriod is the maximum duration the controller will wait before forcefully deleting the pods on a node, measured from when deletion is first initiated. + + + Warning: this feature takes precedence over a Pod's terminationGracePeriodSeconds value, and bypasses any blocked PDBs or the karpenter.sh/do-not-disrupt annotation. + + + This field is intended to be used by cluster administrators to enforce that nodes can be cycled within a given time period. + When set, drifted nodes will begin draining even if there are pods blocking eviction. Draining will respect PDBs and the do-not-disrupt annotation until the TGP is reached. + + + Karpenter will preemptively delete pods so their terminationGracePeriodSeconds align with the node's terminationGracePeriod. + If a pod would be terminated without being granted its full terminationGracePeriodSeconds prior to the node timeout, + that pod will be deleted at T = node timeout - pod terminationGracePeriodSeconds. + + + The feature can also be used to allow maximum time limits for long-running jobs which can delay node termination with preStop hooks. + If left undefined, the controller will wait indefinitely for pods to be drained. + pattern: ^([0-9]+(s|m|h))+$ + type: string + required: + - nodeClassRef + - requirements + type: object + x-kubernetes-validations: + - message: spec is immutable + rule: self == oldSelf + status: + description: NodeClaimStatus defines the observed state of NodeClaim + properties: + allocatable: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Allocatable is the estimated allocatable capacity of the node + type: object + capacity: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Capacity is the estimated full capacity of the node + type: object + conditions: + description: Conditions contains signals for health and readiness + items: + description: Condition aliases the upstream type and adds additional helper methods + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. 
+ This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + imageID: + description: ImageID is an identifier for the image that runs on the node + type: string + lastPodEventTime: + description: |- + LastPodEventTime is updated with the last time a pod was scheduled + or removed from the node. A pod going terminal or terminating + is also considered as removed. 
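The conditions schema above spells out the staleness rule: a condition whose observedGeneration is behind the object's metadata.generation no longer reflects the current spec. A small sketch of that check, assuming the upstream metav1.Condition type (the PR's Condition "aliases the upstream type and adds additional helper methods", per its description):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// conditionIsStale is a hypothetical helper: per the schema above, a condition
// observed at generation 9 is out of date for an object at generation 12.
func conditionIsStale(cond metav1.Condition, objectGeneration int64) bool {
	return cond.ObservedGeneration < objectGeneration
}

func main() {
	cond := metav1.Condition{
		Type:               "Ready",
		Status:             metav1.ConditionTrue,
		ObservedGeneration: 9,
	}
	fmt.Println(conditionIsStale(cond, 12)) // true
}
```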
+ format: date-time + type: string + nodeName: + description: NodeName is the name of the corresponding node object + type: string + providerID: + description: ProviderID of the corresponding node object + type: string + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} - additionalPrinterColumns: - jsonPath: .metadata.labels.node\.kubernetes\.io/instance-type name: Type type: string @@ -247,6 +620,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic maxLength: 63 pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ required: diff --git a/pkg/apis/crds/karpenter.sh_nodepools.yaml b/pkg/apis/crds/karpenter.sh_nodepools.yaml index 759029d716..9fe9a68cc1 100644 --- a/pkg/apis/crds/karpenter.sh_nodepools.yaml +++ b/pkg/apis/crds/karpenter.sh_nodepools.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.15.0 name: nodepools.karpenter.sh spec: group: karpenter.sh @@ -16,6 +16,492 @@ spec: singular: nodepool scope: Cluster versions: + - additionalPrinterColumns: + - jsonPath: .spec.template.spec.nodeClassRef.name + name: NodeClass + type: string + - jsonPath: .status.resources.nodes + name: Nodes + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.weight + name: Weight + priority: 1 + type: integer + - jsonPath: .status.resources.cpu + name: CPU + priority: 1 + type: string + - jsonPath: .status.resources.memory + name: Memory + priority: 1 + type: string + name: v1 + schema: + openAPIV3Schema: + description: NodePool is the Schema for the NodePools API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + NodePoolSpec is the top level nodepool specification. Nodepools + launch nodes in response to pods that are unschedulable. A single nodepool + is capable of managing a diverse set of nodes. Node properties are determined + from a combination of nodepool and pod scheduling constraints. + properties: + disruption: + description: Disruption contains the parameters that relate to Karpenter's disruption logic + properties: + budgets: + default: + - nodes: 10% + description: |- + Budgets is a list of Budgets. + If there are multiple active budgets, Karpenter uses + the most restrictive value. If left undefined, + this will default to one budget with a value of 10%. + items: + description: |- + Budget defines when Karpenter will restrict the + number of Node Claims that can be terminating simultaneously. + properties: + duration: + description: |- + Duration determines how long a Budget is active since each Schedule hit. 
+ Only minutes and hours are accepted, as cron does not work in seconds. + If omitted, the budget is always active. + This is required if Schedule is set. + This regex has an optional 0s at the end since the duration.String() always adds + a 0s at the end. + pattern: ^((([0-9]+(h|m))|([0-9]+h[0-9]+m))(0s)?)$ + type: string + nodes: + default: 10% + description: |- + Nodes dictates the maximum number of NodeClaims owned by this NodePool + that can be terminating at once. This is calculated by counting nodes that + have a deletion timestamp set, or are actively being deleted by Karpenter. + This field is required when specifying a budget. + This cannot be of type intstr.IntOrString since kubebuilder doesn't support pattern + checking for int nodes for IntOrString nodes. + Ref: https://github.com/kubernetes-sigs/controller-tools/blob/55efe4be40394a288216dab63156b0a64fb82929/pkg/crd/markers/validation.go#L379-L388 + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + type: string + reasons: + description: |- + Reasons is a list of disruption methods that this budget applies to. If Reasons is not set, this budget applies to all methods. + Otherwise, this will apply to each reason defined. + allowed reasons are Underutilized, Empty, and Drifted. + items: + description: DisruptionReason defines valid reasons for disruption budgets. + enum: + - Underutilized + - Empty + - Drifted + type: string + type: array + schedule: + description: |- + Schedule specifies when a budget begins being active, following + the upstream cronjob syntax. If omitted, the budget is always active. + Timezones are not supported. + This field is required if Duration is set. + pattern: ^(@(annually|yearly|monthly|weekly|daily|midnight|hourly))|((.+)\s(.+)\s(.+)\s(.+)\s(.+))$ + type: string + required: + - nodes + type: object + maxItems: 50 + type: array + x-kubernetes-validations: + - message: '''schedule'' must be set with ''duration''' + rule: self.all(x, has(x.schedule) == has(x.duration)) + consolidateAfter: + description: |- + ConsolidateAfter is the duration the controller will wait + before attempting to terminate nodes that are underutilized. + Refer to ConsolidationPolicy for how underutilization is considered. + pattern: ^(([0-9]+(s|m|h))+)|(Never)$ + type: string + consolidationPolicy: + default: WhenUnderutilized + description: |- + ConsolidationPolicy describes which nodes Karpenter can disrupt through its consolidation + algorithm. This policy defaults to "WhenUnderutilized" if not specified + enum: + - WhenEmpty + - WhenUnderutilized + type: string + required: + - consolidateAfter + type: object + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Limits define a set of bounds for provisioning capacity. + type: object + template: + description: |- + Template contains the template of possibilities for the provisioning logic to launch a NodeClaim with. + NodeClaims launched from this NodePool will often be further constrained than the template specifies. + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations + type: object + labels: + additionalProperties: + type: string + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + type: object + maxProperties: 100 + x-kubernetes-validations: + - message: label domain "kubernetes.io" is restricted + rule: self.all(x, x in ["beta.kubernetes.io/instance-type", "failure-domain.beta.kubernetes.io/region", "beta.kubernetes.io/os", "beta.kubernetes.io/arch", "failure-domain.beta.kubernetes.io/zone", "topology.kubernetes.io/zone", "topology.kubernetes.io/region", "kubernetes.io/arch", "kubernetes.io/os", "node.kubernetes.io/windows-build"] || x.find("^([^/]+)").endsWith("node.kubernetes.io") || x.find("^([^/]+)").endsWith("node-restriction.kubernetes.io") || !x.find("^([^/]+)").endsWith("kubernetes.io")) + - message: label domain "k8s.io" is restricted + rule: self.all(x, x.find("^([^/]+)").endsWith("kops.k8s.io") || !x.find("^([^/]+)").endsWith("k8s.io")) + - message: label domain "karpenter.sh" is restricted + rule: self.all(x, x in ["karpenter.sh/capacity-type", "karpenter.sh/nodepool"] || !x.find("^([^/]+)").endsWith("karpenter.sh")) + - message: label "karpenter.sh/nodepool" is restricted + rule: self.all(x, x != "karpenter.sh/nodepool") + - message: label "kubernetes.io/hostname" is restricted + rule: self.all(x, x != "kubernetes.io/hostname") + type: object + spec: + description: NodeClaimSpec describes the desired state of the NodeClaim + properties: + expireAfter: + default: 720h + description: |- + ExpireAfter is the duration the controller will wait + before terminating a node, measured from when the node is created. This + is useful to implement features like eventually consistent node upgrade, + memory leak protection, and disruption testing. + pattern: ^(([0-9]+(s|m|h))+)|(Never)$ + type: string + nodeClassRef: + description: NodeClassRef is a reference to an object that defines provider specific configuration + properties: + group: + description: API version of the referent + type: string + kind: + description: 'Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"' + type: string + name: + description: 'Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + required: + - group + - kind + - name + type: object + requirements: + description: Requirements are layered with GetLabels and applied to every node. + items: + description: |- + A node selector requirement with min values is a selector that contains values, a key, an operator that relates the key and values + and minValues that represent the requirement to have at least that many values. + properties: + key: + description: The label key that the selector applies to. 
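The CEL rules above, and those that follow for requirement keys, all pivot on the label's domain, which the expressions capture with self.find("^([^/]+)"). A sketch of the same extraction in Go, mirroring the GetLabelDomain helper that appears later in this diff; the example keys are illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

// labelDomain returns the prefix before "/", as GetLabelDomain later in this
// diff does; for keys that contain a "/", this is what the CEL expression
// self.find("^([^/]+)") captures. An empty string means the key has no domain.
func labelDomain(key string) string {
	if parts := strings.SplitN(key, "/", 2); len(parts) == 2 {
		return parts[0]
	}
	return ""
}

func main() {
	for _, key := range []string{
		"karpenter.sh/nodepool",            // restricted domain, but a well-known label
		"node.kubernetes.io/instance-type", // allowed sub-domain exception of kubernetes.io
		"team.example.com/owner",           // unrestricted custom domain
	} {
		domain := labelDomain(key)
		fmt.Printf("%s -> domain %q, kubernetes.io-suffixed: %v\n",
			key, domain, strings.HasSuffix(domain, "kubernetes.io"))
	}
}
```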
+ type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ + x-kubernetes-validations: + - message: label domain "kubernetes.io" is restricted + rule: self in ["beta.kubernetes.io/instance-type", "failure-domain.beta.kubernetes.io/region", "beta.kubernetes.io/os", "beta.kubernetes.io/arch", "failure-domain.beta.kubernetes.io/zone", "topology.kubernetes.io/zone", "topology.kubernetes.io/region", "node.kubernetes.io/instance-type", "kubernetes.io/arch", "kubernetes.io/os", "node.kubernetes.io/windows-build"] || self.find("^([^/]+)").endsWith("node.kubernetes.io") || self.find("^([^/]+)").endsWith("node-restriction.kubernetes.io") || !self.find("^([^/]+)").endsWith("kubernetes.io") + - message: label domain "k8s.io" is restricted + rule: self.find("^([^/]+)").endsWith("kops.k8s.io") || !self.find("^([^/]+)").endsWith("k8s.io") + - message: label domain "karpenter.sh" is restricted + rule: self in ["karpenter.sh/capacity-type", "karpenter.sh/nodepool"] || !self.find("^([^/]+)").endsWith("karpenter.sh") + - message: label "karpenter.sh/nodepool" is restricted + rule: self != "karpenter.sh/nodepool" + - message: label "kubernetes.io/hostname" is restricted + rule: self != "kubernetes.io/hostname" + minValues: + description: |- + This field is ALPHA and can be dropped or replaced at any time + MinValues is the minimum number of unique values required to define the flexibility of the specific requirement. + maximum: 50 + minimum: 1 + type: integer + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + enum: + - In + - NotIn + - Exists + - DoesNotExist + - Gt + - Lt + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxLength: 63 + pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ + required: + - key + - operator + type: object + maxItems: 100 + type: array + x-kubernetes-validations: + - message: requirements with operator 'In' must have a value defined + rule: 'self.all(x, x.operator == ''In'' ? x.values.size() != 0 : true)' + - message: requirements operator 'Gt' or 'Lt' must have a single positive integer value + rule: 'self.all(x, (x.operator == ''Gt'' || x.operator == ''Lt'') ? (x.values.size() == 1 && int(x.values[0]) >= 0) : true)' + - message: requirements with 'minValues' must have at least that many values specified in the 'values' field + rule: 'self.all(x, (x.operator == ''In'' && has(x.minValues)) ? x.values.size() >= x.minValues : true)' + startupTaints: + description: |- + StartupTaints are taints that are applied to nodes upon startup which are expected to be removed automatically + within a short period of time, typically by a DaemonSet that tolerates the taint. These are commonly used by + daemonsets to allow initialization and enforce startup ordering. StartupTaints are ignored for provisioning + purposes in that pods are not required to tolerate a StartupTaint in order to have nodes provisioned for them. 
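Startup taints, as described above, gate scheduling until an initializing agent removes them, and that agent must itself tolerate the taint to run. A sketch with a hypothetical taint key (Karpenter does not define one):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Hypothetical startup taint that a CNI-style DaemonSet would remove once ready.
	startup := v1.Taint{
		Key:    "example.com/network-not-ready",
		Value:  "true",
		Effect: v1.TaintEffectNoSchedule, // must be one of the three effects the enum allows
	}
	// The initializing DaemonSet tolerates the taint so it can run and clear it;
	// ordinary pods do not need to tolerate it for provisioning to occur.
	toleration := v1.Toleration{
		Key:      startup.Key,
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoSchedule,
	}
	fmt.Println(toleration.ToleratesTaint(&startup)) // true
}
```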
+ items: + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. + properties: + effect: + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + type: string + enum: + - NoSchedule + - PreferNoSchedule + - NoExecute + key: + description: Required. The taint key to be applied to a node. + type: string + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ + timeAdded: + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. + format: date-time + type: string + value: + description: The taint value corresponding to the taint key. + type: string + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ + required: + - effect + - key + type: object + type: array + taints: + description: Taints will be applied to the NodeClaim's node. + items: + description: |- + The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. + properties: + effect: + description: |- + Required. The effect of the taint on pods + that do not tolerate the taint. + Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + type: string + enum: + - NoSchedule + - PreferNoSchedule + - NoExecute + key: + description: Required. The taint key to be applied to a node. + type: string + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ + timeAdded: + description: |- + TimeAdded represents the time at which the taint was added. + It is only written for NoExecute taints. + format: date-time + type: string + value: + description: The taint value corresponding to the taint key. + type: string + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ + required: + - effect + - key + type: object + type: array + terminationGracePeriod: + description: |- + TerminationGracePeriod is the maximum duration the controller will wait before forcefully deleting the pods on a node, measured from when deletion is first initiated. + + + Warning: this feature takes precedence over a Pod's terminationGracePeriodSeconds value, and bypasses any blocked PDBs or the karpenter.sh/do-not-disrupt annotation. + + + This field is intended to be used by cluster administrators to enforce that nodes can be cycled within a given time period. + When set, drifted nodes will begin draining even if there are pods blocking eviction. Draining will respect PDBs and the do-not-disrupt annotation until the TGP is reached. + + + Karpenter will preemptively delete pods so their terminationGracePeriodSeconds align with the node's terminationGracePeriod. + If a pod would be terminated without being granted its full terminationGracePeriodSeconds prior to the node timeout, + that pod will be deleted at T = node timeout - pod terminationGracePeriodSeconds. + + + The feature can also be used to allow maximum time limits for long-running jobs which can delay node termination with preStop hooks. + If left undefined, the controller will wait indefinitely for pods to be drained. 
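The terminationGracePeriod description above includes a timing rule worth making concrete: a pod is deleted at T = node timeout - pod terminationGracePeriodSeconds, so it still receives its full grace period before the node is forcefully removed. A sketch with made-up durations:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	nodeDeletionStart := time.Date(2024, time.July, 1, 12, 0, 0, 0, time.UTC)
	nodeTGP := time.Hour         // node's terminationGracePeriod
	podTGPS := 300 * time.Second // pod's terminationGracePeriodSeconds

	nodeTimeout := nodeDeletionStart.Add(nodeTGP)
	podDeletedAt := nodeTimeout.Add(-podTGPS) // T = node timeout - pod grace period
	fmt.Println(podDeletedAt.Format(time.TimeOnly)) // 12:55:00, five minutes before the node timeout
}
```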
+ pattern: ^([0-9]+(s|m|h))+$ + type: string + required: + - nodeClassRef + - requirements + type: object + required: + - spec + type: object + weight: + description: |- + Weight is the priority given to the nodepool during scheduling. A higher + numerical weight indicates that this nodepool will be ordered + ahead of other nodepools with lower weights. A nodepool with no weight + will be treated as if it is a nodepool with a weight of 0. + format: int32 + maximum: 100 + minimum: 1 + type: integer + required: + - template + type: object + status: + description: NodePoolStatus defines the observed state of NodePool + properties: + conditions: + description: Conditions contains signals for health and readiness + items: + description: Condition aliases the upstream type and adds additional helper methods + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + resources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Resources is the list of resources that have been provisioned. 
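Weight, per the description above, orders NodePools during scheduling: higher weights are considered first, and an unset weight behaves like 0. A minimal sketch of that ordering; the struct is an illustrative stand-in, not the PR's NodePool type:

```go
package main

import (
	"fmt"
	"sort"
)

type nodePool struct { // illustrative stand-in for the full NodePool type
	Name   string
	Weight *int32 // nil means "no weight", treated as 0
}

func weight(np nodePool) int32 {
	if np.Weight == nil {
		return 0
	}
	return *np.Weight
}

func main() {
	w := func(i int32) *int32 { return &i }
	pools := []nodePool{{"default", nil}, {"gpu", w(100)}, {"spot", w(50)}}
	// Sort descending by weight: higher-weight pools are ordered ahead.
	sort.SliceStable(pools, func(i, j int) bool { return weight(pools[i]) > weight(pools[j]) })
	fmt.Println(pools[0].Name, pools[1].Name, pools[2].Name) // gpu spot default
}
```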
+ type: object + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} - additionalPrinterColumns: - jsonPath: .spec.template.spec.nodeClassRef.name name: NodeClass @@ -373,6 +859,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic maxLength: 63 pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$ required: diff --git a/pkg/apis/v1/doc.go b/pkg/apis/v1/doc.go new file mode 100644 index 0000000000..634f9bc1b3 --- /dev/null +++ b/pkg/apis/v1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +groupName=karpenter.sh +package v1 // doc.go is discovered by codegen diff --git a/pkg/apis/v1/duration.go b/pkg/apis/v1/duration.go new file mode 100644 index 0000000000..5d4436984e --- /dev/null +++ b/pkg/apis/v1/duration.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "time" +) + +const Never = "Never" + +// NillableDuration is a wrapper around time.Duration which supports correct +// marshaling to YAML and JSON. It uses the value "Never" to signify +// that the duration is disabled and sets the inner duration as nil +type NillableDuration struct { + *time.Duration +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (d *NillableDuration) UnmarshalJSON(b []byte) error { + var str string + err := json.Unmarshal(b, &str) + if err != nil { + return err + } + if str == Never { + return nil + } + pd, err := time.ParseDuration(str) + if err != nil { + return err + } + d.Duration = &pd + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (d NillableDuration) MarshalJSON() ([]byte, error) { + if d.Duration == nil { + return json.Marshal(Never) + } + return json.Marshal(d.Duration.String()) +} + +// ToUnstructured implements the value.UnstructuredConverter interface. +func (d NillableDuration) ToUnstructured() interface{} { + if d.Duration == nil { + return Never + } + return d.Duration.String() +} diff --git a/pkg/apis/v1/labels.go b/pkg/apis/v1/labels.go new file mode 100644 index 0000000000..84b337e49d --- /dev/null +++ b/pkg/apis/v1/labels.go @@ -0,0 +1,142 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + "strings" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Well known labels and resources +const ( + ArchitectureAmd64 = "amd64" + ArchitectureArm64 = "arm64" + CapacityTypeSpot = "spot" + CapacityTypeOnDemand = "on-demand" +) + +// Karpenter specific domains and labels +const ( + NodePoolLabelKey = Group + "/nodepool" + NodeInitializedLabelKey = Group + "/initialized" + NodeRegisteredLabelKey = Group + "/registered" + CapacityTypeLabelKey = Group + "/capacity-type" +) + +// Karpenter specific annotations +const ( + DoNotDisruptAnnotationKey = Group + "/do-not-disrupt" + ProviderCompatibilityAnnotationKey = CompatibilityGroup + "/provider" + NodePoolHashAnnotationKey = Group + "/nodepool-hash" + NodePoolHashVersionAnnotationKey = Group + "/nodepool-hash-version" + KubeletCompatibilityAnnotationKey = CompatibilityGroup + "/v1beta1-kubelet-conversion" + NodeClaimTerminationTimestampAnnotationKey = Group + "/nodeclaim-termination-timestamp" +) + +// Karpenter specific finalizers +const ( + TerminationFinalizer = Group + "/termination" +) + +var ( + // RestrictedLabelDomains are either prohibited by the kubelet or reserved by karpenter + RestrictedLabelDomains = sets.New( + "kubernetes.io", + "k8s.io", + Group, + ) + + // LabelDomainExceptions are sub-domains of the RestrictedLabelDomains but allowed because + // they are not used in a context where they may be passed as argument to kubelet. + LabelDomainExceptions = sets.New( + "kops.k8s.io", + v1.LabelNamespaceSuffixNode, + v1.LabelNamespaceNodeRestriction, + ) + + // WellKnownLabels are labels that belong to the RestrictedLabelDomains but allowed. + // Karpenter is aware of these labels, and they can be used to further narrow down + // the range of the corresponding values by either nodepool or pods. + WellKnownLabels = sets.New( + NodePoolLabelKey, + v1.LabelTopologyZone, + v1.LabelTopologyRegion, + v1.LabelInstanceTypeStable, + v1.LabelArchStable, + v1.LabelOSStable, + CapacityTypeLabelKey, + v1.LabelWindowsBuild, + ) + + // RestrictedLabels are labels that should not be used + // because they may interfere with the internal provisioning logic. + RestrictedLabels = sets.New( + v1.LabelHostname, + ) + + // NormalizedLabels translate aliased concepts into the controller's + // WellKnownLabels. Pod requirements are translated for compatibility. + NormalizedLabels = map[string]string{ + v1.LabelFailureDomainBetaZone: v1.LabelTopologyZone, + "beta.kubernetes.io/arch": v1.LabelArchStable, + "beta.kubernetes.io/os": v1.LabelOSStable, + v1.LabelInstanceType: v1.LabelInstanceTypeStable, + v1.LabelFailureDomainBetaRegion: v1.LabelTopologyRegion, + } +) + +// IsRestrictedLabel returns an error if the label is restricted. 
+func IsRestrictedLabel(key string) error { + if WellKnownLabels.Has(key) { + return nil + } + if IsRestrictedNodeLabel(key) { + return fmt.Errorf("label %s is restricted; specify a well known label: %v, or a custom label that does not use a restricted domain: %v", key, sets.List(WellKnownLabels), sets.List(RestrictedLabelDomains)) + } + return nil +} + +// IsRestrictedNodeLabel returns true if a node label should not be injected by Karpenter. +// They are either known labels that will be injected by cloud providers, +// or label domain managed by other software (e.g., kops.k8s.io managed by kOps). +func IsRestrictedNodeLabel(key string) bool { + if WellKnownLabels.Has(key) { + return true + } + labelDomain := GetLabelDomain(key) + for exceptionLabelDomain := range LabelDomainExceptions { + if strings.HasSuffix(labelDomain, exceptionLabelDomain) { + return false + } + } + for restrictedLabelDomain := range RestrictedLabelDomains { + if strings.HasSuffix(labelDomain, restrictedLabelDomain) { + return true + } + } + return RestrictedLabels.Has(key) +} + +func GetLabelDomain(key string) string { + if parts := strings.SplitN(key, "/", 2); len(parts) == 2 { + return parts[0] + } + return "" +} diff --git a/pkg/apis/v1/nodeclaim.go b/pkg/apis/v1/nodeclaim.go new file mode 100644 index 0000000000..a6bf30893c --- /dev/null +++ b/pkg/apis/v1/nodeclaim.go @@ -0,0 +1,142 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// NodeClaimSpec describes the desired state of the NodeClaim +type NodeClaimSpec struct { + // Taints will be applied to the NodeClaim's node. + // +optional + Taints []v1.Taint `json:"taints,omitempty"` + // StartupTaints are taints that are applied to nodes upon startup which are expected to be removed automatically + // within a short period of time, typically by a DaemonSet that tolerates the taint. These are commonly used by + // daemonsets to allow initialization and enforce startup ordering. StartupTaints are ignored for provisioning + // purposes in that pods are not required to tolerate a StartupTaint in order to have nodes provisioned for them. + // +optional + StartupTaints []v1.Taint `json:"startupTaints,omitempty"` + // Requirements are layered with GetLabels and applied to every node. + // +kubebuilder:validation:XValidation:message="requirements with operator 'In' must have a value defined",rule="self.all(x, x.operator == 'In' ? x.values.size() != 0 : true)" + // +kubebuilder:validation:XValidation:message="requirements operator 'Gt' or 'Lt' must have a single positive integer value",rule="self.all(x, (x.operator == 'Gt' || x.operator == 'Lt') ? (x.values.size() == 1 && int(x.values[0]) >= 0) : true)" + // +kubebuilder:validation:XValidation:message="requirements with 'minValues' must have at least that many values specified in the 'values' field",rule="self.all(x, (x.operator == 'In' && has(x.minValues)) ? 
x.values.size() >= x.minValues : true)" + // +kubebuilder:validation:MaxItems:=100 + // +required + Requirements []NodeSelectorRequirementWithMinValues `json:"requirements" hash:"ignore"` + // Resources models the resource requirements for the NodeClaim to launch + // +optional + Resources ResourceRequirements `json:"resources,omitempty" hash:"ignore"` + // NodeClassRef is a reference to an object that defines provider specific configuration + // +required + NodeClassRef *NodeClassReference `json:"nodeClassRef"` + // TerminationGracePeriod is the maximum duration the controller will wait before forcefully deleting the pods on a node, measured from when deletion is first initiated. + // + // Warning: this feature takes precedence over a Pod's terminationGracePeriodSeconds value, and bypasses any blocked PDBs or the karpenter.sh/do-not-disrupt annotation. + // + // This field is intended to be used by cluster administrators to enforce that nodes can be cycled within a given time period. + // When set, drifted nodes will begin draining even if there are pods blocking eviction. Draining will respect PDBs and the do-not-disrupt annotation until the TGP is reached. + // + // Karpenter will preemptively delete pods so their terminationGracePeriodSeconds align with the node's terminationGracePeriod. + // If a pod would be terminated without being granted its full terminationGracePeriodSeconds prior to the node timeout, + // that pod will be deleted at T = node timeout - pod terminationGracePeriodSeconds. + // + // The feature can also be used to allow maximum time limits for long-running jobs which can delay node termination with preStop hooks. + // If left undefined, the controller will wait indefinitely for pods to be drained. + // +kubebuilder:validation:Pattern=`^([0-9]+(s|m|h))+$` + // +kubebuilder:validation:Type="string" + // +optional + TerminationGracePeriod *metav1.Duration `json:"terminationGracePeriod,omitempty"` + // ExpireAfter is the duration the controller will wait + // before terminating a node, measured from when the node is created. This + // is useful to implement features like eventually consistent node upgrade, + // memory leak protection, and disruption testing. + // +kubebuilder:default:="720h" + // +kubebuilder:validation:Pattern=`^(([0-9]+(s|m|h))+)|(Never)$` + // +kubebuilder:validation:Type="string" + // +kubebuilder:validation:Schemaless + // +optional + ExpireAfter NillableDuration `json:"expireAfter,omitempty"` +} + +// A node selector requirement with min values is a selector that contains values, a key, an operator that relates the key and values +// and minValues that represent the requirement to have at least that many values. +type NodeSelectorRequirementWithMinValues struct { + v1.NodeSelectorRequirement `json:",inline"` + // This field is ALPHA and can be dropped or replaced at any time + // MinValues is the minimum number of unique values required to define the flexibility of the specific requirement. 
+ // +kubebuilder:validation:Minimum:=1 + // +kubebuilder:validation:Maximum:=50 + // +optional + MinValues *int `json:"minValues,omitempty"` +} + +// ResourceRequirements models the required resources for the NodeClaim to launch +// This will eventually be transformed into v1.ResourceRequirements when we support resources.limits +type ResourceRequirements struct { + // Requests describes the minimum required resources for the NodeClaim to launch + // +optional + Requests v1.ResourceList `json:"requests,omitempty"` +} + +type NodeClassReference struct { + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // +required + Kind string `json:"kind"` + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +required + Name string `json:"name"` + // API version of the referent + // +required + Group string `json:"group"` +} + +// +kubebuilder:object:generate=false +type Provider = runtime.RawExtension + +// NodeClaim is the Schema for the NodeClaims API +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=nodeclaims,scope=Cluster,categories=karpenter +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".metadata.labels.node\\.kubernetes\\.io/instance-type",description="" +// +kubebuilder:printcolumn:name="Capacity",type="string",JSONPath=".metadata.labels.karpenter\\.sh/capacity-type",description="" +// +kubebuilder:printcolumn:name="Zone",type="string",JSONPath=".metadata.labels.topology\\.kubernetes\\.io/zone",description="" +// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.nodeName",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="ID",type="string",JSONPath=".status.providerID",priority=1,description="" +// +kubebuilder:printcolumn:name="NodePool",type="string",JSONPath=".metadata.labels.karpenter\\.sh/nodepool",priority=1,description="" +// +kubebuilder:printcolumn:name="NodeClass",type="string",JSONPath=".spec.nodeClassRef.name",priority=1,description="" +type NodeClaim struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="spec is immutable" + // +required + Spec NodeClaimSpec `json:"spec"` + Status NodeClaimStatus `json:"status,omitempty"` +} + +// NodeClaimList contains a list of NodeClaims +// +kubebuilder:object:root=true +type NodeClaimList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NodeClaim `json:"items"` +} diff --git a/pkg/apis/v1/nodeclaim_defaults.go b/pkg/apis/v1/nodeclaim_defaults.go new file mode 100644 index 0000000000..a7f9938907 --- /dev/null +++ b/pkg/apis/v1/nodeclaim_defaults.go @@ -0,0 +1,22 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import "context" + +// SetDefaults for the NodeClaim +func (in *NodeClaim) SetDefaults(_ context.Context) {} diff --git a/pkg/apis/v1/nodeclaim_status.go b/pkg/apis/v1/nodeclaim_status.go new file mode 100644 index 0000000000..532eafc967 --- /dev/null +++ b/pkg/apis/v1/nodeclaim_status.go @@ -0,0 +1,76 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "github.com/awslabs/operatorpkg/status" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + ConditionTypeLaunched = "Launched" + ConditionTypeRegistered = "Registered" + ConditionTypeInitialized = "Initialized" + ConditionTypeConsolidatable = "Consolidatable" + ConditionTypeDrifted = "Drifted" + ConditionTypeTerminating = "Terminating" + ConditionTypeConsistentStateFound = "ConsistentStateFound" +) + +// NodeClaimStatus defines the observed state of NodeClaim +type NodeClaimStatus struct { + // NodeName is the name of the corresponding node object + // +optional + NodeName string `json:"nodeName,omitempty"` + // ProviderID of the corresponding node object + // +optional + ProviderID string `json:"providerID,omitempty"` + // ImageID is an identifier for the image that runs on the node + // +optional + ImageID string `json:"imageID,omitempty"` + // Capacity is the estimated full capacity of the node + // +optional + Capacity v1.ResourceList `json:"capacity,omitempty"` + // Allocatable is the estimated allocatable capacity of the node + // +optional + Allocatable v1.ResourceList `json:"allocatable,omitempty"` + // Conditions contains signals for health and readiness + // +optional + Conditions []status.Condition `json:"conditions,omitempty"` + // LastPodEventTime is updated with the last time a pod was scheduled + // or removed from the node. A pod going terminal or terminating + // is also considered as removed. + // +optional + LastPodEventTime metav1.Time `json:"lastPodEventTime,omitempty"` +} + +func (in *NodeClaim) StatusConditions() status.ConditionSet { + return status.NewReadyConditions( + ConditionTypeLaunched, + ConditionTypeRegistered, + ConditionTypeInitialized, + ).For(in) +} + +func (in *NodeClaim) GetConditions() []status.Condition { + return in.Status.Conditions +} + +func (in *NodeClaim) SetConditions(conditions []status.Condition) { + in.Status.Conditions = conditions +} diff --git a/pkg/apis/v1/nodeclaim_validation.go b/pkg/apis/v1/nodeclaim_validation.go new file mode 100644 index 0000000000..63c3384b75 --- /dev/null +++ b/pkg/apis/v1/nodeclaim_validation.go @@ -0,0 +1,154 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"fmt"
+	"strconv"
+
+	"github.com/samber/lo"
+	"go.uber.org/multierr"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/validation"
+	"knative.dev/pkg/apis"
+)
+
+var (
+	SupportedNodeSelectorOps = sets.NewString(
+		string(v1.NodeSelectorOpIn),
+		string(v1.NodeSelectorOpNotIn),
+		string(v1.NodeSelectorOpGt),
+		string(v1.NodeSelectorOpLt),
+		string(v1.NodeSelectorOpExists),
+		string(v1.NodeSelectorOpDoesNotExist),
+	)
+
+	SupportedReservedResources = sets.NewString(
+		v1.ResourceCPU.String(),
+		v1.ResourceMemory.String(),
+		v1.ResourceEphemeralStorage.String(),
+		"pid",
+	)
+
+	SupportedEvictionSignals = sets.NewString(
+		"memory.available",
+		"nodefs.available",
+		"nodefs.inodesFree",
+		"imagefs.available",
+		"imagefs.inodesFree",
+		"pid.available",
+	)
+)
+
+type taintKeyEffect struct {
+	OwnerKey string
+	Effect   v1.TaintEffect
+}
+
+func (in *NodeClaimSpec) validateTaints() (errs *apis.FieldError) {
+	existing := map[taintKeyEffect]struct{}{}
+	errs = errs.Also(validateTaintsField(in.Taints, existing, "taints"))
+	errs = errs.Also(validateTaintsField(in.StartupTaints, existing, "startupTaints"))
+	return errs
+}
+
+func validateTaintsField(taints []v1.Taint, existing map[taintKeyEffect]struct{}, fieldName string) *apis.FieldError {
+	var errs *apis.FieldError
+	for i, taint := range taints {
+		// Validate Key
+		if len(taint.Key) == 0 {
+			errs = errs.Also(apis.ErrInvalidArrayValue(taint, fieldName, i))
+		}
+		for _, err := range validation.IsQualifiedName(taint.Key) {
+			errs = errs.Also(apis.ErrInvalidArrayValue(err, fieldName, i))
+		}
+		// Validate Value
+		if len(taint.Value) != 0 {
+			for _, err := range validation.IsQualifiedName(taint.Value) {
+				errs = errs.Also(apis.ErrInvalidArrayValue(err, fieldName, i))
+			}
+		}
+		// Validate effect
+		switch taint.Effect {
+		case v1.TaintEffectNoSchedule, v1.TaintEffectPreferNoSchedule, v1.TaintEffectNoExecute, "":
+		default:
+			errs = errs.Also(apis.ErrInvalidArrayValue(taint.Effect, fieldName, i))
+		}
+
+		// Check for duplicate OwnerKey/Effect pairs
+		key := taintKeyEffect{OwnerKey: taint.Key, Effect: taint.Effect}
+		if _, ok := existing[key]; ok {
+			errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("duplicate taint Key/Effect pair %s=%s", taint.Key, taint.Effect), apis.CurrentField).
+				ViaFieldIndex("taints", i))
+		}
+		existing[key] = struct{}{}
+	}
+	return errs
+}
+
+// This function is used by the NodeClaim validation webhook to verify the nodepool requirements.
+// When this function is called, the nodepool's requirements do not include the requirements from labels.
+// NodeClaim requirements only support well known labels.
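Worth calling out about the taint validators above: taints and startupTaints share one `existing` map, so a Key/Effect pair may appear at most once across both lists, while the same key may legally carry several distinct effects. A minimal, self-contained sketch of that duplicate check, assuming only k8s.io/api/core/v1 (an illustration of the idea, not an exported API of this package); the requirement validators that the preceding comment introduces follow below:

    package main

    import (
    	"fmt"

    	v1 "k8s.io/api/core/v1"
    )

    // keyEffect mirrors the unexported taintKeyEffect type above.
    type keyEffect struct {
    	Key    string
    	Effect v1.TaintEffect
    }

    // duplicateTaints reports every Key/Effect pair that appears more than once.
    func duplicateTaints(taints []v1.Taint) (dupes []string) {
    	seen := map[keyEffect]struct{}{}
    	for _, t := range taints {
    		k := keyEffect{Key: t.Key, Effect: t.Effect}
    		if _, ok := seen[k]; ok {
    			dupes = append(dupes, fmt.Sprintf("%s=%s", t.Key, t.Effect))
    		}
    		seen[k] = struct{}{}
    	}
    	return dupes
    }

    func main() {
    	fmt.Println(duplicateTaints([]v1.Taint{
    		{Key: "a", Effect: v1.TaintEffectNoSchedule},
    		{Key: "a", Effect: v1.TaintEffectNoExecute},  // same key, new effect: allowed
    		{Key: "a", Effect: v1.TaintEffectNoSchedule}, // repeated pair: rejected
    	})) // prints [a=NoSchedule]
    }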
+func (in *NodeClaimSpec) validateRequirements() (errs *apis.FieldError) { + for i, requirement := range in.Requirements { + if err := ValidateRequirement(requirement); err != nil { + errs = errs.Also(apis.ErrInvalidArrayValue(err, "requirements", i)) + } + } + return errs +} + +func ValidateRequirement(requirement NodeSelectorRequirementWithMinValues) error { //nolint:gocyclo + var errs error + if normalized, ok := NormalizedLabels[requirement.Key]; ok { + requirement.Key = normalized + } + if !SupportedNodeSelectorOps.Has(string(requirement.Operator)) { + errs = multierr.Append(errs, fmt.Errorf("key %s has an unsupported operator %s not in %s", requirement.Key, requirement.Operator, SupportedNodeSelectorOps.UnsortedList())) + } + if e := IsRestrictedLabel(requirement.Key); e != nil { + errs = multierr.Append(errs, e) + } + for _, err := range validation.IsQualifiedName(requirement.Key) { + errs = multierr.Append(errs, fmt.Errorf("key %s is not a qualified name, %s", requirement.Key, err)) + } + for _, value := range requirement.Values { + for _, err := range validation.IsValidLabelValue(value) { + errs = multierr.Append(errs, fmt.Errorf("invalid value %s for key %s, %s", value, requirement.Key, err)) + } + } + if requirement.Operator == v1.NodeSelectorOpIn && len(requirement.Values) == 0 { + errs = multierr.Append(errs, fmt.Errorf("key %s with operator %s must have a value defined", requirement.Key, requirement.Operator)) + } + + if requirement.Operator == v1.NodeSelectorOpIn && requirement.MinValues != nil && len(requirement.Values) < lo.FromPtr(requirement.MinValues) { + errs = multierr.Append(errs, fmt.Errorf("key %s with operator %s must have at least minimum number of values defined in 'values' field", requirement.Key, requirement.Operator)) + } + + if requirement.Operator == v1.NodeSelectorOpGt || requirement.Operator == v1.NodeSelectorOpLt { + if len(requirement.Values) != 1 { + errs = multierr.Append(errs, fmt.Errorf("key %s with operator %s must have a single positive integer value", requirement.Key, requirement.Operator)) + } else { + value, err := strconv.Atoi(requirement.Values[0]) + if err != nil || value < 0 { + errs = multierr.Append(errs, fmt.Errorf("key %s with operator %s must have a single positive integer value", requirement.Key, requirement.Operator)) + } + } + } + return errs +} diff --git a/pkg/apis/v1/nodeclaim_validation_cel_test.go b/pkg/apis/v1/nodeclaim_validation_cel_test.go new file mode 100644 index 0000000000..858de4c673 --- /dev/null +++ b/pkg/apis/v1/nodeclaim_validation_cel_test.go @@ -0,0 +1,237 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1_test + +import ( + "strconv" + "strings" + "time" + + "sigs.k8s.io/karpenter/pkg/test" + + "github.com/Pallinder/go-randomdata" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/samber/lo" + + . 
"sigs.k8s.io/karpenter/pkg/apis/v1" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" +) + +var _ = Describe("Validation", func() { + var nodeClaim *NodeClaim + + BeforeEach(func() { + if env.Version.Minor() < 25 { + Skip("CEL Validation is for 1.25>") + } + nodeClaim = &NodeClaim{ + ObjectMeta: metav1.ObjectMeta{Name: strings.ToLower(randomdata.SillyName())}, + Spec: NodeClaimSpec{ + NodeClassRef: &NodeClassReference{ + Kind: "NodeClaim", + Name: "default", + }, + Requirements: []NodeSelectorRequirementWithMinValues{ + { + NodeSelectorRequirement: v1.NodeSelectorRequirement{ + Key: CapacityTypeLabelKey, + Operator: v1.NodeSelectorOpExists, + }, + }, + }, + }, + } + }) + + Context("Taints", func() { + It("should succeed for valid taints", func() { + nodeClaim.Spec.Taints = []v1.Taint{ + {Key: "a", Value: "b", Effect: v1.TaintEffectNoSchedule}, + {Key: "c", Value: "d", Effect: v1.TaintEffectNoExecute}, + {Key: "e", Value: "f", Effect: v1.TaintEffectPreferNoSchedule}, + {Key: "key-only", Effect: v1.TaintEffectNoExecute}, + } + Expect(env.Client.Create(ctx, nodeClaim)).To(Succeed()) + }) + It("should fail for invalid taint keys", func() { + nodeClaim.Spec.Taints = []v1.Taint{{Key: "???"}} + Expect(env.Client.Create(ctx, nodeClaim)).ToNot(Succeed()) + }) + It("should fail for missing taint key", func() { + nodeClaim.Spec.Taints = []v1.Taint{{Effect: v1.TaintEffectNoSchedule}} + Expect(env.Client.Create(ctx, nodeClaim)).ToNot(Succeed()) + }) + It("should fail for invalid taint value", func() { + nodeClaim.Spec.Taints = []v1.Taint{{Key: "invalid-value", Effect: v1.TaintEffectNoSchedule, Value: "???"}} + Expect(env.Client.Create(ctx, nodeClaim)).ToNot(Succeed()) + }) + It("should fail for invalid taint effect", func() { + nodeClaim.Spec.Taints = []v1.Taint{{Key: "invalid-effect", Effect: "???"}} + Expect(env.Client.Create(ctx, nodeClaim)).ToNot(Succeed()) + }) + It("should not fail for same key with different effects", func() { + nodeClaim.Spec.Taints = []v1.Taint{ + {Key: "a", Effect: v1.TaintEffectNoSchedule}, + {Key: "a", Effect: v1.TaintEffectNoExecute}, + } + Expect(env.Client.Create(ctx, nodeClaim)).To(Succeed()) + }) + }) + Context("Requirements", func() { + It("should allow supported ops", func() { + nodeClaim.Spec.Requirements = []NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}}, + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpGt, Values: []string{"1"}}}, + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpLt, Values: []string{"1"}}}, + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn}}, + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpExists}}, + } + Expect(env.Client.Create(ctx, nodeClaim)).To(Succeed()) + }) + It("should fail for unsupported ops", func() { + for _, op := range []v1.NodeSelectorOperator{"unknown"} { + nodeClaim.Spec.Requirements = []NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: op, Values: []string{"test"}}}, + } + Expect(env.Client.Create(ctx, nodeClaim)).ToNot(Succeed()) + } + }) + It("should fail for restricted domains", func() { + for label := range 
RestrictedLabelDomains { + nodeClaim.Spec.Requirements = []NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: label + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}}, + } + Expect(env.Client.Create(ctx, nodeClaim)).ToNot(Succeed()) + } + }) + It("should allow restricted domains exceptions", func() { + oldNodeClaim := nodeClaim.DeepCopy() + for label := range LabelDomainExceptions { + nodeClaim.Spec.Requirements = []NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: label + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}}, + } + Expect(env.Client.Create(ctx, nodeClaim)).To(Succeed()) + Expect(env.Client.Delete(ctx, nodeClaim)).To(Succeed()) + nodeClaim = oldNodeClaim.DeepCopy() + } + }) + It("should allow restricted subdomains exceptions", func() { + oldNodeClaim := nodeClaim.DeepCopy() + for label := range LabelDomainExceptions { + nodeClaim.Spec.Requirements = []NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: "subdomain." + label + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}}, + } + Expect(env.Client.Create(ctx, nodeClaim)).To(Succeed()) + Expect(env.Client.Delete(ctx, nodeClaim)).To(Succeed()) + nodeClaim = oldNodeClaim.DeepCopy() + } + }) + It("should allow well known label exceptions", func() { + oldNodeClaim := nodeClaim.DeepCopy() + for label := range WellKnownLabels.Difference(sets.New(NodePoolLabelKey)) { + nodeClaim.Spec.Requirements = []NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: label, Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}}, + } + Expect(env.Client.Create(ctx, nodeClaim)).To(Succeed()) + Expect(env.Client.Delete(ctx, nodeClaim)).To(Succeed()) + nodeClaim = oldNodeClaim.DeepCopy() + } + }) + It("should allow non-empty set after removing overlapped value", func() { + nodeClaim.Spec.Requirements = []NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test", "foo"}}}, + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"test", "bar"}}}, + } + Expect(env.Client.Create(ctx, nodeClaim)).To(Succeed()) + }) + It("should allow empty requirements", func() { + nodeClaim.Spec.Requirements = []NodeSelectorRequirementWithMinValues{} + Expect(env.Client.Create(ctx, nodeClaim)).To(Succeed()) + }) + It("should fail with invalid GT or LT values", func() { + for _, requirement := range []NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpGt, Values: []string{}}}, + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpGt, Values: []string{"1", "2"}}}, + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpGt, Values: []string{"a"}}}, + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpGt, Values: []string{"-1"}}}, + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpLt, Values: []string{}}}, + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpLt, Values: []string{"1", 
"2"}}}, + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpLt, Values: []string{"a"}}}, + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpLt, Values: []string{"-1"}}}, + } { + nodeClaim.Spec.Requirements = []NodeSelectorRequirementWithMinValues{requirement} + Expect(env.Client.Create(ctx, nodeClaim)).ToNot(Succeed()) + } + }) + It("should error when minValues is negative", func() { + nodeClaim.Spec.Requirements = []NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpIn, Values: []string{"insance-type-1"}}, MinValues: lo.ToPtr(-1)}, + } + Expect(env.Client.Create(ctx, nodeClaim)).ToNot(Succeed()) + }) + It("should error when minValues is zero", func() { + nodeClaim.Spec.Requirements = []NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpIn, Values: []string{"insance-type-1"}}, MinValues: lo.ToPtr(0)}, + } + Expect(env.Client.Create(ctx, nodeClaim)).ToNot(Succeed()) + }) + It("should error when minValues is more than 50", func() { + nodeClaim.Spec.Requirements = []NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpExists}, MinValues: lo.ToPtr(51)}, + } + Expect(env.Client.Create(ctx, nodeClaim)).ToNot(Succeed()) + }) + It("should allow more than 50 values if minValues is not specified.", func() { + var instanceTypes []string + for i := 0; i < 90; i++ { + instanceTypes = append(instanceTypes, "instance"+strconv.Itoa(i)) + } + nodeClaim.Spec.Requirements = []NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpIn, Values: instanceTypes}}, + } + Expect(env.Client.Create(ctx, nodeClaim)).To(Succeed()) + }) + It("should error when minValues is greater than the number of unique values specified within In operator", func() { + nodeClaim.Spec.Requirements = []NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpIn, Values: []string{"insance-type-1"}}, MinValues: lo.ToPtr(2)}, + } + Expect(env.Client.Create(ctx, nodeClaim)).ToNot(Succeed()) + }) + It("should error when requirements is greater than 100", func() { + var req []NodeSelectorRequirementWithMinValues + for i := 0; i < 101; i++ { + req = append(req, NodeSelectorRequirementWithMinValues{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: test.RandomName(), Operator: v1.NodeSelectorOpIn, Values: []string{test.RandomName()}}}) + } + nodeClaim.Spec.Requirements = req + Expect(env.Client.Create(ctx, nodeClaim)).ToNot(Succeed()) + }) + }) + Context("TerminationGracePeriod", func() { + It("should succeed on a positive terminationGracePeriod duration", func() { + nodeClaim.Spec.TerminationGracePeriod = &metav1.Duration{Duration: time.Second * 300} + Expect(env.Client.Create(ctx, nodeClaim)).To(Succeed()) + }) + It("should fail on a negative terminationGracePeriod duration", func() { + nodeClaim.Spec.TerminationGracePeriod = &metav1.Duration{Duration: time.Second * -30} + Expect(env.Client.Create(ctx, nodeClaim)).ToNot(Succeed()) + }) + }) +}) diff --git a/pkg/apis/v1/nodepool.go b/pkg/apis/v1/nodepool.go new file mode 100644 index 
diff --git a/pkg/apis/v1/nodepool.go b/pkg/apis/v1/nodepool.go
new file mode 100644
index 0000000000..3de3cc3f51
--- /dev/null
+++ b/pkg/apis/v1/nodepool.go
@@ -0,0 +1,331 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"sort"
+	"strconv"
+
+	"github.com/mitchellh/hashstructure/v2"
+	"github.com/robfig/cron/v3"
+	"github.com/samber/lo"
+	"go.uber.org/multierr"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/utils/clock"
+)
+
+// NodePoolSpec is the top level nodepool specification. Nodepools
+// launch nodes in response to pods that are unschedulable. A single nodepool
+// is capable of managing a diverse set of nodes. Node properties are determined
+// from a combination of nodepool and pod scheduling constraints.
+type NodePoolSpec struct {
+	// Template contains the template of possibilities for the provisioning logic to launch a NodeClaim with.
+	// NodeClaims launched from this NodePool will often be further constrained than the template specifies.
+	// +required
+	Template NodeClaimTemplate `json:"template"`
+	// Disruption contains the parameters that relate to Karpenter's disruption logic
+	// +optional
+	Disruption Disruption `json:"disruption"`
+	// Limits define a set of bounds for provisioning capacity.
+	// +optional
+	Limits Limits `json:"limits,omitempty"`
+	// Weight is the priority given to the nodepool during scheduling. A higher
+	// numerical weight indicates that this nodepool will be ordered
+	// ahead of other nodepools with lower weights. A nodepool with no weight
+	// will be treated as if it is a nodepool with a weight of 0.
+	// +kubebuilder:validation:Minimum:=1
+	// +kubebuilder:validation:Maximum:=100
+	// +optional
+	Weight *int32 `json:"weight,omitempty"`
+}
+
+type Disruption struct {
+	// ConsolidateAfter is the duration the controller will wait
+	// before attempting to terminate nodes that are underutilized.
+	// Refer to ConsolidationPolicy for how underutilization is considered.
+	// +kubebuilder:validation:Pattern=`^(([0-9]+(s|m|h))+)|(Never)$`
+	// +kubebuilder:validation:Type="string"
+	// +kubebuilder:validation:Schemaless
+	// +required
+	ConsolidateAfter NillableDuration `json:"consolidateAfter"`
+	// ConsolidationPolicy describes which nodes Karpenter can disrupt through its consolidation
+	// algorithm. This policy defaults to "WhenUnderutilized" if not specified
+	// +kubebuilder:default:="WhenUnderutilized"
+	// +kubebuilder:validation:Enum:={WhenEmpty,WhenUnderutilized}
+	// +optional
+	ConsolidationPolicy ConsolidationPolicy `json:"consolidationPolicy,omitempty"`
+	// Budgets is a list of Budgets.
+	// If there are multiple active budgets, Karpenter uses
+	// the most restrictive value. If left undefined,
+	// this will default to one budget with a value of 10%.
+	// +kubebuilder:validation:XValidation:message="'schedule' must be set with 'duration'",rule="self.all(x, has(x.schedule) == has(x.duration))"
+	// +kubebuilder:default:={{nodes: "10%"}}
+	// +kubebuilder:validation:MaxItems=50
+	// +optional
+	Budgets []Budget `json:"budgets,omitempty" hash:"ignore"`
+}
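A sketch of how these fields compose, assuming the NillableDuration helper used throughout this diff. Note the CEL rule above forces schedule and duration to be set together; a budget with neither is always active (values here are illustrative):

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/samber/lo"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"
    )

    func main() {
    	d := karpv1.Disruption{
    		ConsolidationPolicy: karpv1.ConsolidationPolicyWhenEmpty,
    		ConsolidateAfter:    karpv1.NillableDuration{Duration: lo.ToPtr(30 * time.Second)},
    		Budgets: []karpv1.Budget{
    			// No schedule/duration: always active, allows disrupting 10% of nodes.
    			{Nodes: "10%"},
    			// Scheduled freeze: zero disruptions for 2h after 09:00 UTC on weekdays.
    			{Nodes: "0", Schedule: lo.ToPtr("0 9 * * 1-5"), Duration: &metav1.Duration{Duration: 2 * time.Hour}},
    		},
    	}
    	fmt.Printf("%d budgets configured\n", len(d.Budgets))
    }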
+
+// Budget defines when Karpenter will restrict the
+// number of NodeClaims that can be terminating simultaneously.
+type Budget struct {
+	// Reasons is a list of disruption methods that this budget applies to. If Reasons is not set, this budget applies to all methods.
+	// Otherwise, this will apply to each reason defined.
+	// Allowed reasons are Underutilized, Empty, and Drifted.
+	// +optional
+	Reasons []DisruptionReason `json:"reasons,omitempty"`
+	// Nodes dictates the maximum number of NodeClaims owned by this NodePool
+	// that can be terminating at once. This is calculated by counting nodes that
+	// have a deletion timestamp set, or are actively being deleted by Karpenter.
+	// This field is required when specifying a budget.
+	// This cannot be of type intstr.IntOrString since kubebuilder doesn't support pattern
+	// checking on the integer form of IntOrString fields.
+	// Ref: https://github.com/kubernetes-sigs/controller-tools/blob/55efe4be40394a288216dab63156b0a64fb82929/pkg/crd/markers/validation.go#L379-L388
+	// +kubebuilder:validation:Pattern:="^((100|[0-9]{1,2})%|[0-9]+)$"
+	// +kubebuilder:default:="10%"
+	Nodes string `json:"nodes" hash:"ignore"`
+	// Schedule specifies when a budget begins being active, following
+	// the upstream cronjob syntax. If omitted, the budget is always active.
+	// Timezones are not supported.
+	// This field is required if Duration is set.
+	// +kubebuilder:validation:Pattern:=`^(@(annually|yearly|monthly|weekly|daily|midnight|hourly))|((.+)\s(.+)\s(.+)\s(.+)\s(.+))$`
+	// +optional
+	Schedule *string `json:"schedule,omitempty" hash:"ignore"`
+	// Duration determines how long a Budget is active after each Schedule hit.
+	// Only minutes and hours are accepted, as cron does not work in seconds.
+	// If omitted, the budget is always active.
+	// This is required if Schedule is set.
+	// This regex has an optional 0s at the end since the duration.String() always adds
+	// a 0s at the end.
+	// +kubebuilder:validation:Pattern=`^((([0-9]+(h|m))|([0-9]+h[0-9]+m))(0s)?)$`
+	// +kubebuilder:validation:Type="string"
+	// +optional
+	Duration *metav1.Duration `json:"duration,omitempty" hash:"ignore"`
+}
+
+type ConsolidationPolicy string
+
+const (
+	ConsolidationPolicyWhenEmpty         ConsolidationPolicy = "WhenEmpty"
+	ConsolidationPolicyWhenUnderutilized ConsolidationPolicy = "WhenUnderutilized"
+)
+
+// DisruptionReason defines valid reasons for disruption budgets.
+// +kubebuilder:validation:Enum={Underutilized,Empty,Drifted}
+type DisruptionReason string
+
+const (
+	DisruptionReasonUnderutilized DisruptionReason = "Underutilized"
+	DisruptionReasonEmpty         DisruptionReason = "Empty"
+	DisruptionReasonDrifted       DisruptionReason = "Drifted"
+)
+
+var (
+	// WellKnownDisruptionReasons is a list of all valid reasons for disruption budgets.
+	WellKnownDisruptionReasons = []DisruptionReason{DisruptionReasonUnderutilized, DisruptionReasonEmpty, DisruptionReasonDrifted}
+)
+
+type Limits v1.ResourceList
+
+func (l Limits) ExceededBy(resources v1.ResourceList) error {
+	if l == nil {
+		return nil
+	}
+	for resourceName, usage := range resources {
+		if limit, ok := l[resourceName]; ok {
+			if usage.Cmp(limit) > 0 {
+				return fmt.Errorf("%s resource usage of %v exceeds limit of %v", resourceName, usage.AsDec(), limit.AsDec())
+			}
+		}
+	}
+	return nil
+}
+
+type NodeClaimTemplate struct {
+	ObjectMeta `json:"metadata,omitempty"`
+	// +required
+	Spec NodeClaimSpec `json:"spec"`
+}
+
+type ObjectMeta struct {
+	// Map of string keys and values that can be used to organize and categorize
+	// (scope and select) objects. May match selectors of replication controllers
+	// and services.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
+	// +optional
+	Labels map[string]string `json:"labels,omitempty"`
+
+	// Annotations is an unstructured key value map stored with a resource that may be
+	// set by external tools to store and retrieve arbitrary metadata. They are not
+	// queryable and should be preserved when modifying objects.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations
+	// +optional
+	Annotations map[string]string `json:"annotations,omitempty"`
+}
+
+// NodePool is the Schema for the NodePools API
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=nodepools,scope=Cluster,categories=karpenter
+// +kubebuilder:printcolumn:name="NodeClass",type="string",JSONPath=".spec.template.spec.nodeClassRef.name",description=""
+// +kubebuilder:printcolumn:name="Nodes",type="string",JSONPath=".status.resources.nodes",description=""
+// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
+// +kubebuilder:printcolumn:name="Weight",type="integer",JSONPath=".spec.weight",priority=1,description=""
+// +kubebuilder:printcolumn:name="CPU",type="string",JSONPath=".status.resources.cpu",priority=1,description=""
+// +kubebuilder:printcolumn:name="Memory",type="string",JSONPath=".status.resources.memory",priority=1,description=""
+// +kubebuilder:subresource:status
+type NodePool struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// +required
+	Spec   NodePoolSpec   `json:"spec"`
+	Status NodePoolStatus `json:"status,omitempty"`
+}
+
+// We need to bump the NodePoolHashVersion when we make an update to the NodePool CRD under these conditions:
+// 1. An existing field that is already hashed changes its default value
+// 2. A field is added to the hash calculation with an already-set value
+// 3. A field is removed from the hash calculations
+const NodePoolHashVersion = "v3"
+
+func (in *NodePool) Hash() string {
+	return fmt.Sprint(lo.Must(hashstructure.Hash(in.Spec.Template, hashstructure.FormatV2, &hashstructure.HashOptions{
+		SlicesAsSets:    true,
+		IgnoreZeroValue: true,
+		ZeroNil:         true,
+	})))
+}
+
+// NodePoolList contains a list of NodePool
+// +kubebuilder:object:root=true
+type NodePoolList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []NodePool `json:"items"`
+}
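A sketch of how Hash() can support drift detection: since the hash covers spec.template only, and SlicesAsSets/IgnoreZeroValue/ZeroNil normalize ordering and zero values, a controller can stamp the hash into an annotation and compare it later. The annotation key below is illustrative, not necessarily the one Karpenter itself uses:

    package main

    import (
    	"fmt"

    	"github.com/samber/lo"
    	karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"
    )

    const hashKey = "example.com/nodepool-hash" // illustrative annotation key

    // isDrifted compares a previously stamped hash against the current spec hash.
    func isDrifted(np *karpv1.NodePool) bool {
    	stored, ok := np.Annotations[hashKey]
    	return ok && stored != np.Hash()
    }

    func main() {
    	np := &karpv1.NodePool{}
    	np.Annotations = map[string]string{hashKey: np.Hash()}

    	np.Spec.Weight = lo.ToPtr(int32(10)) // outside spec.template: hash unchanged
    	fmt.Println(isDrifted(np))           // false

    	np.Spec.Template.Labels = map[string]string{"team": "a"} // inside spec.template
    	fmt.Println(isDrifted(np))                               // true
    }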
+
+// OrderByWeight orders the NodePools in the NodePoolList by their priority weight in-place.
+// This priority evaluates the following things in precedence order:
+// 1. NodePools that have a larger weight are ordered first
+// 2. If two NodePools have the same weight, then the NodePool with the name later in the alphabet will come first
+func (nl *NodePoolList) OrderByWeight() {
+	sort.Slice(nl.Items, func(a, b int) bool {
+		weightA := lo.FromPtr(nl.Items[a].Spec.Weight)
+		weightB := lo.FromPtr(nl.Items[b].Spec.Weight)
+
+		if weightA == weightB {
+			// Order NodePools by name for a consistent ordering when sorting equal weight
+			return nl.Items[a].Name > nl.Items[b].Name
+		}
+		return weightA > weightB
+	})
+}
+
+// MustGetAllowedDisruptions calls GetAllowedDisruptionsByReason and falls back to an empty map
+// (zero allowed disruptions) if that call returns an error. This reduces the amount of state that
+// the disruption controller must reconcile, while GetAllowedDisruptionsByReason() remains available
+// to callers that need the validation errors bubbled up.
+func (in *NodePool) MustGetAllowedDisruptions(ctx context.Context, c clock.Clock, numNodes int) map[DisruptionReason]int {
+	allowedDisruptions, err := in.GetAllowedDisruptionsByReason(ctx, c, numNodes)
+	if err != nil {
+		return map[DisruptionReason]int{}
+	}
+	return allowedDisruptions
+}
+
+// GetAllowedDisruptionsByReason returns the minimum allowed disruptions across all disruption budgets, for all disruption methods for a given nodepool
+func (in *NodePool) GetAllowedDisruptionsByReason(ctx context.Context, c clock.Clock, numNodes int) (map[DisruptionReason]int, error) {
+	var multiErr error
+	allowedDisruptions := map[DisruptionReason]int{}
+	for _, reason := range WellKnownDisruptionReasons {
+		allowedDisruptions[reason] = math.MaxInt32
+	}
+
+	for _, budget := range in.Spec.Disruption.Budgets {
+		val, err := budget.GetAllowedDisruptions(c, numNodes)
+		if err != nil {
+			multiErr = multierr.Append(multiErr, err)
+		}
+		// If reasons is nil, it applies to all well known disruption reasons
+		for _, reason := range lo.Ternary(budget.Reasons == nil, WellKnownDisruptionReasons, budget.Reasons) {
+			allowedDisruptions[reason] = lo.Min([]int{allowedDisruptions[reason], val})
+		}
+	}
+
+	return allowedDisruptions, multiErr
+}
+
+// GetAllowedDisruptions returns an int that can be used as a comparison
+// for calculating if a disruption action is allowed. It returns an error if the
+// schedule is invalid. This returns math.MaxInt32 if the value is unbounded.
+func (in *Budget) GetAllowedDisruptions(c clock.Clock, numNodes int) (int, error) {
+	active, err := in.IsActive(c)
+	// If the budget is misconfigured, fail closed.
+	if err != nil {
+		return 0, err
+	}
+	if !active {
+		return math.MaxInt32, nil
+	}
+	// This will round up to the nearest whole number. Therefore, a disruption can
+	// sometimes exceed the disruption budget. This is the same as how Kubernetes
+	// handles MaxUnavailable with PDBs. Take the case with 5% disruptions, but
+	// 10 nodes. Karpenter will opt to allow 1 node to be disrupted, rather than
+	// blocking all disruptions for this nodepool.
+	res, err := intstr.GetScaledValueFromIntOrPercent(lo.ToPtr(GetIntStrFromValue(in.Nodes)), numNodes, true)
+	if err != nil {
+		// Should never happen since this is validated when the nodepool is applied
+		// If this value is incorrectly formatted, fail closed, since we don't know what
+		// they want here.
+		return 0, err
+	}
+	return res, nil
+}
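The round-up behavior called out in the comment above, shown standalone with the same upstream helper: "5%" of 10 nodes yields 1 allowed disruption rather than 0, mirroring how PDBs treat maxUnavailable. A minimal sketch:

    package main

    import (
    	"fmt"

    	"github.com/samber/lo"
    	"k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
    	for _, nodes := range []string{"5%", "50%", "3"} {
    		v := intstr.Parse(nodes) // plain integers stay ints, "N%" becomes a percent string
    		// roundUp=true, exactly as GetAllowedDisruptions passes it.
    		allowed := lo.Must(intstr.GetScaledValueFromIntOrPercent(lo.ToPtr(v), 10, true))
    		fmt.Printf("nodes=%q, 10 in cluster -> %d allowed\n", nodes, allowed)
    	}
    }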
+
+// IsActive takes a clock as input and returns if a budget is active.
+// It walks back in time by the time.Duration associated with the schedule,
+// and checks if the next time the schedule will hit is before the current time.
+// If the last schedule hit is exactly the duration in the past, this means the
+// schedule is active, as any more schedule hits in between would only extend this
+// window. This ensures that any previous schedule hits for a schedule are considered.
+func (in *Budget) IsActive(c clock.Clock) (bool, error) {
+	if in.Schedule == nil && in.Duration == nil {
+		return true, nil
+	}
+	schedule, err := cron.ParseStandard(fmt.Sprintf("TZ=UTC %s", lo.FromPtr(in.Schedule)))
+	if err != nil {
+		// Should only occur if there's a discrepancy
+		// with the validation regex and the cron package.
+		return false, fmt.Errorf("invariant violated, invalid cron %s", lo.FromPtr(in.Schedule))
+	}
+	// Walk back in time for the duration associated with the schedule
+	checkPoint := c.Now().UTC().Add(-lo.FromPtr(in.Duration).Duration)
+	nextHit := schedule.Next(checkPoint)
+	return !nextHit.After(c.Now().UTC()), nil
+}
+
+func GetIntStrFromValue(str string) intstr.IntOrString {
+	// If err is nil, we treat it as an int.
+	if intVal, err := strconv.Atoi(str); err == nil {
+		return intstr.FromInt(intVal)
+	}
+	return intstr.FromString(str)
+}
diff --git a/pkg/apis/v1/nodepool_budgets_test.go b/pkg/apis/v1/nodepool_budgets_test.go
new file mode 100644
index 0000000000..a9965d5f7e
--- /dev/null
+++ b/pkg/apis/v1/nodepool_budgets_test.go
@@ -0,0 +1,258 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1_test
+
+import (
+	"context"
+	"math"
+	"strings"
+	"time"
+
+	"github.com/Pallinder/go-randomdata"
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	"github.com/samber/lo"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clock "k8s.io/utils/clock/testing"
+
+	.
"sigs.k8s.io/karpenter/pkg/apis/v1" +) + +var _ = Describe("Budgets", func() { + var nodePool *NodePool + var budgets []Budget + var fakeClock *clock.FakeClock + var ctx = context.Background() + + BeforeEach(func() { + // Set the time to the middle of the year of 2000, the best year ever + fakeClock = clock.NewFakeClock(time.Date(2000, time.June, 15, 12, 30, 30, 0, time.UTC)) + budgets = []Budget{ + { + Nodes: "10", + Schedule: lo.ToPtr("* * * * *"), + Duration: lo.ToPtr(metav1.Duration{Duration: lo.Must(time.ParseDuration("1h"))}), + }, + { + Nodes: "100", + Schedule: lo.ToPtr("* * * * *"), + Duration: lo.ToPtr(metav1.Duration{Duration: lo.Must(time.ParseDuration("1h"))}), + }, + { + Nodes: "100%", + Schedule: lo.ToPtr("* * * * *"), + Duration: lo.ToPtr(metav1.Duration{Duration: lo.Must(time.ParseDuration("1h"))}), + }, + { + Reasons: []DisruptionReason{ + DisruptionReasonDrifted, + DisruptionReasonUnderutilized, + }, + Nodes: "15", + Schedule: lo.ToPtr("* * * * *"), + Duration: lo.ToPtr(metav1.Duration{Duration: lo.Must(time.ParseDuration("1h"))}), + }, + { + Reasons: []DisruptionReason{ + DisruptionReasonDrifted, + }, + Nodes: "5", + Schedule: lo.ToPtr("* * * * *"), + Duration: lo.ToPtr(metav1.Duration{Duration: lo.Must(time.ParseDuration("1h"))}), + }, + { + Reasons: []DisruptionReason{ + DisruptionReasonUnderutilized, + DisruptionReasonDrifted, + DisruptionReasonEmpty, + }, + Nodes: "0", + Schedule: lo.ToPtr("@weekly"), + Duration: lo.ToPtr(metav1.Duration{Duration: lo.Must(time.ParseDuration("1h"))}), + }, + } + nodePool = &NodePool{ + ObjectMeta: metav1.ObjectMeta{Name: strings.ToLower(randomdata.SillyName())}, + Spec: NodePoolSpec{ + Disruption: Disruption{ + Budgets: budgets, + }, + }, + } + }) + + Context("GetAllowedDisruptionsByReason", func() { + It("should return 0 for all reasons if a budget is active for all reasons", func() { + budgets[5].Schedule = lo.ToPtr("* * * * *") + budgets[5].Duration = lo.ToPtr(metav1.Duration{Duration: lo.Must(time.ParseDuration("1h"))}) + + disruptionsByReason, err := nodePool.GetAllowedDisruptionsByReason(ctx, fakeClock, 100) + Expect(err).To(BeNil()) + Expect(disruptionsByReason[DisruptionReasonUnderutilized]).To(Equal(0)) + Expect(disruptionsByReason[DisruptionReasonDrifted]).To(Equal(0)) + Expect(disruptionsByReason[DisruptionReasonEmpty]).To(Equal(0)) + }) + + It("should return MaxInt32 for all reasons when there are no active budgets", func() { + for i := range budgets { + budgets[i].Schedule = lo.ToPtr("@yearly") + budgets[i].Duration = lo.ToPtr(metav1.Duration{Duration: lo.Must(time.ParseDuration("1h"))}) + } + disruptionsByReason, err := nodePool.GetAllowedDisruptionsByReason(ctx, fakeClock, 100) + Expect(err).To(BeNil()) + + // All budgets should have unbounded disruptions when inactive + for _, disruptions := range disruptionsByReason { + Expect(disruptions).To(Equal(math.MaxInt32)) + } + }) + + It("should ignore reason-defined budgets when inactive", func() { + budgets[3].Schedule = lo.ToPtr("@yearly") + budgets[4].Schedule = lo.ToPtr("@yearly") + disruptionsByReason, err := nodePool.GetAllowedDisruptionsByReason(ctx, fakeClock, 100) + Expect(err).To(BeNil()) + for _, disruptions := range disruptionsByReason { + Expect(disruptions).To(Equal(10)) + } + }) + + It("should return the budget for all disruption reasons when undefined", func() { + nodePool.Spec.Disruption.Budgets = budgets[:1] + Expect(len(nodePool.Spec.Disruption.Budgets)).To(Equal(1)) + disruptionsByReason, err := nodePool.GetAllowedDisruptionsByReason(ctx, fakeClock, 100) 
+ Expect(err).To(BeNil()) + Expect(len(budgets[0].Reasons)).To(Equal(0)) + for _, disruptions := range disruptionsByReason { + Expect(disruptions).To(Equal(10)) + } + }) + + It("should get the minimum budget for each reason", func() { + + nodePool.Spec.Disruption.Budgets = append(nodePool.Spec.Disruption.Budgets, + []Budget{ + { + Schedule: lo.ToPtr("* * * * *"), + Nodes: "4", + Duration: lo.ToPtr(metav1.Duration{Duration: lo.Must(time.ParseDuration("1h"))}), + Reasons: []DisruptionReason{ + DisruptionReasonEmpty, + }, + }, + }...) + disruptionsByReason, err := nodePool.GetAllowedDisruptionsByReason(ctx, fakeClock, 100) + Expect(err).To(BeNil()) + + Expect(disruptionsByReason[DisruptionReasonEmpty]).To(Equal(4)) + Expect(disruptionsByReason[DisruptionReasonDrifted]).To(Equal(5)) + // The budget where reason == nil overrides the budget with a specified reason + Expect(disruptionsByReason[DisruptionReasonUnderutilized]).To(Equal(10)) + }) + + }) + + Context("AllowedDisruptions", func() { + It("should return zero values if a schedule is invalid", func() { + budgets[0].Schedule = lo.ToPtr("@wrongly") + val, err := budgets[0].GetAllowedDisruptions(fakeClock, 100) + Expect(err).ToNot(Succeed()) + Expect(val).To(BeNumerically("==", 0)) + }) + It("should return zero values if a nodes value is invalid", func() { + budgets[0].Nodes = "1000a%" + val, err := budgets[0].GetAllowedDisruptions(fakeClock, 100) + Expect(err).ToNot(Succeed()) + Expect(val).To(BeNumerically("==", 0)) + }) + It("should return MaxInt32 when a budget is inactive", func() { + budgets[0].Schedule = lo.ToPtr("@yearly") + budgets[0].Duration = lo.ToPtr(metav1.Duration{Duration: lo.Must(time.ParseDuration("1h"))}) + val, err := budgets[0].GetAllowedDisruptions(fakeClock, 100) + Expect(err).To(Succeed()) + Expect(val).To(BeNumerically("==", math.MaxInt32)) + }) + It("should return the int value when a budget is active", func() { + val, err := budgets[0].GetAllowedDisruptions(fakeClock, 100) + Expect(err).To(Succeed()) + Expect(val).To(BeNumerically("==", 10)) + }) + It("should return the string value when a budget is active", func() { + val, err := budgets[2].GetAllowedDisruptions(fakeClock, 100) + Expect(err).To(Succeed()) + Expect(val).To(BeNumerically("==", 100)) + }) + }) + + Context("IsActive", func() { + It("should always consider a schedule and time in UTC", func() { + // Set the time to start of June 2000 in a time zone 1 hour ahead of UTC + fakeClock = clock.NewFakeClock(time.Date(2000, time.June, 0, 0, 0, 0, 0, time.FixedZone("fake-zone", 3600))) + budgets[0].Schedule = lo.ToPtr("@daily") + budgets[0].Duration = lo.ToPtr(metav1.Duration{Duration: lo.Must(time.ParseDuration("30m"))}) + // IsActive should use UTC, not the location of the clock that's inputted. 
+ active, err := budgets[0].IsActive(fakeClock) + Expect(err).To(Succeed()) + Expect(active).To(BeFalse()) + }) + It("should return that a schedule is active when schedule and duration are nil", func() { + budgets[0].Schedule = nil + budgets[0].Duration = nil + active, err := budgets[0].IsActive(fakeClock) + Expect(err).To(Succeed()) + Expect(active).To(BeTrue()) + }) + It("should return that a schedule is active", func() { + active, err := budgets[0].IsActive(fakeClock) + Expect(err).To(Succeed()) + Expect(active).To(BeTrue()) + }) + It("should return that a schedule is inactive", func() { + budgets[0].Schedule = lo.ToPtr("@yearly") + active, err := budgets[0].IsActive(fakeClock) + Expect(err).To(Succeed()) + Expect(active).To(BeFalse()) + }) + It("should return that a schedule is active when the schedule hit is in the middle of the duration", func() { + // Set the date to the start of the year 1000, the best year ever + fakeClock = clock.NewFakeClock(time.Date(1000, time.January, 1, 12, 0, 0, 0, time.UTC)) + budgets[0].Schedule = lo.ToPtr("@yearly") + budgets[0].Duration = lo.ToPtr(metav1.Duration{Duration: lo.Must(time.ParseDuration("24h"))}) + active, err := budgets[0].IsActive(fakeClock) + Expect(err).To(Succeed()) + Expect(active).To(BeTrue()) + }) + It("should return that a schedule is active when the duration is longer than the recurrence", func() { + // Set the date to the first monday in 2024, the best year ever + fakeClock = clock.NewFakeClock(time.Date(2024, time.January, 7, 0, 0, 0, 0, time.UTC)) + budgets[0].Schedule = lo.ToPtr("@daily") + budgets[0].Duration = lo.ToPtr(metav1.Duration{Duration: lo.Must(time.ParseDuration("48h"))}) + active, err := budgets[0].IsActive(fakeClock) + Expect(err).To(Succeed()) + Expect(active).To(BeTrue()) + }) + It("should return that a schedule is inactive when the schedule hit is after the duration", func() { + // Set the date to the first monday in 2024, the best year ever + fakeClock = clock.NewFakeClock(time.Date(2024, time.January, 7, 0, 0, 0, 0, time.UTC)) + budgets[0].Schedule = lo.ToPtr("30 6 * * SUN") + budgets[0].Duration = lo.ToPtr(metav1.Duration{Duration: lo.Must(time.ParseDuration("6h"))}) + active, err := budgets[0].IsActive(fakeClock) + Expect(err).To(Succeed()) + Expect(active).ToNot(BeTrue()) + }) + }) +}) diff --git a/pkg/apis/v1/nodepool_defaults.go b/pkg/apis/v1/nodepool_defaults.go new file mode 100644 index 0000000000..98d0990aee --- /dev/null +++ b/pkg/apis/v1/nodepool_defaults.go @@ -0,0 +1,24 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" +) + +// SetDefaults for the NodePool +func (in *NodePool) SetDefaults(_ context.Context) {} diff --git a/pkg/apis/v1/nodepool_status.go b/pkg/apis/v1/nodepool_status.go new file mode 100644 index 0000000000..1b3f974694 --- /dev/null +++ b/pkg/apis/v1/nodepool_status.go @@ -0,0 +1,55 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "github.com/awslabs/operatorpkg/status" + v1 "k8s.io/api/core/v1" +) + +const ( + // ConditionTypeValidationSucceeded = "ValidationSucceeded" condition indicates that the + // runtime-based configuration is valid for this NodePool + ConditionTypeValidationSucceeded = "ValidationSucceeded" + // ConditionTypeNodeClassReady = "NodeClassReady" condition indicates that underlying nodeClass was resolved and is reporting as Ready + ConditionTypeNodeClassReady = "NodeClassReady" +) + +// NodePoolStatus defines the observed state of NodePool +type NodePoolStatus struct { + // Resources is the list of resources that have been provisioned. + // +optional + Resources v1.ResourceList `json:"resources,omitempty"` + // Conditions contains signals for health and readiness + // +optional + Conditions []status.Condition `json:"conditions,omitempty"` +} + +func (in *NodePool) StatusConditions() status.ConditionSet { + return status.NewReadyConditions( + ConditionTypeValidationSucceeded, + ConditionTypeNodeClassReady, + ).For(in) +} + +func (in *NodePool) GetConditions() []status.Condition { + return in.Status.Conditions +} + +func (in *NodePool) SetConditions(conditions []status.Condition) { + in.Status.Conditions = conditions +} diff --git a/pkg/apis/v1/nodepool_validation.go b/pkg/apis/v1/nodepool_validation.go new file mode 100644 index 0000000000..be327c0885 --- /dev/null +++ b/pkg/apis/v1/nodepool_validation.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/validation" + "knative.dev/pkg/apis" +) + +// RuntimeValidate will be used to validate any part of the CRD that can not be validated at CRD creation +func (in *NodePool) RuntimeValidate() (errs *apis.FieldError) { + return errs.Also( + in.Spec.Template.validateLabels().ViaField("spec.template.metadata"), + in.Spec.Template.Spec.validateTaints().ViaField("spec.template.spec"), + in.Spec.Template.Spec.validateRequirements().ViaField("spec.template.spec"), + in.Spec.Template.validateRequirementsNodePoolKeyDoesNotExist().ViaField("spec.template.spec"), + ) +} + +func (in *NodeClaimTemplate) validateLabels() (errs *apis.FieldError) { + for key, value := range in.Labels { + if key == NodePoolLabelKey { + errs = errs.Also(apis.ErrInvalidKeyName(key, "labels", "restricted")) + } + for _, err := range validation.IsQualifiedName(key) { + errs = errs.Also(apis.ErrInvalidKeyName(key, "labels", err)) + } + for _, err := range validation.IsValidLabelValue(value) { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s, %s", value, err), fmt.Sprintf("labels[%s]", key))) + } + if err := IsRestrictedLabel(key); err != nil { + errs = errs.Also(apis.ErrInvalidKeyName(key, "labels", err.Error())) + } + } + return errs +} + +func (in *NodeClaimTemplate) validateRequirementsNodePoolKeyDoesNotExist() (errs *apis.FieldError) { + for i, requirement := range in.Spec.Requirements { + if requirement.Key == NodePoolLabelKey { + errs = errs.Also(apis.ErrInvalidArrayValue(fmt.Sprintf("%s is restricted", requirement.Key), "requirements", i)) + } + } + return errs +} diff --git a/pkg/apis/v1/nodepool_validation_cel_test.go b/pkg/apis/v1/nodepool_validation_cel_test.go new file mode 100644 index 0000000000..bca2ae3c4a --- /dev/null +++ b/pkg/apis/v1/nodepool_validation_cel_test.go @@ -0,0 +1,617 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1_test + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/Pallinder/go-randomdata" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/samber/lo" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + + . 
"sigs.k8s.io/karpenter/pkg/apis/v1" +) + +var _ = Describe("CEL/Validation", func() { + var nodePool *NodePool + + BeforeEach(func() { + if env.Version.Minor() < 25 { + Skip("CEL Validation is for 1.25>") + } + nodePool = &NodePool{ + ObjectMeta: metav1.ObjectMeta{Name: strings.ToLower(randomdata.SillyName())}, + Spec: NodePoolSpec{ + Template: NodeClaimTemplate{ + Spec: NodeClaimSpec{ + NodeClassRef: &NodeClassReference{ + Kind: "NodeClaim", + Name: "default", + }, + Requirements: []NodeSelectorRequirementWithMinValues{ + { + NodeSelectorRequirement: v1.NodeSelectorRequirement{ + Key: CapacityTypeLabelKey, + Operator: v1.NodeSelectorOpExists, + }, + }, + }, + }, + }, + }, + } + }) + Context("Disruption", func() { + It("should fail on negative expireAfter", func() { + nodePool.Spec.Template.Spec.ExpireAfter.Duration = lo.ToPtr(lo.Must(time.ParseDuration("-1s"))) + Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed()) + }) + It("should succeed on a disabled expireAfter", func() { + nodePool.Spec.Template.Spec.ExpireAfter.Duration = nil + Expect(env.Client.Create(ctx, nodePool)).To(Succeed()) + }) + It("should succeed on a valid expireAfter", func() { + nodePool.Spec.Template.Spec.ExpireAfter.Duration = lo.ToPtr(lo.Must(time.ParseDuration("30s"))) + Expect(env.Client.Create(ctx, nodePool)).To(Succeed()) + }) + It("should fail on negative consolidateAfter", func() { + nodePool.Spec.Disruption.ConsolidateAfter = NillableDuration{Duration: lo.ToPtr(lo.Must(time.ParseDuration("-1s")))} + Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed()) + }) + It("should succeed on a disabled consolidateAfter", func() { + nodePool.Spec.Disruption.ConsolidateAfter = NillableDuration{Duration: nil} + Expect(env.Client.Create(ctx, nodePool)).To(Succeed()) + }) + It("should succeed on a valid consolidateAfter", func() { + nodePool.Spec.Disruption.ConsolidateAfter = NillableDuration{Duration: lo.ToPtr(lo.Must(time.ParseDuration("30s")))} + nodePool.Spec.Disruption.ConsolidationPolicy = ConsolidationPolicyWhenEmpty + Expect(env.Client.Create(ctx, nodePool)).To(Succeed()) + }) + It("should succeed when setting consolidateAfter with consolidationPolicy=WhenEmpty", func() { + nodePool.Spec.Disruption.ConsolidateAfter = NillableDuration{Duration: lo.ToPtr(lo.Must(time.ParseDuration("30s")))} + nodePool.Spec.Disruption.ConsolidationPolicy = ConsolidationPolicyWhenEmpty + Expect(env.Client.Create(ctx, nodePool)).To(Succeed()) + }) + It("should fail when setting consolidateAfter with consolidationPolicy=WhenUnderutilized", func() { + nodePool.Spec.Disruption.ConsolidateAfter = NillableDuration{Duration: lo.ToPtr(lo.Must(time.ParseDuration("30s")))} + nodePool.Spec.Disruption.ConsolidationPolicy = ConsolidationPolicyWhenUnderutilized + Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed()) + }) + It("should succeed when not setting consolidateAfter to 'Never' with consolidationPolicy=WhenUnderutilized", func() { + nodePool.Spec.Disruption.ConsolidateAfter = NillableDuration{Duration: nil} + nodePool.Spec.Disruption.ConsolidationPolicy = ConsolidationPolicyWhenUnderutilized + Expect(env.Client.Create(ctx, nodePool)).To(Succeed()) + }) + It("should fail when creating a budget with an invalid cron", func() { + nodePool.Spec.Disruption.Budgets = []Budget{{ + Nodes: "10", + Schedule: lo.ToPtr("*"), + Duration: &metav1.Duration{Duration: lo.Must(time.ParseDuration("20m"))}, + }} + Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed()) + }) + It("should fail when creating a schedule with less than 5 entries", 
func() {
+			nodePool.Spec.Disruption.Budgets = []Budget{{
+				Nodes:    "10",
+				Schedule: lo.ToPtr("* * * * "),
+				Duration: &metav1.Duration{Duration: lo.Must(time.ParseDuration("20m"))},
+			}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+		})
+		It("should fail when creating a budget with a negative duration", func() {
+			nodePool.Spec.Disruption.Budgets = []Budget{{
+				Nodes:    "10",
+				Schedule: lo.ToPtr("* * * * *"),
+				Duration: &metav1.Duration{Duration: lo.Must(time.ParseDuration("-20m"))},
+			}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+		})
+		It("should fail when creating a budget with a seconds duration", func() {
+			nodePool.Spec.Disruption.Budgets = []Budget{{
+				Nodes:    "10",
+				Schedule: lo.ToPtr("* * * * *"),
+				Duration: &metav1.Duration{Duration: lo.Must(time.ParseDuration("30s"))},
+			}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+		})
+		It("should fail when creating a budget with a negative value int", func() {
+			nodePool.Spec.Disruption.Budgets = []Budget{{
+				Nodes: "-10",
+			}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+		})
+		It("should fail when creating a budget with a negative value percent", func() {
+			nodePool.Spec.Disruption.Budgets = []Budget{{
+				Nodes: "-10%",
+			}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+		})
+		It("should fail when creating a budget with a value percent with more than 3 digits", func() {
+			nodePool.Spec.Disruption.Budgets = []Budget{{
+				Nodes: "1000%",
+			}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+		})
+		It("should fail when creating a budget with a cron but no duration", func() {
+			nodePool.Spec.Disruption.Budgets = []Budget{{
+				Nodes:    "10",
+				Schedule: lo.ToPtr("* * * * *"),
+			}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+		})
+		It("should fail when creating a budget with a duration but no cron", func() {
+			nodePool.Spec.Disruption.Budgets = []Budget{{
+				Nodes:    "10",
+				Duration: &metav1.Duration{Duration: lo.Must(time.ParseDuration("20m"))},
+			}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+		})
+		It("should succeed when creating a budget with both duration and cron", func() {
+			nodePool.Spec.Disruption.Budgets = []Budget{{
+				Nodes:    "10",
+				Schedule: lo.ToPtr("* * * * *"),
+				Duration: &metav1.Duration{Duration: lo.Must(time.ParseDuration("20m"))},
+			}}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+		})
+		It("should succeed when creating a budget with hours and minutes in duration", func() {
+			nodePool.Spec.Disruption.Budgets = []Budget{{
+				Nodes:    "10",
+				Schedule: lo.ToPtr("* * * * *"),
+				Duration: &metav1.Duration{Duration: lo.Must(time.ParseDuration("2h20m"))},
+			}}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+		})
+		It("should succeed when creating a budget with neither duration nor cron", func() {
+			nodePool.Spec.Disruption.Budgets = []Budget{{
+				Nodes: "10",
+			}}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+		})
+		It("should succeed when creating a budget with special cased crons", func() {
+			nodePool.Spec.Disruption.Budgets = []Budget{{
+				Nodes:    "10",
+				Schedule: lo.ToPtr("@annually"),
+				Duration: &metav1.Duration{Duration: lo.Must(time.ParseDuration("20m"))},
+			}}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+		})
+		It("should fail when creating two budgets where one has an invalid crontab", func() {
+			nodePool.Spec.Disruption.Budgets = []Budget{
+				{
+					Nodes:    "10",
+					Schedule: lo.ToPtr("@annually"),
+					Duration: &metav1.Duration{Duration: lo.Must(time.ParseDuration("20m"))},
+				},
+				{
+					Nodes:    "10",
+					Schedule: lo.ToPtr("*"),
+					Duration: &metav1.Duration{Duration: lo.Must(time.ParseDuration("20m"))},
+				}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+		})
+		It("should fail when creating multiple budgets where one doesn't have both schedule and duration", func() {
+			nodePool.Spec.Disruption.Budgets = []Budget{
+				{
+					Nodes:    "10",
+					Duration: &metav1.Duration{Duration: lo.Must(time.ParseDuration("20m"))},
+				},
+				{
+					Nodes:    "10",
+					Schedule: lo.ToPtr("* * * * *"),
+					Duration: &metav1.Duration{Duration: lo.Must(time.ParseDuration("20m"))},
+				},
+				{
+					Nodes: "10",
+				},
+			}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+		})
+		DescribeTable("should succeed when creating a budget with valid reasons", func(reason DisruptionReason) {
+			nodePool.Spec.Disruption.Budgets = []Budget{{
+				Nodes:    "10",
+				Schedule: lo.ToPtr("* * * * *"),
+				Duration: &metav1.Duration{Duration: lo.Must(time.ParseDuration("20m"))},
+				Reasons:  []DisruptionReason{reason},
+			}}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+		},
+			Entry("should allow disruption reason Drifted", DisruptionReasonDrifted),
+			Entry("should allow disruption reason Underutilized", DisruptionReasonUnderutilized),
+			Entry("should allow disruption reason Empty", DisruptionReasonEmpty),
+		)
+
+		DescribeTable("should fail when creating a budget with invalid reasons", func(reason string) {
+			nodePool.Spec.Disruption.Budgets = []Budget{{
+				Nodes:    "10",
+				Schedule: lo.ToPtr("* * * * *"),
+				Duration: &metav1.Duration{Duration: lo.Must(time.ParseDuration("20m"))},
+				Reasons:  []DisruptionReason{DisruptionReason(reason)},
+			}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+		},
+			Entry("should not allow invalid reason", "invalid"),
+			Entry("should not allow expired disruption reason", "expired"),
+			Entry("should not allow empty reason", ""),
+		)
+
+		It("should allow setting multiple reasons", func() {
+			nodePool.Spec.Disruption.Budgets = []Budget{{
+				Nodes:    "10",
+				Schedule: lo.ToPtr("* * * * *"),
+				Duration: &metav1.Duration{Duration: lo.Must(time.ParseDuration("20m"))},
+				Reasons:  []DisruptionReason{DisruptionReasonDrifted, DisruptionReasonEmpty},
+			}}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+		})
+	})
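Taken together, the budget cases above pin down the intended CEL rules: schedule and duration must be set together, durations are positive with minute granularity, and nodes is a non-negative integer or a percent with at most three digits. For illustration only (this sketch is editorial, not part of the diff, and the field values are invented), a budget that satisfies all of those validations:

	// Illustrative sketch: limit disruption to 10% of nodes during an 8h
	// window on weekday mornings; satisfies the validations exercised above.
	budget := Budget{
		Nodes:    "10%",                                     // integer ("10") or percent ("10%"), never negative
		Schedule: lo.ToPtr("0 9 * * mon-fri"),               // five-field cron; special forms like "@annually" also pass
		Duration: &metav1.Duration{Duration: 8 * time.Hour}, // required alongside Schedule; no seconds, never negative
	}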
+	Context("Taints", func() {
+		It("should succeed for valid taints", func() {
+			nodePool.Spec.Template.Spec.Taints = []v1.Taint{
+				{Key: "a", Value: "b", Effect: v1.TaintEffectNoSchedule},
+				{Key: "c", Value: "d", Effect: v1.TaintEffectNoExecute},
+				{Key: "e", Value: "f", Effect: v1.TaintEffectPreferNoSchedule},
+				{Key: "Test", Value: "f", Effect: v1.TaintEffectPreferNoSchedule},
+				{Key: "test.com/Test", Value: "f", Effect: v1.TaintEffectPreferNoSchedule},
+				{Key: "test.com.com/test", Value: "f", Effect: v1.TaintEffectPreferNoSchedule},
+				{Key: "key-only", Effect: v1.TaintEffectNoExecute},
+			}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+			Expect(nodePool.RuntimeValidate()).To(Succeed())
+		})
+		It("should fail for invalid taint keys", func() {
+			nodePool.Spec.Template.Spec.Taints = []v1.Taint{{Key: "test.com.com}", Effect: v1.TaintEffectNoSchedule}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+			nodePool.Spec.Template.Spec.Taints = []v1.Taint{{Key: "Test.com/test", Effect: v1.TaintEffectNoSchedule}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+			nodePool.Spec.Template.Spec.Taints = []v1.Taint{{Key: "test/test/test", Effect: v1.TaintEffectNoSchedule}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+			nodePool.Spec.Template.Spec.Taints = []v1.Taint{{Key: "test/", Effect: v1.TaintEffectNoSchedule}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+			nodePool.Spec.Template.Spec.Taints = []v1.Taint{{Key: "/test", Effect: v1.TaintEffectNoSchedule}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+		})
+		It("should fail at runtime for taint keys that are too long", func() {
+			oldNodePool := nodePool.DeepCopy()
+			nodePool.Spec.Template.Spec.Taints = []v1.Taint{{Key: fmt.Sprintf("test.com.test.%s/test", strings.ToLower(randomdata.Alphanumeric(250))), Effect: v1.TaintEffectNoSchedule}}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+			Expect(env.Client.Delete(ctx, nodePool)).To(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+			nodePool = oldNodePool.DeepCopy()
+			nodePool.Spec.Template.Spec.Taints = []v1.Taint{{Key: fmt.Sprintf("test.com.test/test-%s", strings.ToLower(randomdata.Alphanumeric(250))), Effect: v1.TaintEffectNoSchedule}}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+		})
+		It("should fail for missing taint key", func() {
+			nodePool.Spec.Template.Spec.Taints = []v1.Taint{{Effect: v1.TaintEffectNoSchedule}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+		})
+		It("should fail for invalid taint value", func() {
+			nodePool.Spec.Template.Spec.Taints = []v1.Taint{{Key: "invalid-value", Effect: v1.TaintEffectNoSchedule, Value: "???"}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+		})
+		It("should fail for invalid taint effect", func() {
+			nodePool.Spec.Template.Spec.Taints = []v1.Taint{{Key: "invalid-effect", Effect: "???"}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+		})
+		It("should not fail for same key with different effects", func() {
+			nodePool.Spec.Template.Spec.Taints = []v1.Taint{
+				{Key: "a", Effect: v1.TaintEffectNoSchedule},
+				{Key: "a", Effect: v1.TaintEffectNoExecute},
+			}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+			Expect(nodePool.RuntimeValidate()).To(Succeed())
+		})
+	})
+	Context("Requirements", func() {
+		It("should succeed for valid requirement keys", func() {
+			nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: "Test", Operator: v1.NodeSelectorOpExists}},
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: "test.com/Test", Operator: v1.NodeSelectorOpExists}},
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: "test.com.com/test", Operator: v1.NodeSelectorOpExists}},
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: "key-only", Operator: v1.NodeSelectorOpExists}},
+			}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+			Expect(nodePool.RuntimeValidate()).To(Succeed())
+		})
+		It("should fail for invalid requirement keys", func() {
+			nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: "test.com.com}", Operator: v1.NodeSelectorOpExists}}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+			nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: "Test.com/test", Operator: v1.NodeSelectorOpExists}}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+			nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: "test/test/test", Operator: v1.NodeSelectorOpExists}}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+			nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: "test/", Operator: v1.NodeSelectorOpExists}}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+			nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: "/test", Operator: v1.NodeSelectorOpExists}}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+		})
+		It("should fail at runtime for requirement keys that are too long", func() {
+			oldNodePool := nodePool.DeepCopy()
+			nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: fmt.Sprintf("test.com.test.%s/test", strings.ToLower(randomdata.Alphanumeric(250))), Operator: v1.NodeSelectorOpExists}}}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+			Expect(env.Client.Delete(ctx, nodePool)).To(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+			nodePool = oldNodePool.DeepCopy()
+			nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: fmt.Sprintf("test.com.test/test-%s", strings.ToLower(randomdata.Alphanumeric(250))), Operator: v1.NodeSelectorOpExists}}}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+		})
+		It("should fail for the karpenter.sh/nodepool label", func() {
+			nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: NodePoolLabelKey, Operator: v1.NodeSelectorOpIn, Values: []string{randomdata.SillyName()}}}}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+		})
+		It("should allow supported ops", func() {
+			nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}},
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpGt, Values: []string{"1"}}},
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpLt, Values: []string{"1"}}},
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn}},
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpExists}},
+			}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+			Expect(nodePool.RuntimeValidate()).To(Succeed())
+		})
+		It("should fail for unsupported ops", func() {
+			for _, op := range []v1.NodeSelectorOperator{"unknown"} {
+				nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{
+					{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: op, Values: []string{"test"}}},
+				}
+				Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+				Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+			}
+		})
+		It("should fail for restricted domains", func() {
+			for label := range RestrictedLabelDomains {
+				nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{
+					{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: label + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}},
+				}
+				Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+				Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+			}
+		})
+		It("should allow restricted domains exceptions", func() {
+			oldNodePool := nodePool.DeepCopy()
+			for label := range LabelDomainExceptions {
+				nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{
+					{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: label + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}},
+				}
+				Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+				Expect(nodePool.RuntimeValidate()).To(Succeed())
+				Expect(env.Client.Delete(ctx, nodePool)).To(Succeed())
+				nodePool = oldNodePool.DeepCopy()
+			}
+		})
+		It("should allow restricted subdomains exceptions", func() {
+			oldNodePool := nodePool.DeepCopy()
+			for label := range LabelDomainExceptions {
+				nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{
+					{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: "subdomain." + label + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}},
+				}
+				Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+				Expect(nodePool.RuntimeValidate()).To(Succeed())
+				Expect(env.Client.Delete(ctx, nodePool)).To(Succeed())
+				nodePool = oldNodePool.DeepCopy()
+			}
+		})
+		It("should allow well known label exceptions", func() {
+			oldNodePool := nodePool.DeepCopy()
+			for label := range WellKnownLabels.Difference(sets.New(NodePoolLabelKey)) {
+				nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{
+					{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: label, Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}},
+				}
+				Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+				Expect(nodePool.RuntimeValidate()).To(Succeed())
+				Expect(env.Client.Delete(ctx, nodePool)).To(Succeed())
+				nodePool = oldNodePool.DeepCopy()
+			}
+		})
+		It("should allow non-empty set after removing overlapped value", func() {
+			nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test", "foo"}}},
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"test", "bar"}}},
+			}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+			Expect(nodePool.RuntimeValidate()).To(Succeed())
+		})
+		It("should allow empty requirements", func() {
+			nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+			Expect(nodePool.RuntimeValidate()).To(Succeed())
+		})
+		It("should fail with invalid GT or LT values", func() {
+			for _, requirement := range []NodeSelectorRequirementWithMinValues{
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpGt, Values: []string{}}},
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpGt, Values: []string{"1", "2"}}},
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpGt, Values: []string{"a"}}},
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpGt, Values: []string{"-1"}}},
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpLt, Values: []string{}}},
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpLt, Values: []string{"1", "2"}}},
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpLt, Values: []string{"a"}}},
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpLt, Values: []string{"-1"}}},
+			} {
+				nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{requirement}
+				Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+				Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+			}
+		})
+		It("should error when minValues is negative", func() {
+			nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpIn, Values: []string{"instance-type-1"}}, MinValues: lo.ToPtr(-1)},
+			}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+		})
+		It("should error when minValues is zero", func() {
+			nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpIn, Values: []string{"instance-type-1"}}, MinValues: lo.ToPtr(0)},
+			}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+		})
+		It("should error when minValues is more than 50", func() {
+			nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpExists}, MinValues: lo.ToPtr(51)},
+			}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+		})
+		It("should allow more than 50 values if minValues is not specified", func() {
+			var instanceTypes []string
+			for i := 0; i < 90; i++ {
+				instanceTypes = append(instanceTypes, "instance"+strconv.Itoa(i))
+			}
+			nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpIn, Values: instanceTypes}},
+			}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+		})
+		It("should error when minValues is greater than the number of unique values specified within In operator", func() {
+			nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{
+				{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpIn, Values: []string{"instance-type-1"}}, MinValues: lo.ToPtr(2)},
+			}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+		})
+	})
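The minValues cases above bound the field to the range [1, 50] and require an In requirement to carry at least minValues unique values. For illustration only (editorial sketch, invented values), a requirement that passes those checks:

	// Illustrative sketch: keep at least three instance-type options open
	// for the scheduler; 1 <= minValues <= 50 and minValues <= len(Values).
	requirement := NodeSelectorRequirementWithMinValues{
		NodeSelectorRequirement: v1.NodeSelectorRequirement{
			Key:      v1.LabelInstanceTypeStable,
			Operator: v1.NodeSelectorOpIn,
			Values:   []string{"instance-type-1", "instance-type-2", "instance-type-3"},
		},
		MinValues: lo.ToPtr(3),
	}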
+	Context("Labels", func() {
+		It("should allow unrecognized labels", func() {
+			nodePool.Spec.Template.Labels = map[string]string{"foo": randomdata.SillyName()}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+			Expect(nodePool.RuntimeValidate()).To(Succeed())
+		})
+		It("should fail for the karpenter.sh/nodepool label", func() {
+			nodePool.Spec.Template.Labels = map[string]string{NodePoolLabelKey: randomdata.SillyName()}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+		})
+		It("should fail for invalid label keys", func() {
+			nodePool.Spec.Template.Labels = map[string]string{"spaces are not allowed": randomdata.SillyName()}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+		})
+		It("should fail at runtime for label keys that are too long", func() {
+			oldNodePool := nodePool.DeepCopy()
+			nodePool.Spec.Template.Labels = map[string]string{fmt.Sprintf("test.com.test.%s/test", strings.ToLower(randomdata.Alphanumeric(250))): randomdata.SillyName()}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+			Expect(env.Client.Delete(ctx, nodePool)).To(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+			nodePool = oldNodePool.DeepCopy()
+			nodePool.Spec.Template.Labels = map[string]string{fmt.Sprintf("test.com.test/test-%s", strings.ToLower(randomdata.Alphanumeric(250))): randomdata.SillyName()}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+		})
+		It("should fail for invalid label values", func() {
+			nodePool.Spec.Template.Labels = map[string]string{randomdata.SillyName(): "/ is not allowed"}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+			Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+		})
+		It("should fail for restricted label domains", func() {
+			for label := range RestrictedLabelDomains {
+				fmt.Println(label)
+				nodePool.Spec.Template.Labels = map[string]string{label + "/unknown": randomdata.SillyName()}
+				Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+				Expect(nodePool.RuntimeValidate()).ToNot(Succeed())
+			}
+		})
+		It("should allow labels kOps require", func() {
+			nodePool.Spec.Template.Labels = map[string]string{
+				"kops.k8s.io/instancegroup": "karpenter-nodes",
+				"kops.k8s.io/gpu":           "1",
+			}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+			Expect(nodePool.RuntimeValidate()).To(Succeed())
+		})
+		It("should allow labels in restricted domains exceptions list", func() {
+			oldNodePool := nodePool.DeepCopy()
+			for label := range LabelDomainExceptions {
+				fmt.Println(label)
+				nodePool.Spec.Template.Labels = map[string]string{
+					label: "test-value",
+				}
+				Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+				Expect(env.Client.Delete(ctx, nodePool)).To(Succeed())
+				Expect(nodePool.RuntimeValidate()).To(Succeed())
+				nodePool = oldNodePool.DeepCopy()
+			}
+		})
+		It("should allow labels prefixed with the restricted domain exceptions", func() {
+			oldNodePool := nodePool.DeepCopy()
+			for label := range LabelDomainExceptions {
+				nodePool.Spec.Template.Labels = map[string]string{
+					fmt.Sprintf("%s/key", label): "test-value",
+				}
+				Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+				Expect(env.Client.Delete(ctx, nodePool)).To(Succeed())
+				Expect(nodePool.RuntimeValidate()).To(Succeed())
+				nodePool = oldNodePool.DeepCopy()
+			}
+		})
+		It("should allow subdomain labels in restricted domains exceptions list", func() {
+			oldNodePool := nodePool.DeepCopy()
+			for label := range LabelDomainExceptions {
+				nodePool.Spec.Template.Labels = map[string]string{
+					fmt.Sprintf("subdomain.%s", label): "test-value",
+				}
+				Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+				Expect(env.Client.Delete(ctx, nodePool)).To(Succeed())
+				Expect(nodePool.RuntimeValidate()).To(Succeed())
+				nodePool = oldNodePool.DeepCopy()
+			}
+		})
+		It("should allow subdomain labels prefixed with the restricted domain exceptions", func() {
+			oldNodePool := nodePool.DeepCopy()
+			for label := range LabelDomainExceptions {
+				nodePool.Spec.Template.Labels = map[string]string{
+					fmt.Sprintf("subdomain.%s/key", label): "test-value",
+				}
+				Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+				Expect(env.Client.Delete(ctx, nodePool)).To(Succeed())
+				Expect(nodePool.RuntimeValidate()).To(Succeed())
+				nodePool = oldNodePool.DeepCopy()
+			}
+		})
+	})
+	Context("TerminationGracePeriod", func() {
+		It("should succeed on a positive terminationGracePeriod duration", func() {
+			nodePool.Spec.Template.Spec.TerminationGracePeriod = &metav1.Duration{Duration: time.Second * 300}
+			Expect(env.Client.Create(ctx, nodePool)).To(Succeed())
+		})
+		It("should fail on a negative terminationGracePeriod duration", func() {
+			nodePool.Spec.Template.Spec.TerminationGracePeriod = &metav1.Duration{Duration: time.Second * -30}
+			Expect(env.Client.Create(ctx, nodePool)).ToNot(Succeed())
+		})
+	})
+})
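The TerminationGracePeriod cases only assert sign handling: any positive duration is accepted and any negative duration is rejected. For illustration only (editorial sketch, invented value):

	// Illustrative sketch: give draining pods up to five minutes before
	// Karpenter forcibly terminates the node.
	nodePool.Spec.Template.Spec.TerminationGracePeriod = &metav1.Duration{Duration: 5 * time.Minute}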
diff --git a/pkg/apis/v1/register.go b/pkg/apis/v1/register.go
new file mode 100644
index 0000000000..0682bc22c3
--- /dev/null
+++ b/pkg/apis/v1/register.go
@@ -0,0 +1,42 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const (
+	Group              = "karpenter.sh"
+	CompatibilityGroup = "compatibility." + Group
+)
+
+var (
+	SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: "v1"}
+	SchemeBuilder      = runtime.NewSchemeBuilder(func(scheme *runtime.Scheme) error {
+		scheme.AddKnownTypes(SchemeGroupVersion,
+			&NodePool{},
+			&NodePoolList{},
+			&NodeClaim{},
+			&NodeClaimList{},
+		)
+		metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+		return nil
+	})
+)
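register.go follows the standard apimachinery registration pattern. For illustration only (editorial sketch; the surrounding setup is assumed and not part of the diff), a caller could install these kinds into a scheme like so:

	// Illustrative sketch: install the karpenter.sh/v1 kinds so a
	// controller-runtime client can serialize NodePools and NodeClaims.
	s := runtime.NewScheme()
	if err := SchemeBuilder.AddToScheme(s); err != nil {
		panic(err) // a real caller would propagate the error instead
	}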
"sigs.k8s.io/karpenter/pkg/test/expectations" +) + +var ctx context.Context +var env *test.Environment +var cloudProvider *fake.CloudProvider + +func TestAPIs(t *testing.T) { + ctx = TestContextWithLogger(t) + RegisterFailHandler(Fail) + RunSpecs(t, "v1") +} + +var _ = BeforeSuite(func() { + env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...)) + cloudProvider = fake.NewCloudProvider() +}) + +var _ = AfterEach(func() { + ExpectCleanedUp(ctx, env.Client) +}) + +var _ = AfterSuite(func() { + Expect(env.Stop()).To(Succeed(), "Failed to stop environment") +}) + +var _ = Describe("OrderByWeight", func() { + It("should order the NodePools by weight", func() { + // Generate 10 NodePools that have random weights, some might have the same weights + var nodePools []v1.NodePool + for i := 0; i < 10; i++ { + np := &v1.NodePool{ + ObjectMeta: test.ObjectMeta(), + Spec: v1.NodePoolSpec{ + Weight: lo.ToPtr[int32](int32(rand.Intn(100) + 1)), //nolint:gosec + Template: v1.NodeClaimTemplate{ + Spec: v1.NodeClaimSpec{ + NodeClassRef: &v1.NodeClassReference{ + Name: "default", + }, + Requirements: []v1.NodeSelectorRequirementWithMinValues{}, + }, + }, + }, + } + nodePools = append(nodePools, *np) + } + + nodePools = lo.Shuffle(nodePools) + nodePoolList := v1.NodePoolList{Items: nodePools} + nodePoolList.OrderByWeight() + + lastWeight := 101 // This is above the allowed weight values + for _, np := range nodePoolList.Items { + Expect(lo.FromPtr(np.Spec.Weight)).To(BeNumerically("<=", lastWeight)) + lastWeight = int(lo.FromPtr(np.Spec.Weight)) + } + }) + It("should order the NodePools by name when the weights are the same", func() { + // Generate 10 NodePools with the same weight + var nodePools []v1.NodePool + for i := 0; i < 10; i++ { + np := &v1.NodePool{ + ObjectMeta: test.ObjectMeta(), + Spec: v1.NodePoolSpec{ + Weight: lo.ToPtr[int32](10), //nolint:gosec + Template: v1.NodeClaimTemplate{ + Spec: v1.NodeClaimSpec{ + NodeClassRef: &v1.NodeClassReference{ + Name: "default", + }, + Requirements: []v1.NodeSelectorRequirementWithMinValues{}, + }, + }, + }, + } + nodePools = append(nodePools, *np) + } + + nodePools = lo.Shuffle(nodePools) + nodePoolList := v1.NodePoolList{Items: nodePools} + nodePoolList.OrderByWeight() + + lastName := "zzzzzzzzzzzzzzzzzzzzzzzz" // large string value + for _, np := range nodePoolList.Items { + Expect(np.Name < lastName).To(BeTrue()) + lastName = np.Name + } + }) +}) diff --git a/pkg/apis/v1/taints.go b/pkg/apis/v1/taints.go new file mode 100644 index 0000000000..b504223003 --- /dev/null +++ b/pkg/apis/v1/taints.go @@ -0,0 +1,47 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + + "sigs.k8s.io/karpenter/pkg/apis/v1beta1" +) + +// Karpenter specific taints +const ( + DisruptedTaintKey = Group + "/disrupted" + UnregisteredTaintKey = Group + "/unregistered" +) + +var ( + // DisruptedNoScheduleTaint is applied by the disruption and termination controllers to nodes disrupted by Karpenter. 
diff --git a/pkg/apis/v1/taints.go b/pkg/apis/v1/taints.go
new file mode 100644
index 0000000000..b504223003
--- /dev/null
+++ b/pkg/apis/v1/taints.go
@@ -0,0 +1,47 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	v1 "k8s.io/api/core/v1"
+
+	"sigs.k8s.io/karpenter/pkg/apis/v1beta1"
+)
+
+// Karpenter specific taints
+const (
+	DisruptedTaintKey    = Group + "/disrupted"
+	UnregisteredTaintKey = Group + "/unregistered"
+)
+
+var (
+	// DisruptedNoScheduleTaint is applied by the disruption and termination controllers to nodes disrupted by Karpenter.
+	// This ensures no additional pods schedule to those nodes while they are terminating.
+	DisruptedNoScheduleTaint = v1.Taint{
+		Key:    DisruptedTaintKey,
+		Effect: v1.TaintEffectNoSchedule,
+	}
+	UnregisteredNoExecuteTaint = v1.Taint{
+		Key:    UnregisteredTaintKey,
+		Effect: v1.TaintEffectNoExecute,
+	}
+)
+
+// IsDisruptingTaint checks if the taint is either the v1 or v1beta1 disruption taint.
+func IsDisruptingTaint(taint v1.Taint) bool {
+	return taint.MatchTaint(&DisruptedNoScheduleTaint) || v1beta1.IsDisruptingTaint(taint)
+}
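IsDisruptingTaint lets callers treat the v1 and v1beta1 cordon taints interchangeably during the API migration. For illustration only (editorial sketch; the helper name is invented, and v1 is the core-API alias used in taints.go above):

	// Illustrative sketch: report whether Karpenter has already cordoned
	// this node with either generation of the disruption taint.
	func nodeIsDisrupting(node *v1.Node) bool {
		for _, taint := range node.Spec.Taints {
			if IsDisruptingTaint(taint) {
				return true
			}
		}
		return false
	}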
diff --git a/pkg/apis/v1/zz_generated.deepcopy.go b/pkg/apis/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..89c9d80730
--- /dev/null
+++ b/pkg/apis/v1/zz_generated.deepcopy.go
@@ -0,0 +1,488 @@
+//go:build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"github.com/awslabs/operatorpkg/status"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	timex "time"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Budget) DeepCopyInto(out *Budget) {
+	*out = *in
+	if in.Reasons != nil {
+		in, out := &in.Reasons, &out.Reasons
+		*out = make([]DisruptionReason, len(*in))
+		copy(*out, *in)
+	}
+	if in.Schedule != nil {
+		in, out := &in.Schedule, &out.Schedule
+		*out = new(string)
+		**out = **in
+	}
+	if in.Duration != nil {
+		in, out := &in.Duration, &out.Duration
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Budget.
+func (in *Budget) DeepCopy() *Budget {
+	if in == nil {
+		return nil
+	}
+	out := new(Budget)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Disruption) DeepCopyInto(out *Disruption) {
+	*out = *in
+	in.ConsolidateAfter.DeepCopyInto(&out.ConsolidateAfter)
+	if in.Budgets != nil {
+		in, out := &in.Budgets, &out.Budgets
+		*out = make([]Budget, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Disruption.
+func (in *Disruption) DeepCopy() *Disruption {
+	if in == nil {
+		return nil
+	}
+	out := new(Disruption)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in Limits) DeepCopyInto(out *Limits) {
+	{
+		in := &in
+		*out = make(Limits, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Limits.
+func (in Limits) DeepCopy() Limits {
+	if in == nil {
+		return nil
+	}
+	out := new(Limits)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NillableDuration) DeepCopyInto(out *NillableDuration) {
+	*out = *in
+	if in.Duration != nil {
+		in, out := &in.Duration, &out.Duration
+		*out = new(timex.Duration)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NillableDuration.
+func (in *NillableDuration) DeepCopy() *NillableDuration {
+	if in == nil {
+		return nil
+	}
+	out := new(NillableDuration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeClaim) DeepCopyInto(out *NodeClaim) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeClaim.
+func (in *NodeClaim) DeepCopy() *NodeClaim {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeClaim)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodeClaim) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeClaimList) DeepCopyInto(out *NodeClaimList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]NodeClaim, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeClaimList.
+func (in *NodeClaimList) DeepCopy() *NodeClaimList {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeClaimList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodeClaimList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeClaimSpec) DeepCopyInto(out *NodeClaimSpec) {
+	*out = *in
+	if in.Taints != nil {
+		in, out := &in.Taints, &out.Taints
+		*out = make([]corev1.Taint, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.StartupTaints != nil {
+		in, out := &in.StartupTaints, &out.StartupTaints
+		*out = make([]corev1.Taint, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Requirements != nil {
+		in, out := &in.Requirements, &out.Requirements
+		*out = make([]NodeSelectorRequirementWithMinValues, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.NodeClassRef != nil {
+		in, out := &in.NodeClassRef, &out.NodeClassRef
+		*out = new(NodeClassReference)
+		**out = **in
+	}
+	if in.TerminationGracePeriod != nil {
+		in, out := &in.TerminationGracePeriod, &out.TerminationGracePeriod
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+	in.ExpireAfter.DeepCopyInto(&out.ExpireAfter)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeClaimSpec.
+func (in *NodeClaimSpec) DeepCopy() *NodeClaimSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeClaimSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeClaimStatus) DeepCopyInto(out *NodeClaimStatus) {
+	*out = *in
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(corev1.ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Allocatable != nil {
+		in, out := &in.Allocatable, &out.Allocatable
+		*out = make(corev1.ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]status.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.LastPodEventTime.DeepCopyInto(&out.LastPodEventTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeClaimStatus.
+func (in *NodeClaimStatus) DeepCopy() *NodeClaimStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeClaimStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeClaimTemplate) DeepCopyInto(out *NodeClaimTemplate) {
+	*out = *in
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeClaimTemplate.
+func (in *NodeClaimTemplate) DeepCopy() *NodeClaimTemplate {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeClaimTemplate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeClassReference) DeepCopyInto(out *NodeClassReference) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeClassReference.
+func (in *NodeClassReference) DeepCopy() *NodeClassReference {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeClassReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePool) DeepCopyInto(out *NodePool) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePool.
+func (in *NodePool) DeepCopy() *NodePool {
+	if in == nil {
+		return nil
+	}
+	out := new(NodePool)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodePool) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePoolList) DeepCopyInto(out *NodePoolList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]NodePool, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolList.
+func (in *NodePoolList) DeepCopy() *NodePoolList {
+	if in == nil {
+		return nil
+	}
+	out := new(NodePoolList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodePoolList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePoolSpec) DeepCopyInto(out *NodePoolSpec) {
+	*out = *in
+	in.Template.DeepCopyInto(&out.Template)
+	in.Disruption.DeepCopyInto(&out.Disruption)
+	if in.Limits != nil {
+		in, out := &in.Limits, &out.Limits
+		*out = make(Limits, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Weight != nil {
+		in, out := &in.Weight, &out.Weight
+		*out = new(int32)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolSpec.
+func (in *NodePoolSpec) DeepCopy() *NodePoolSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(NodePoolSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePoolStatus) DeepCopyInto(out *NodePoolStatus) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make(corev1.ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]status.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolStatus.
+func (in *NodePoolStatus) DeepCopy() *NodePoolStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NodePoolStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSelectorRequirementWithMinValues) DeepCopyInto(out *NodeSelectorRequirementWithMinValues) {
+	*out = *in
+	in.NodeSelectorRequirement.DeepCopyInto(&out.NodeSelectorRequirement)
+	if in.MinValues != nil {
+		in, out := &in.MinValues, &out.MinValues
+		*out = new(int)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorRequirementWithMinValues.
+func (in *NodeSelectorRequirementWithMinValues) DeepCopy() *NodeSelectorRequirementWithMinValues {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSelectorRequirementWithMinValues)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) {
+	*out = *in
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta.
+func (in *ObjectMeta) DeepCopy() *ObjectMeta {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectMeta)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) {
+	*out = *in
+	if in.Requests != nil {
+		in, out := &in.Requests, &out.Requests
+		*out = make(corev1.ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements.
+func (in *ResourceRequirements) DeepCopy() *ResourceRequirements {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceRequirements)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/pkg/apis/v1beta1/nodepool.go b/pkg/apis/v1beta1/nodepool.go
index 19848687e5..41215f19c5 100644
--- a/pkg/apis/v1beta1/nodepool.go
+++ b/pkg/apis/v1beta1/nodepool.go
@@ -177,6 +177,7 @@ type ObjectMeta struct {
 
 // NodePool is the Schema for the NodePools API
 // +kubebuilder:object:root=true
+// +kubebuilder:storageversion
 // +kubebuilder:resource:path=nodepools,scope=Cluster,categories=karpenter
 // +kubebuilder:printcolumn:name="NodeClass",type="string",JSONPath=".spec.template.spec.nodeClassRef.name",description=""
 // +kubebuilder:printcolumn:name="Weight",type="string",JSONPath=".spec.weight",priority=1,description=""