From 3499d17985624965d67b34340e222e986bbbba8f Mon Sep 17 00:00:00 2001 From: Krzysztof Ostrowski Date: Wed, 21 Feb 2024 10:49:05 +0100 Subject: [PATCH] opts: enforce loopback (or mTLS) for H2C/id header h2c must happen on a loopback connection as the connection is not using TLS at all. identity headers must use a loopback connection or a mTLS conection is required. Trust in both directions is important. kube-rbac-proxy needs to provide certs, such that upstream can verify the authenticity of the headers. upstream needs certs, such that we can be sure to not leak secrets. --- .../app/options/proxyoptions.go | 97 +++++++- pkg/authn/identityheaders/identityheaders.go | 7 +- .../deployment-proxy-non-loopback.yaml | 35 +++ .../deployment-upstream-non-loopback.yaml | 36 +++ test/e2e/h2c_upstream.go | 24 +- test/e2e/identityheaders.go | 132 ++++++++++ .../default/clusterRole-client.yaml | 7 + .../identityheaders/default/clusterRole.yaml | 14 ++ .../default/clusterRoleBinding-client.yaml | 12 + .../default/clusterRoleBinding.yaml | 13 + .../default/configmap-nginx.yaml | 17 ++ .../identityheaders/default/deployment.yaml | 43 ++++ test/e2e/identityheaders/default/service.yaml | 14 ++ .../default/serviceAccount.yaml | 5 + .../insecure/clusterRole-client.yaml | 7 + .../identityheaders/insecure/clusterRole.yaml | 14 ++ .../insecure/clusterRoleBinding-client.yaml | 12 + .../insecure/clusterRoleBinding.yaml | 13 + .../insecure/deployment-proxy.yaml | 31 +++ .../insecure/deployment-upstream.yaml | 33 +++ .../e2e/identityheaders/insecure/service.yaml | 14 ++ .../insecure/serviceAccount.yaml | 5 + .../secure/clusterRole-client.yaml | 7 + .../identityheaders/secure/clusterRole.yaml | 14 ++ .../secure/clusterRoleBinding-client.yaml | 12 + .../secure/clusterRoleBinding.yaml | 13 + .../secure/configmap-nginx.yaml | 20 ++ .../secure/deployment-proxy.yaml | 68 ++++++ .../secure/deployment-upstream.yaml | 48 ++++ .../identityheaders/secure/service-proxy.yaml | 14 ++ 
.../secure/service-upstream.yaml | 14 ++ .../secure/serviceAccount.yaml | 5 + test/e2e/main_test.go | 1 + test/kubetest/client.go | 17 +- test/kubetest/kubernetes.go | 140 ++++++++++- test/kubetest/tls.go | 225 ++++++++++++++++++ 36 files changed, 1169 insertions(+), 14 deletions(-) create mode 100644 test/e2e/h2c-upstream/deployment-proxy-non-loopback.yaml create mode 100644 test/e2e/h2c-upstream/deployment-upstream-non-loopback.yaml create mode 100644 test/e2e/identityheaders.go create mode 100644 test/e2e/identityheaders/default/clusterRole-client.yaml create mode 100644 test/e2e/identityheaders/default/clusterRole.yaml create mode 100644 test/e2e/identityheaders/default/clusterRoleBinding-client.yaml create mode 100644 test/e2e/identityheaders/default/clusterRoleBinding.yaml create mode 100644 test/e2e/identityheaders/default/configmap-nginx.yaml create mode 100644 test/e2e/identityheaders/default/deployment.yaml create mode 100644 test/e2e/identityheaders/default/service.yaml create mode 100644 test/e2e/identityheaders/default/serviceAccount.yaml create mode 100644 test/e2e/identityheaders/insecure/clusterRole-client.yaml create mode 100644 test/e2e/identityheaders/insecure/clusterRole.yaml create mode 100644 test/e2e/identityheaders/insecure/clusterRoleBinding-client.yaml create mode 100644 test/e2e/identityheaders/insecure/clusterRoleBinding.yaml create mode 100644 test/e2e/identityheaders/insecure/deployment-proxy.yaml create mode 100644 test/e2e/identityheaders/insecure/deployment-upstream.yaml create mode 100644 test/e2e/identityheaders/insecure/service.yaml create mode 100644 test/e2e/identityheaders/insecure/serviceAccount.yaml create mode 100644 test/e2e/identityheaders/secure/clusterRole-client.yaml create mode 100644 test/e2e/identityheaders/secure/clusterRole.yaml create mode 100644 test/e2e/identityheaders/secure/clusterRoleBinding-client.yaml create mode 100644 test/e2e/identityheaders/secure/clusterRoleBinding.yaml create mode 100644 
test/e2e/identityheaders/secure/configmap-nginx.yaml create mode 100644 test/e2e/identityheaders/secure/deployment-proxy.yaml create mode 100644 test/e2e/identityheaders/secure/deployment-upstream.yaml create mode 100644 test/e2e/identityheaders/secure/service-proxy.yaml create mode 100644 test/e2e/identityheaders/secure/service-upstream.yaml create mode 100644 test/e2e/identityheaders/secure/serviceAccount.yaml create mode 100644 test/kubetest/tls.go diff --git a/cmd/kube-rbac-proxy/app/options/proxyoptions.go b/cmd/kube-rbac-proxy/app/options/proxyoptions.go index b8640652c..4f33dcfcc 100644 --- a/cmd/kube-rbac-proxy/app/options/proxyoptions.go +++ b/cmd/kube-rbac-proxy/app/options/proxyoptions.go @@ -17,10 +17,16 @@ limitations under the License. package options import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" "fmt" + "net" "net/url" "os" "path" + "time" "github.com/ghodss/yaml" "github.com/spf13/pflag" @@ -36,8 +42,9 @@ import ( // ProxyOptions are options specific to the kube-rbac-proxy type ProxyOptions struct { - Upstream string - UpstreamForceH2C bool + Upstream string + UpstreamForceH2C bool + UpstreamDNSTimeout int UpstreamCAFile string UpstreamClientCertFile string @@ -57,6 +64,7 @@ type ProxyOptions struct { func (o *ProxyOptions) AddFlags(flagset *pflag.FlagSet) { flagset.StringVar(&o.Upstream, "upstream", "", "The upstream URL to proxy to once requests have successfully been authenticated and authorized.") flagset.BoolVar(&o.UpstreamForceH2C, "upstream-force-h2c", false, "Force h2c to communicate with the upstream. This is required when the upstream speaks h2c(http/2 cleartext - insecure variant of http/2) only. For example, go-grpc server in the insecure mode, such as helm's tiller w/o TLS, speaks h2c only") + flagset.IntVar(&o.UpstreamDNSTimeout, "upstream-dns-timeout", 5, "The timeout in seconds for DNS lookups of the upstream. 
If set to 0, no timeout is set.") // upstream tls options flagset.StringVar(&o.UpstreamCAFile, "upstream-ca-file", "", "The CA the upstream uses for TLS connection. This is required when the upstream uses TLS and its own CA certificate") @@ -103,6 +111,17 @@ func (o *ProxyOptions) Validate() []error { } } + // Verify secure connection settings, if necessary. + if identityheaders.HasIdentityHeadersEnabled(o.UpstreamHeader) || o.UpstreamForceH2C { + isSecure, err := hasSecureConnection(o) + if err != nil { + errs = append(errs, err) + } + if !isSecure { + errs = append(errs, errors.New("configuration requires a secure connection (mTLS or loopback) to the upstream (h2c/identity headers)")) + } + } + return errs } @@ -125,6 +144,7 @@ func (o *ProxyOptions) ApplyTo(c *server.KubeRBACProxyInfo, a *serverconfig.Auth } } + c.UpstreamHeaders = o.UpstreamHeader c.IgnorePaths = o.IgnorePaths c.AllowPaths = o.AllowPaths a.APIAudiences = o.TokenAudiences @@ -132,6 +152,79 @@ func (o *ProxyOptions) ApplyTo(c *server.KubeRBACProxyInfo, a *serverconfig.Auth return nil } +func hasSecureConnection(o *ProxyOptions) (bool, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(o.UpstreamDNSTimeout)*time.Second) + defer cancel() + + isLoopback, err := isLoopbackAddress(ctx, o.Upstream) + if err == nil && isLoopback { + return true, nil + } + + if o.UpstreamForceH2C { + return false, fmt.Errorf("loopback address is required for h2c") + } + + // isMTLSPossible emphasizes that we can't verify that upstream verifies the + // client's certificate. + isMTLSPossible, err := isMTLSConfigured(o.UpstreamClientCertFile, o.UpstreamClientKeyFile, o.UpstreamCAFile) + if !isMTLSPossible || err != nil { + return false, err + } + + return true, nil +} + +func isMTLSConfigured(upstreamClientCertPath, upstreamClientKeyPath, upstreamCAPath string) (bool, error) { + // Check if client is configured to provide a certificate. 
+ if len(upstreamClientCertPath) == 0 { + return false, nil + } + + _, err := tls.LoadX509KeyPair(upstreamClientCertPath, upstreamClientKeyPath) + if err != nil { + return false, fmt.Errorf("failed to read upstream client cert/key: %w", err) + } + + // Check if we have a CA to verify upstream. + upstreamCAPEM, err := os.ReadFile(upstreamCAPath) + if err != nil { + return false, fmt.Errorf("failed to read the upstream CA file: %w", err) + } + + upstreamCACertPool := x509.NewCertPool() + if ok := upstreamCACertPool.AppendCertsFromPEM(upstreamCAPEM); !ok { + return false, errors.New("error parsing upstream CA certificate") + } + + return true, nil +} + +func isLoopbackAddress(ctx context.Context, address string) (bool, error) { + u, err := url.Parse(address) + if err != nil { + return false, fmt.Errorf("failed to parse upstream URL: %w", err) + } + + ip := net.ParseIP(u.Hostname()) + if ip != nil { + return ip.IsLoopback(), nil + } + + ips, err := (&net.Resolver{}).LookupIPAddr(ctx, u.Hostname()) + if err != nil { + return false, fmt.Errorf("failed to lookup ip: %w", err) + } + + for _, ip := range ips { + if !ip.IP.IsLoopback() { + return false, nil + } + } + + return true, nil +} + type configfile struct { AuthorizationConfig *authz.AuthzConfig `json:"authorization,omitempty"` } diff --git a/pkg/authn/identityheaders/identityheaders.go b/pkg/authn/identityheaders/identityheaders.go index 98a063269..78354e779 100644 --- a/pkg/authn/identityheaders/identityheaders.go +++ b/pkg/authn/identityheaders/identityheaders.go @@ -38,8 +38,7 @@ type AuthnHeaderConfig struct { // WithAuthHeaders adds identity information to the headers. // Must not be used, if connection is not encrypted with TLS. 
func WithAuthHeaders(handler http.Handler, cfg *AuthnHeaderConfig) http.Handler { - upstreamHeadersEnabled := len(cfg.GroupsFieldName) > 0 || len(cfg.UserFieldName) > 0 - if !upstreamHeadersEnabled { + if !HasIdentityHeadersEnabled(cfg) { return handler } @@ -55,3 +54,7 @@ func WithAuthHeaders(handler http.Handler, cfg *AuthnHeaderConfig) http.Handler handler.ServeHTTP(w, req) }) } + +func HasIdentityHeadersEnabled(cfg *AuthnHeaderConfig) bool { + return len(cfg.GroupsFieldName) > 0 || len(cfg.UserFieldName) > 0 +} diff --git a/test/e2e/h2c-upstream/deployment-proxy-non-loopback.yaml b/test/e2e/h2c-upstream/deployment-proxy-non-loopback.yaml new file mode 100644 index 000000000..65b4e4049 --- /dev/null +++ b/test/e2e/h2c-upstream/deployment-proxy-non-loopback.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-rbac-proxy + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app: kube-rbac-proxy + template: + metadata: + labels: + app: kube-rbac-proxy + spec: + serviceAccountName: kube-rbac-proxy + containers: + - name: kube-rbac-proxy + image: quay.io/brancz/kube-rbac-proxy:local + args: + - "--secure-port=8443" + - "--upstream=http://http-echo-service.default.svc.cluster.local:80/" + - "--authentication-skip-lookup" + - "--upstream-force-h2c=true" + - "--logtostderr=true" + - "--v=10" + ports: + - containerPort: 8443 + name: https + - name: prometheus-example-app + image: quay.io/brancz/prometheus-example-app:v0.4.0 + args: + - "--bind=127.0.0.1:8081" + - "--h2c=true" + diff --git a/test/e2e/h2c-upstream/deployment-upstream-non-loopback.yaml b/test/e2e/h2c-upstream/deployment-upstream-non-loopback.yaml new file mode 100644 index 000000000..a97936b73 --- /dev/null +++ b/test/e2e/h2c-upstream/deployment-upstream-non-loopback.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: http-echo + labels: + app: http-echo +spec: + replicas: 1 + selector: + matchLabels: + app: http-echo + template: + 
metadata: + labels: + app: http-echo + spec: + containers: + - name: http-echo + image: mendhak/http-https-echo + env: + - name: HTTP_PORT + value: 8080 + ports: + - containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: http-echo-service +spec: + ports: + - port: 80 + targetPort: 8080 + selector: + app: http-echo + diff --git a/test/e2e/h2c_upstream.go b/test/e2e/h2c_upstream.go index c0ebbb471..305960fb4 100644 --- a/test/e2e/h2c_upstream.go +++ b/test/e2e/h2c_upstream.go @@ -29,7 +29,29 @@ func testH2CUpstream(client kubernetes.Interface) kubetest.TestSuite { command := `curl --connect-timeout 5 -v -s -k --fail -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" https://kube-rbac-proxy.default.svc.cluster.local:8443/metrics` kubetest.Scenario{ - Name: "With H2C Upstream", + Name: "With H2C non-local upstream", + + Given: kubetest.Actions( + kubetest.CreatedManifests( + client, + "h2c-upstream/clusterRole.yaml", + "h2c-upstream/clusterRoleBinding.yaml", + "h2c-upstream/deployment-proxy-non-loopback.yaml", + "h2c-upstream/deployment-upstream-non-loopback.yaml", + "h2c-upstream/service.yaml", + "h2c-upstream/serviceAccount.yaml", + ), + ), + Then: kubetest.Actions( + kubetest.PodIsCrashLoopBackOff( + client, + "kube-rbac-proxy", + ), + ), + }.Run(t) + + kubetest.Scenario{ + Name: "With H2C local upstream", Given: kubetest.Actions( kubetest.CreatedManifests( diff --git a/test/e2e/identityheaders.go b/test/e2e/identityheaders.go new file mode 100644 index 000000000..b7429253e --- /dev/null +++ b/test/e2e/identityheaders.go @@ -0,0 +1,132 @@ +/* +Copyright 2024 the kube-rbac-proxy maintainers. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package e2e + +import ( + "testing" + + "github.com/brancz/kube-rbac-proxy/test/kubetest" + "k8s.io/client-go/kubernetes" +) + +func testIdentityHeaders(client kubernetes.Interface) kubetest.TestSuite { + return func(t *testing.T) { + command := `curl --connect-timeout 5 -v -s -k --fail -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" https://kube-rbac-proxy.default.svc.cluster.local:8443/metrics` + + kubetest.Scenario{ + Name: "With x-remote-user", + Description: ` + Verifies that remote user is set to the service account, when + upstream is listening on loopback through an HTTP connection. 
+ `, + + Given: kubetest.Actions( + kubetest.CreatedManifests( + client, + "identityheaders/default/clusterRole-client.yaml", + "identityheaders/default/clusterRole.yaml", + "identityheaders/default/clusterRoleBinding-client.yaml", + "identityheaders/default/clusterRoleBinding.yaml", + "identityheaders/default/configmap-nginx.yaml", + "identityheaders/default/deployment.yaml", + "identityheaders/default/service.yaml", + "identityheaders/default/serviceAccount.yaml", + ), + ), + When: kubetest.Actions( + kubetest.PodsAreReady( + client, + 1, + "app=kube-rbac-proxy", + ), + kubetest.ServiceIsReady( + client, + "kube-rbac-proxy", + ), + ), + Then: kubetest.Actions( + kubetest.ClientLogsContain( + client, + command, + []string{`< x-remote-user: system:serviceaccount:default:default`}, + nil, + ), + ), + }.Run(t) + + kubetest.Scenario{ + Name: "With http on no loopback", + Description: ` + Verifies that the proxy is not able to connect to the remote upstream service, + if upstream isn't offering TLS, when identity headers are being used. + `, + + Given: kubetest.Actions( + kubetest.CreatedManifests( + client, + "identityheaders/insecure/clusterRole-client.yaml", + "identityheaders/insecure/clusterRole.yaml", + "identityheaders/insecure/clusterRoleBinding-client.yaml", + "identityheaders/insecure/clusterRoleBinding.yaml", + "identityheaders/insecure/deployment-proxy.yaml", + "identityheaders/insecure/deployment-upstream.yaml", + "identityheaders/insecure/service.yaml", + "identityheaders/insecure/serviceAccount.yaml", + ), + ), + Then: kubetest.Actions( + kubetest.PodIsCrashLoopBackOff( + client, + "kube-rbac-proxy", + ), + ), + }.Run(t) + + kubetest.Scenario{ + Name: "With https on no loopback", + Description: ` + Verifies that the proxy is able to connect to the remote upstream service, + through a mTLS connection, when providing identity headers. 
+ `, + + Given: kubetest.Actions( + kubetest.CreateServerCerts(client, "nginx"), + kubetest.CreateClientCerts(client, "kube-rbac-proxy-client"), + kubetest.CreateServerCerts(client, "kube-rbac-proxy"), + kubetest.CreatedManifests( + client, + "identityheaders/secure/clusterRole-client.yaml", + "identityheaders/secure/clusterRole.yaml", + "identityheaders/secure/clusterRoleBinding-client.yaml", + "identityheaders/secure/clusterRoleBinding.yaml", + "identityheaders/secure/configmap-nginx.yaml", + "identityheaders/secure/deployment-proxy.yaml", + "identityheaders/secure/deployment-upstream.yaml", + "identityheaders/secure/service-proxy.yaml", + "identityheaders/secure/service-upstream.yaml", + "identityheaders/secure/serviceAccount.yaml", + ), + ), + Then: kubetest.Actions( + kubetest.ClientSucceeds( + client, + command, + nil, + ), + ), + }.Run(t) + } +} diff --git a/test/e2e/identityheaders/default/clusterRole-client.yaml b/test/e2e/identityheaders/default/clusterRole-client.yaml new file mode 100644 index 000000000..421a9d947 --- /dev/null +++ b/test/e2e/identityheaders/default/clusterRole-client.yaml @@ -0,0 +1,7 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics +rules: + - nonResourceURLs: ["/metrics"] + verbs: ["get"] diff --git a/test/e2e/identityheaders/default/clusterRole.yaml b/test/e2e/identityheaders/default/clusterRole.yaml new file mode 100644 index 000000000..e9bc500b7 --- /dev/null +++ b/test/e2e/identityheaders/default/clusterRole.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kube-rbac-proxy + namespace: default +rules: + - apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: + - subjectaccessreviews + verbs: ["create"] diff --git a/test/e2e/identityheaders/default/clusterRoleBinding-client.yaml b/test/e2e/identityheaders/default/clusterRoleBinding-client.yaml new file 
mode 100644 index 000000000..4f93e2b8c --- /dev/null +++ b/test/e2e/identityheaders/default/clusterRoleBinding-client.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metrics +subjects: + - kind: ServiceAccount + name: default + namespace: default diff --git a/test/e2e/identityheaders/default/clusterRoleBinding.yaml b/test/e2e/identityheaders/default/clusterRoleBinding.yaml new file mode 100644 index 000000000..f7be8fa4e --- /dev/null +++ b/test/e2e/identityheaders/default/clusterRoleBinding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kube-rbac-proxy + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-rbac-proxy +subjects: + - kind: ServiceAccount + name: kube-rbac-proxy + namespace: default diff --git a/test/e2e/identityheaders/default/configmap-nginx.yaml b/test/e2e/identityheaders/default/configmap-nginx.yaml new file mode 100644 index 000000000..d4705c531 --- /dev/null +++ b/test/e2e/identityheaders/default/configmap-nginx.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: null + name: nginx-config +data: + server.conf: |+ + server { + listen 8080; + server_name upstream_nginx; + + location /metrics { + add_header Content-Type text/plain; + add_header x-remote-user $http_x_remote_user; + return 200 'metrics endpoint reached\n'; + } + } diff --git a/test/e2e/identityheaders/default/deployment.yaml b/test/e2e/identityheaders/default/deployment.yaml new file mode 100644 index 000000000..750989782 --- /dev/null +++ b/test/e2e/identityheaders/default/deployment.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-rbac-proxy + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app: kube-rbac-proxy + template: + metadata: + 
labels: + app: kube-rbac-proxy + spec: + serviceAccountName: kube-rbac-proxy + containers: + - name: kube-rbac-proxy + image: quay.io/brancz/kube-rbac-proxy:local + args: + - "--secure-port=8443" + - "--upstream=http://127.0.0.1:8080/" + - "--authentication-skip-lookup" + - "--logtostderr=true" + - "--auth-header-user-field-name=x-remote-user" + - "--auth-header-groups-field-name=x-remote-groups" + - "--auth-header-groups-field-separator=|" + - "--v=10" + ports: + - containerPort: 8443 + name: https + - name: nginx + image: nginx:latest + ports: + - containerPort: 8080 + volumeMounts: + - name: nginx-config + mountPath: "/etc/nginx/conf.d/server.conf" + subPath: server.conf + volumes: + - name: nginx-config + configMap: + name: nginx-config diff --git a/test/e2e/identityheaders/default/service.yaml b/test/e2e/identityheaders/default/service.yaml new file mode 100644 index 000000000..b1ae11686 --- /dev/null +++ b/test/e2e/identityheaders/default/service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: kube-rbac-proxy + name: kube-rbac-proxy + namespace: default +spec: + ports: + - name: https + port: 8443 + targetPort: https + selector: + app: kube-rbac-proxy diff --git a/test/e2e/identityheaders/default/serviceAccount.yaml b/test/e2e/identityheaders/default/serviceAccount.yaml new file mode 100644 index 000000000..45feecc9c --- /dev/null +++ b/test/e2e/identityheaders/default/serviceAccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-rbac-proxy + namespace: default diff --git a/test/e2e/identityheaders/insecure/clusterRole-client.yaml b/test/e2e/identityheaders/insecure/clusterRole-client.yaml new file mode 100644 index 000000000..421a9d947 --- /dev/null +++ b/test/e2e/identityheaders/insecure/clusterRole-client.yaml @@ -0,0 +1,7 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics +rules: + - nonResourceURLs: ["/metrics"] + verbs: ["get"] diff --git 
a/test/e2e/identityheaders/insecure/clusterRole.yaml b/test/e2e/identityheaders/insecure/clusterRole.yaml new file mode 100644 index 000000000..e9bc500b7 --- /dev/null +++ b/test/e2e/identityheaders/insecure/clusterRole.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kube-rbac-proxy + namespace: default +rules: + - apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: + - subjectaccessreviews + verbs: ["create"] diff --git a/test/e2e/identityheaders/insecure/clusterRoleBinding-client.yaml b/test/e2e/identityheaders/insecure/clusterRoleBinding-client.yaml new file mode 100644 index 000000000..4f93e2b8c --- /dev/null +++ b/test/e2e/identityheaders/insecure/clusterRoleBinding-client.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metrics +subjects: + - kind: ServiceAccount + name: default + namespace: default diff --git a/test/e2e/identityheaders/insecure/clusterRoleBinding.yaml b/test/e2e/identityheaders/insecure/clusterRoleBinding.yaml new file mode 100644 index 000000000..f7be8fa4e --- /dev/null +++ b/test/e2e/identityheaders/insecure/clusterRoleBinding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kube-rbac-proxy + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-rbac-proxy +subjects: + - kind: ServiceAccount + name: kube-rbac-proxy + namespace: default diff --git a/test/e2e/identityheaders/insecure/deployment-proxy.yaml b/test/e2e/identityheaders/insecure/deployment-proxy.yaml new file mode 100644 index 000000000..1e0b9abd9 --- /dev/null +++ b/test/e2e/identityheaders/insecure/deployment-proxy.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: 
kube-rbac-proxy + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app: kube-rbac-proxy + template: + metadata: + labels: + app: kube-rbac-proxy + spec: + serviceAccountName: kube-rbac-proxy + containers: + - name: kube-rbac-proxy + image: quay.io/brancz/kube-rbac-proxy:local + args: + - "--secure-port=8443" + - "--upstream=http://nginx.default.svc.cluster.local:80/" + - "--authentication-skip-lookup" + - "--logtostderr=true" + - "--auth-header-user-field-name=x-remote-user" + - "--auth-header-groups-field-name=x-remote-groups" + - "--auth-header-groups-field-separator=|" + - "--v=10" + ports: + - containerPort: 8443 + name: https diff --git a/test/e2e/identityheaders/insecure/deployment-upstream.yaml b/test/e2e/identityheaders/insecure/deployment-upstream.yaml new file mode 100644 index 000000000..93858c48b --- /dev/null +++ b/test/e2e/identityheaders/insecure/deployment-upstream.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx + labels: + app: nginx +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:latest + ports: + - containerPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx +spec: + ports: + - port: 80 + targetPort: 80 + selector: + app: nginx + diff --git a/test/e2e/identityheaders/insecure/service.yaml b/test/e2e/identityheaders/insecure/service.yaml new file mode 100644 index 000000000..b1ae11686 --- /dev/null +++ b/test/e2e/identityheaders/insecure/service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: kube-rbac-proxy + name: kube-rbac-proxy + namespace: default +spec: + ports: + - name: https + port: 8443 + targetPort: https + selector: + app: kube-rbac-proxy diff --git a/test/e2e/identityheaders/insecure/serviceAccount.yaml b/test/e2e/identityheaders/insecure/serviceAccount.yaml new file mode 100644 index 000000000..45feecc9c --- 
/dev/null +++ b/test/e2e/identityheaders/insecure/serviceAccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-rbac-proxy + namespace: default diff --git a/test/e2e/identityheaders/secure/clusterRole-client.yaml b/test/e2e/identityheaders/secure/clusterRole-client.yaml new file mode 100644 index 000000000..421a9d947 --- /dev/null +++ b/test/e2e/identityheaders/secure/clusterRole-client.yaml @@ -0,0 +1,7 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics +rules: + - nonResourceURLs: ["/metrics"] + verbs: ["get"] diff --git a/test/e2e/identityheaders/secure/clusterRole.yaml b/test/e2e/identityheaders/secure/clusterRole.yaml new file mode 100644 index 000000000..e9bc500b7 --- /dev/null +++ b/test/e2e/identityheaders/secure/clusterRole.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kube-rbac-proxy + namespace: default +rules: + - apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: + - subjectaccessreviews + verbs: ["create"] diff --git a/test/e2e/identityheaders/secure/clusterRoleBinding-client.yaml b/test/e2e/identityheaders/secure/clusterRoleBinding-client.yaml new file mode 100644 index 000000000..4f93e2b8c --- /dev/null +++ b/test/e2e/identityheaders/secure/clusterRoleBinding-client.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metrics +subjects: + - kind: ServiceAccount + name: default + namespace: default diff --git a/test/e2e/identityheaders/secure/clusterRoleBinding.yaml b/test/e2e/identityheaders/secure/clusterRoleBinding.yaml new file mode 100644 index 000000000..f7be8fa4e --- /dev/null +++ b/test/e2e/identityheaders/secure/clusterRoleBinding.yaml @@ -0,0 +1,13 @@ +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kube-rbac-proxy + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-rbac-proxy +subjects: + - kind: ServiceAccount + name: kube-rbac-proxy + namespace: default diff --git a/test/e2e/identityheaders/secure/configmap-nginx.yaml b/test/e2e/identityheaders/secure/configmap-nginx.yaml new file mode 100644 index 000000000..249b28a1c --- /dev/null +++ b/test/e2e/identityheaders/secure/configmap-nginx.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-config +data: + server.conf: |+ + server { + listen 8443 ssl; + server_name upstream-nginx; + + ssl_certificate /etc/nginx/certs/tls.crt; + ssl_certificate_key /etc/nginx/keys/tls.key; + ssl_client_certificate /etc/nginx/client-ca/ca.crt; + ssl_verify_client on; + + location /metrics { + add_header Content-Type text/plain; + return 200 'metrics endpoint reached\n'; + } + } diff --git a/test/e2e/identityheaders/secure/deployment-proxy.yaml b/test/e2e/identityheaders/secure/deployment-proxy.yaml new file mode 100644 index 000000000..98897002f --- /dev/null +++ b/test/e2e/identityheaders/secure/deployment-proxy.yaml @@ -0,0 +1,68 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-rbac-proxy + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app: kube-rbac-proxy + template: + metadata: + labels: + app: kube-rbac-proxy + spec: + serviceAccountName: kube-rbac-proxy + containers: + - name: kube-rbac-proxy + image: quay.io/brancz/kube-rbac-proxy:local + args: + - "--secure-port=8443" + - "--upstream=https://nginx.default.svc.cluster.local:8443/" + - "--authentication-skip-lookup" + - "--logtostderr=true" + - "--auth-header-user-field-name=x-remote-user" + - "--auth-header-groups-field-name=x-remote-groups" + - "--auth-header-groups-field-separator=|" + - "--tls-cert-file=/usr/local/etc/kube-rbac-proxy/server-certs/tls.crt" + - 
"--tls-private-key-file=/usr/local/etc/kube-rbac-proxy/server-keys/tls.key" + - "--upstream-ca-file=/usr/local/etc/kube-rbac-proxy/upstream-certs/ca.crt" + - "--upstream-client-cert-file=/usr/local/etc/kube-rbac-proxy/client-certs/tls.crt" + - "--upstream-client-key-file=/usr/local/etc/kube-rbac-proxy/client-keys/tls.key" + - "--v=10" + volumeMounts: + - name: kube-rbac-proxy-client-keys + mountPath: /usr/local/etc/kube-rbac-proxy/client-keys + readOnly: true + - name: kube-rbac-proxy-client-certs + mountPath: /usr/local/etc/kube-rbac-proxy/client-certs + readOnly: true + - name: nginx-certs + mountPath: /usr/local/etc/kube-rbac-proxy/upstream-certs + readOnly: true + - name: kube-rbac-proxy-keys + mountPath: /usr/local/etc/kube-rbac-proxy/server-keys + readOnly: true + - name: kube-rbac-proxy-certs + mountPath: /usr/local/etc/kube-rbac-proxy/server-certs + readOnly: true + ports: + - containerPort: 8443 + name: https + volumes: + - name: kube-rbac-proxy-client-keys + secret: + secretName: kube-rbac-proxy-client-keys + - name: kube-rbac-proxy-client-certs + configMap: + name: kube-rbac-proxy-client-certs + - name: nginx-certs + configMap: + name: nginx-certs + - name: kube-rbac-proxy-keys + secret: + secretName: kube-rbac-proxy-keys + - name: kube-rbac-proxy-certs + configMap: + name: kube-rbac-proxy-certs diff --git a/test/e2e/identityheaders/secure/deployment-upstream.yaml b/test/e2e/identityheaders/secure/deployment-upstream.yaml new file mode 100644 index 000000000..e6cc651ae --- /dev/null +++ b/test/e2e/identityheaders/secure/deployment-upstream.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx + namespace: default + labels: + app: nginx +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:latest + ports: + - containerPort: 8443 + volumeMounts: + - name: nginx-certs + mountPath: "/etc/nginx/certs" + readOnly: true + - 
name: nginx-keys + mountPath: "/etc/nginx/keys" + readOnly: true + - name: kube-rbac-proxy-client-certs + mountPath: "/etc/nginx/client-ca" + readOnly: true + - name: nginx-config + mountPath: "/etc/nginx/conf.d/server.conf" + subPath: server.conf + volumes: + - name: nginx-config + configMap: + name: nginx-config + - name: nginx-certs + configMap: + name: nginx-certs + - name: nginx-keys + secret: + secretName: nginx-keys + - name: kube-rbac-proxy-client-certs + configMap: + name: kube-rbac-proxy-client-certs diff --git a/test/e2e/identityheaders/secure/service-proxy.yaml b/test/e2e/identityheaders/secure/service-proxy.yaml new file mode 100644 index 000000000..b1ae11686 --- /dev/null +++ b/test/e2e/identityheaders/secure/service-proxy.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: kube-rbac-proxy + name: kube-rbac-proxy + namespace: default +spec: + ports: + - name: https + port: 8443 + targetPort: https + selector: + app: kube-rbac-proxy diff --git a/test/e2e/identityheaders/secure/service-upstream.yaml b/test/e2e/identityheaders/secure/service-upstream.yaml new file mode 100644 index 000000000..12ef92776 --- /dev/null +++ b/test/e2e/identityheaders/secure/service-upstream.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: nginx + namespace: default + labels: + app: nginx +spec: + ports: + - name: https + port: 8443 + targetPort: 8443 + selector: + app: nginx diff --git a/test/e2e/identityheaders/secure/serviceAccount.yaml b/test/e2e/identityheaders/secure/serviceAccount.yaml new file mode 100644 index 000000000..45feecc9c --- /dev/null +++ b/test/e2e/identityheaders/secure/serviceAccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-rbac-proxy + namespace: default diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go index 98bdcf2f7..70b133eec 100644 --- a/test/e2e/main_test.go +++ b/test/e2e/main_test.go @@ -53,6 +53,7 @@ func Test(t *testing.T) { tests := 
map[string]kubetest.TestSuite{ "Basics": testBasics(client), "H2CUpstream": testH2CUpstream(client), + "IdentityHeaders": testIdentityHeaders(client), "ClientCertificates": testClientCertificates(client), "TokenAudience": testTokenAudience(client), "AllowPath": testAllowPathsRegexp(client), diff --git a/test/kubetest/client.go b/test/kubetest/client.go index 5d8bf7dfe..26ae09d2c 100644 --- a/test/kubetest/client.go +++ b/test/kubetest/client.go @@ -16,7 +16,22 @@ limitations under the License. package kubetest -import "k8s.io/client-go/kubernetes" +import ( + "k8s.io/client-go/kubernetes" +) + +func ClientLogsContain(client kubernetes.Interface, command string, logEntries []string, opts *RunOptions) Action { + return func(ctx *ScenarioContext) error { + return RunHasLogEntry( + client, + "quay.io/brancz/krp-curl:v0.0.2", + "kube-rbac-proxy-client", + []string{"/bin/sh", "-c", command}, + logEntries, + opts, + )(ctx) + } +} func ClientSucceeds(client kubernetes.Interface, command string, opts *RunOptions) Action { return func(ctx *ScenarioContext) error { diff --git a/test/kubetest/kubernetes.go b/test/kubetest/kubernetes.go index af6711339..121b00422 100644 --- a/test/kubetest/kubernetes.go +++ b/test/kubetest/kubernetes.go @@ -19,6 +19,8 @@ package kubetest import ( "bytes" "context" + "crypto/x509" + "encoding/pem" "fmt" "io" "os" @@ -36,6 +38,71 @@ import ( "k8s.io/client-go/kubernetes" ) +func CreateServerCerts(client kubernetes.Interface, name string) Action { + return createCerts(client, name, createSignedServerCert) +} + +func CreateClientCerts(client kubernetes.Interface, name string) Action { + return createCerts(client, name, createSignedClientCert) +} + +// Panic panics. It is used to stop the tests after setting up the manifests.
+func Panic(_ *ScenarioContext) error { + panic("happy debugging") +} + +func createCerts(client kubernetes.Interface, name string, createSignedCert certer) Action { + return func(ctx *ScenarioContext) error { + caCert, caKey, err := createSelfSignedCA(fmt.Sprintf("%s-ca", name)) + if err != nil { + return err + } + cert, key, err := createSignedCert(caCert, caKey, fmt.Sprintf("%s.default.svc.cluster.local", name)) + if err != nil { + return err + } + + configMapName := fmt.Sprintf("%s-certs", name) + _, err = client.CoreV1().ConfigMaps(ctx.Namespace).Create(context.TODO(), &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + }, + Data: map[string]string{ + "ca.crt": string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: caCert.Raw})), + "tls.crt": string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})), + }, + }, metav1.CreateOptions{}) + if err != nil { + return err + } + ctx.AddCleanUp(func() error { + return client.CoreV1().ConfigMaps(ctx.Namespace).Delete(context.TODO(), configMapName, metav1.DeleteOptions{}) + }) + + secretName := fmt.Sprintf("%s-keys", name) + _, err = client.CoreV1().Secrets(ctx.Namespace).Create(context.TODO(), &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + }, + Data: map[string][]byte{ + "tls.key": []byte(pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(key), + })), + "ca.key": []byte(pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(caKey), + })), + }, + }, metav1.CreateOptions{}) + ctx.AddCleanUp(func() error { + return client.CoreV1().Secrets(ctx.Namespace).Delete(context.TODO(), secretName, metav1.DeleteOptions{}) + }) + + return err + } +} + func CreatedManifests(client kubernetes.Interface, paths ...string) Action { return func(ctx *ScenarioContext) error { for _, path := range paths { @@ -150,7 +217,7 @@ func createDeployment(client kubernetes.Interface, ctx 
*ScenarioContext, content return err } - dumpLogs(client, ctx, metav1.ListOptions{LabelSelector: sel.String()}) + logsFprintf(client, ctx, metav1.ListOptions{LabelSelector: sel.String()}, os.Stdout) err = client.AppsV1().Deployments(dep.Namespace).Delete(context.TODO(), dep.Name, metav1.DeleteOptions{}) if err != nil { @@ -163,7 +230,7 @@ func createDeployment(client kubernetes.Interface, ctx *ScenarioContext, content return err } -func dumpLogs(client kubernetes.Interface, ctx *ScenarioContext, opts metav1.ListOptions) { +func logsFprintf(client kubernetes.Interface, ctx *ScenarioContext, opts metav1.ListOptions, w io.Writer) { pods, err := client.CoreV1().Pods(ctx.Namespace).List(context.TODO(), opts) if err != nil { return @@ -171,19 +238,19 @@ func dumpLogs(client kubernetes.Interface, ctx *ScenarioContext, opts metav1.Lis for _, p := range pods.Items { for _, c := range p.Spec.Containers { - fmt.Println("=== LOGS", ctx.Namespace, p.Name, c.Name) + fmt.Fprintf(w, "\n=== LOGS %s %s %s", ctx.Namespace, p.Name, c.Name) - rest := client.CoreV1().Pods(ctx.Namespace).GetLogs(p.GetName(), &corev1.PodLogOptions{ + req := client.CoreV1().Pods(ctx.Namespace).GetLogs(p.GetName(), &corev1.PodLogOptions{ Container: c.Name, Follow: false, }) - stream, err := rest.Stream(context.TODO()) + stream, err := req.Stream(context.TODO()) if err != nil { return } - _, _ = io.Copy(os.Stdout, stream) + _, _ = io.Copy(w, stream) } } } @@ -264,6 +331,32 @@ func createConfigmap(client kubernetes.Interface, ctx *ScenarioContext, content return err } +// PodIsCrashLoopBackOff checks if there is any pod that is in the state of CrashLoopBackOff. +// This is required to verify negative test cases. 
+func PodIsCrashLoopBackOff(client kubernetes.Interface, labels string) func(*ScenarioContext) error { + return func(ctx *ScenarioContext) error { + return wait.Poll(time.Second, time.Minute, func() (bool, error) { + list, err := client.CoreV1().Pods(ctx.Namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return false, fmt.Errorf("failed to list pods: %v", err) + } + + for _, p := range list.Items { + for _, c := range p.Status.ContainerStatuses { + if c.State.Waiting != nil && + c.Name == labels && + c.State.Waiting.Reason == "CrashLoopBackOff" { + + return true, nil + } + } + } + + return false, nil + }) + } +} + // PodsAreReady waits for a number of replicas matching the given labels to be ready. // Returns a func directly (not Setup or Conditions) as it can be used in Given and When steps func PodsAreReady(client kubernetes.Interface, replicas int, labels string) func(*ScenarioContext) error { @@ -364,6 +457,7 @@ type RunOptions struct { ServiceAccount string TokenAudience string ClientCertificates bool + OutputStream io.Writer // Functional Options would be better } func RunSucceeds(client kubernetes.Interface, image string, name string, command []string, opts *RunOptions) Action { @@ -385,6 +479,29 @@ func RunFails(client kubernetes.Interface, image string, name string, command [] } } + +func RunHasLogEntry(client kubernetes.Interface, image string, name string, command, logEntries []string, opts *RunOptions) Action { + return func(ctx *ScenarioContext) error { + if opts == nil { + opts = &RunOptions{} + } + + builder := &strings.Builder{} + opts.OutputStream = builder + if err := run(client, ctx, image, name, command, opts); err != nil { + return fmt.Errorf("failed to run: %v", err) + } + + output := builder.String() + for _, entry := range logEntries { + if !strings.Contains(output, entry) { + return fmt.Errorf("log entry not found: %s", entry) + } + } + + return nil + } +} + var errRun = fmt.Errorf("failed to run") // run the command
and return the Check with the container's logs @@ -487,6 +604,13 @@ func run(client kubernetes.Interface, ctx *ScenarioContext, image string, name s return fmt.Errorf("failed to watch job: %v", err) } + if opts == nil { + opts = &RunOptions{} + } + if opts.OutputStream == nil { + opts.OutputStream = os.Stdout + } + for event := range watch.ResultChan() { job := event.Object.(*batchv1.Job) conditions := job.Status.Conditions @@ -499,7 +623,7 @@ func run(client kubernetes.Interface, ctx *ScenarioContext, image string, name s } if failed { - dumpLogs(client, ctx, metav1.ListOptions{LabelSelector: "job-name=" + batch.Name}) + logsFprintf(client, ctx, metav1.ListOptions{LabelSelector: "job-name=" + batch.Name}, opts.OutputStream) return errRun } @@ -510,7 +634,7 @@ func run(client kubernetes.Interface, ctx *ScenarioContext, image string, name s } } if complete && !failed { - dumpLogs(client, ctx, metav1.ListOptions{LabelSelector: "job-name=" + batch.Name}) + logsFprintf(client, ctx, metav1.ListOptions{LabelSelector: "job-name=" + batch.Name}, opts.OutputStream) return nil } } diff --git a/test/kubetest/tls.go b/test/kubetest/tls.go new file mode 100644 index 000000000..58c11111e --- /dev/null +++ b/test/kubetest/tls.go @@ -0,0 +1,225 @@ +/* +Copyright 2024 the kube-rbac-proxy maintainers. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package kubetest + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "math/big" + "time" +) + +var ( + year = 365 * 24 * time.Hour + minimalRSAKeySize = 2048 +) + +type certer func(*x509.Certificate, *rsa.PrivateKey, string) (*x509.Certificate, *rsa.PrivateKey, error) + +func createSignedClientCert(cacert *x509.Certificate, caPrivateKey *rsa.PrivateKey, name string) (*x509.Certificate, *rsa.PrivateKey, error) { + // Generate a private key. + privateKey, err := rsa.GenerateKey(rand.Reader, minimalRSAKeySize) + if err != nil { + return nil, nil, err + } + + // Generate subject key id. + subjectKeyID := sha1.Sum(privateKey.PublicKey.N.Bytes()) + authorityKeyID := cacert.SubjectKeyId + + // Generate serial number with at least 20 bits of entropy. + serialNumber, err := generateSerialNumber() + if err != nil { + return nil, nil, err + } + + // Create certificate template. + template := &x509.Certificate{ + Subject: pkix.Name{CommonName: name}, + + NotBefore: time.Now().Add(-1 * time.Second), + NotAfter: time.Now().Add(year), + + SerialNumber: serialNumber, + SubjectKeyId: subjectKeyID[:], + AuthorityKeyId: authorityKeyID, + + SignatureAlgorithm: x509.SHA256WithRSA, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + + BasicConstraintsValid: true, + } + + // Sign Certificate + derBytes, err := x509.CreateCertificate( + rand.Reader, + template, + cacert, + privateKey.Public(), + caPrivateKey, + ) + if err != nil { + return nil, nil, err + } + + // Parse Certificate into x509.Certificate. 
+ certs, err := x509.ParseCertificates(derBytes) + if err != nil { + return nil, nil, err + } + if len(certs) != 1 { + return nil, nil, fmt.Errorf("expected 1 certificate, got %d", len(certs)) + } + + return certs[0], privateKey, nil +} + +func createSignedServerCert(caCert *x509.Certificate, caPrivateKey *rsa.PrivateKey, dnsName string) (*x509.Certificate, *rsa.PrivateKey, error) { + // Generate a private key. + privateKey, err := rsa.GenerateKey(rand.Reader, minimalRSAKeySize) + if err != nil { + return nil, nil, err + } + + // Generate subject key id. + subjectKeyID := sha1.Sum(privateKey.PublicKey.N.Bytes()) + authorityKeyID := caCert.SubjectKeyId + + // Generate serial number with at least 20 bits of entropy. + serialNumber, err := generateSerialNumber() + if err != nil { + return nil, nil, err + } + + // Create certificate template. + template := &x509.Certificate{ + Subject: pkix.Name{CommonName: dnsName}, + + NotBefore: time.Now().Add(-1 * time.Second), + NotAfter: time.Now().Add(year), + + SerialNumber: serialNumber, + SubjectKeyId: subjectKeyID[:], + AuthorityKeyId: authorityKeyID, + + SignatureAlgorithm: x509.SHA256WithRSA, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + + DNSNames: []string{dnsName}, + + BasicConstraintsValid: true, + } + + // Sign Certificate + derBytes, err := x509.CreateCertificate( + rand.Reader, + template, + caCert, + privateKey.Public(), + caPrivateKey, + ) + if err != nil { + return nil, nil, err + } + + // Parse Certificate into x509.Certificate. + certs, err := x509.ParseCertificates(derBytes) + if err != nil { + return nil, nil, err + } + if len(certs) != 1 { + return nil, nil, fmt.Errorf("expected 1 certificate, got %d", len(certs)) + } + + return certs[0], privateKey, nil +} + +func createSelfSignedCA(name string) (*x509.Certificate, *rsa.PrivateKey, error) { + // Generate a private key. 
+ privateKey, err := rsa.GenerateKey(rand.Reader, minimalRSAKeySize) + if err != nil { + return nil, nil, err + } + + // Generate authority key id and subject key id. + keyID := sha1.Sum(privateKey.PublicKey.N.Bytes()) + + // Generate serial number with at least 20 bits of entropy. + serialNumber, err := generateSerialNumber() + if err != nil { + return nil, nil, err + } + + // Create certificate template. + template := &x509.Certificate{ + Subject: pkix.Name{CommonName: name}, + + NotBefore: time.Now().Add(-1 * time.Second), + NotAfter: time.Now().Add(year), + + SerialNumber: serialNumber, + AuthorityKeyId: keyID[:], + SubjectKeyId: keyID[:], + + SignatureAlgorithm: x509.SHA256WithRSA, + + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + + BasicConstraintsValid: true, + IsCA: true, + } + + // Sign Certificate + derBytes, err := x509.CreateCertificate( + rand.Reader, + template, + template, + privateKey.Public(), + privateKey, + ) + if err != nil { + return nil, nil, err + } + + // Parse Certificate into x509.Certificate. + certs, err := x509.ParseCertificates(derBytes) + if err != nil { + return nil, nil, err + } + if len(certs) != 1 { + return nil, nil, fmt.Errorf("expected 1 certificate, got %d", len(certs)) + } + + return certs[0], privateKey, nil +} + +func generateSerialNumber() (*big.Int, error) { + max := new(big.Int).Lsh(big.NewInt(1), 63) + serialNumber, err := rand.Int(rand.Reader, max) + if err != nil { + return nil, err + } + + return serialNumber, nil +}