From 781aa8744e841a0cac7783ef8cef63c6a0d7a168 Mon Sep 17 00:00:00 2001 From: Angel Misevski Date: Wed, 17 Nov 2021 14:23:15 -0500 Subject: [PATCH 1/7] Add controller RBAC for creating LocalSubjectAccessReviews Allow the controller to create LocalSubjectAccessReviews in order to review user permissions when accessing DevWorkspace features. Signed-off-by: Angel Misevski --- controllers/workspace/devworkspace_controller.go | 1 + deploy/templates/components/rbac/role.yaml | 7 +++++++ pkg/webhook/cluster_roles.go | 1 + 3 files changed, 9 insertions(+) diff --git a/controllers/workspace/devworkspace_controller.go b/controllers/workspace/devworkspace_controller.go index f5727ec04..e030dc825 100644 --- a/controllers/workspace/devworkspace_controller.go +++ b/controllers/workspace/devworkspace_controller.go @@ -84,6 +84,7 @@ type DevWorkspaceReconciler struct { // +kubebuilder:rbac:groups="",resources=namespaces;events,verbs=get;list;watch // +kubebuilder:rbac:groups="batch",resources=jobs,verbs=get;create;list;watch;update;patch;delete // +kubebuilder:rbac:groups=admissionregistration.k8s.io,resources=mutatingwebhookconfigurations;validatingwebhookconfigurations,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=authorization.k8s.io,resources=subjectaccessreviews;localsubjectaccessreviews,verbs=create // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles;rolebindings;clusterroles;clusterrolebindings,verbs=get;list;watch;create;update // +kubebuilder:rbac:groups=oauth.openshift.io,resources=oauthclients,verbs=get;list;watch;create;update;patch;delete;deletecollection // +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=get;create diff --git a/deploy/templates/components/rbac/role.yaml b/deploy/templates/components/rbac/role.yaml index 375e663cd..db454dfab 100644 --- a/deploy/templates/components/rbac/role.yaml +++ b/deploy/templates/components/rbac/role.yaml @@ -106,6 +106,13 @@ rules: - get - list - watch +- apiGroups: + - authorization.k8s.io + resources: + - localsubjectaccessreviews + - subjectaccessreviews + verbs: + - create - apiGroups: - batch resources: diff --git a/pkg/webhook/cluster_roles.go b/pkg/webhook/cluster_roles.go index c6229eecc..7d4d75680 100755 --- a/pkg/webhook/cluster_roles.go +++ b/pkg/webhook/cluster_roles.go @@ -120,6 +120,7 @@ func getSpecClusterRole() (*v1.ClusterRole, error) { }, Resources: []string{ "subjectaccessreviews", + "localsubjectaccessreviews", }, Verbs: []string{ "create", From abdf48eb5dd52a97f0680b5a538d7f5747da7a0e Mon Sep 17 00:00:00 2001 From: Angel Misevski Date: Wed, 17 Nov 2021 14:24:30 -0500 Subject: [PATCH 2/7] Regenerate files to include LocalSubjectAccessReviews RBAC Signed-off-by: Angel Misevski --- .../devworkspace-operator.clusterserviceversion.yaml | 7 +++++++ deploy/deployment/kubernetes/combined.yaml | 7 +++++++ .../objects/devworkspace-controller-role.ClusterRole.yaml | 7 +++++++ deploy/deployment/openshift/combined.yaml | 7 +++++++ .../objects/devworkspace-controller-role.ClusterRole.yaml | 7 +++++++ 5 files changed, 35 insertions(+) diff --git a/deploy/bundle/manifests/devworkspace-operator.clusterserviceversion.yaml b/deploy/bundle/manifests/devworkspace-operator.clusterserviceversion.yaml index cdf347aeb..b6ecee34e 100644 --- a/deploy/bundle/manifests/devworkspace-operator.clusterserviceversion.yaml +++ b/deploy/bundle/manifests/devworkspace-operator.clusterserviceversion.yaml @@ -180,6 +180,13 @@ spec: - get - list - watch + - apiGroups: + - authorization.k8s.io +
resources: + - localsubjectaccessreviews + - subjectaccessreviews + verbs: + - create - apiGroups: - batch resources: diff --git a/deploy/deployment/kubernetes/combined.yaml b/deploy/deployment/kubernetes/combined.yaml index cb05d3453..230b0f569 100644 --- a/deploy/deployment/kubernetes/combined.yaml +++ b/deploy/deployment/kubernetes/combined.yaml @@ -19808,6 +19808,13 @@ rules: - get - list - watch +- apiGroups: + - authorization.k8s.io + resources: + - localsubjectaccessreviews + - subjectaccessreviews + verbs: + - create - apiGroups: - batch resources: diff --git a/deploy/deployment/kubernetes/objects/devworkspace-controller-role.ClusterRole.yaml b/deploy/deployment/kubernetes/objects/devworkspace-controller-role.ClusterRole.yaml index 39ac50e5a..38829dceb 100644 --- a/deploy/deployment/kubernetes/objects/devworkspace-controller-role.ClusterRole.yaml +++ b/deploy/deployment/kubernetes/objects/devworkspace-controller-role.ClusterRole.yaml @@ -107,6 +107,13 @@ rules: - get - list - watch +- apiGroups: + - authorization.k8s.io + resources: + - localsubjectaccessreviews + - subjectaccessreviews + verbs: + - create - apiGroups: - batch resources: diff --git a/deploy/deployment/openshift/combined.yaml b/deploy/deployment/openshift/combined.yaml index 10297864d..ed316fa6f 100644 --- a/deploy/deployment/openshift/combined.yaml +++ b/deploy/deployment/openshift/combined.yaml @@ -19808,6 +19808,13 @@ rules: - get - list - watch +- apiGroups: + - authorization.k8s.io + resources: + - localsubjectaccessreviews + - subjectaccessreviews + verbs: + - create - apiGroups: - batch resources: diff --git a/deploy/deployment/openshift/objects/devworkspace-controller-role.ClusterRole.yaml b/deploy/deployment/openshift/objects/devworkspace-controller-role.ClusterRole.yaml index 39ac50e5a..38829dceb 100644 --- a/deploy/deployment/openshift/objects/devworkspace-controller-role.ClusterRole.yaml +++ b/deploy/deployment/openshift/objects/devworkspace-controller-role.ClusterRole.yaml @@ -107,6 +107,13 @@ rules: - get - list - watch +- apiGroups: + - authorization.k8s.io + resources: + - localsubjectaccessreviews + - subjectaccessreviews + verbs: + - create - apiGroups: - batch resources: From 9263531f43b220519ac0b581a3ca33908479e8a9 Mon Sep 17 00:00:00 2001 From: Angel Misevski Date: Wed, 17 Nov 2021 16:25:20 -0500 Subject: [PATCH 3/7] Add additional SCCs attribute and use webhooks to verify authorization Add attribute 'controller.devfile.io/scc' which defines additional SCCs to add to the workspace service account. Add a MutatingWebhook check that reviews the requester's permissions for accessing the given SCC. Functionality is only enabled on OpenShift. Signed-off-by: Angel Misevski --- pkg/constants/attributes.go | 14 +++ webhook/workspace/handler/access_control.go | 109 ++++++++++++++++++++ webhook/workspace/handler/workspace.go | 12 ++- 3 files changed, 133 insertions(+), 2 deletions(-) create mode 100644 webhook/workspace/handler/access_control.go diff --git a/pkg/constants/attributes.go b/pkg/constants/attributes.go index 33adfb7e9..f91c8d365 100644 --- a/pkg/constants/attributes.go +++ b/pkg/constants/attributes.go @@ -37,6 +37,20 @@ const ( // value: VAL_2 WorkspaceEnvAttribute = "workspaceEnv" + // WorkspaceSCCAttribute defines additional SCCs that should be added to the DevWorkspace. The user adding + // this attribute to a workspace must have the RBAC permissions to "use" the SCC with the given name.
For example, + // to add the 'anyuid' SCC to the workspace Pod, the DevWorkspace should contain + // + // spec: + // template: + // attributes: + // controller.devfile.io/scc: "anyuid" + // + // Creating a workspace with this attribute, or updating an existing workspace to include this attribute, will fail + // if the user making the request does not have the "use" permission for the "anyuid" SCC. + // Only supported on OpenShift. + WorkspaceSCCAttribute = "controller.devfile.io/scc" + // ProjectCloneAttribute configures how the DevWorkspace will treat project cloning. By default, an init container // will be added to the workspace deployment to clone projects to the workspace before it starts. This attribute // must be applied to top-level attributes field in the DevWorkspace. diff --git a/webhook/workspace/handler/access_control.go b/webhook/workspace/handler/access_control.go new file mode 100644 index 000000000..008c26458 --- /dev/null +++ b/webhook/workspace/handler/access_control.go @@ -0,0 +1,109 @@ +// Copyright (c) 2019-2021 Red Hat, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package handler + +import ( + "context" + "fmt" + + dwv2 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + "github.com/devfile/devworkspace-operator/pkg/constants" + "github.com/devfile/devworkspace-operator/pkg/infrastructure" + v1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// validateUserPermissions validates that the user making the request has permissions to create/update the given workspace. +// In case we're validating a workspace on creation, parameter oldWksp should be set to nil. Returns an error if the user +// cannot perform the requested changes, or if an unexpected error occurs. +// Note: we only perform validation on v1alpha2 DevWorkspaces at the moment, as v1alpha1 DevWorkspaces do not support attributes. +func (h *WebhookHandler) validateUserPermissions(ctx context.Context, req admission.Request, newWksp, oldWksp *dwv2.DevWorkspace) error { + if !newWksp.Spec.Template.Attributes.Exists(constants.WorkspaceSCCAttribute) { + // Workspace is not requesting anything we need to check RBAC for. + return nil + } + + var attributeDecodeErr error + newSCCAttr := newWksp.Spec.Template.Attributes.GetString(constants.WorkspaceSCCAttribute, &attributeDecodeErr) + if attributeDecodeErr != nil { + return fmt.Errorf("failed to read %s attribute in DevWorkspace: %s", constants.WorkspaceSCCAttribute, attributeDecodeErr) + } + + if oldWksp != nil && oldWksp.Spec.Template.Attributes.Exists(constants.WorkspaceSCCAttribute) { + // If we're updating a DevWorkspace, check RBAC only if the relevant attribute is modified to avoid performing too many SARs.
+ oldSCCAttr := oldWksp.Spec.Template.Attributes.GetString(constants.WorkspaceSCCAttribute, &attributeDecodeErr) + if attributeDecodeErr != nil { + return fmt.Errorf("failed to read %s attribute in DevWorkspace: %s", constants.WorkspaceSCCAttribute, attributeDecodeErr) + } + if oldSCCAttr == newSCCAttr { + // RBAC has already been checked for this setting, don't recheck + return nil + } + if oldSCCAttr != newSCCAttr { + // Don't allow attribute to be changed once it is set, otherwise we can't clean up the SCC when the workspace is deleted. + return fmt.Errorf("%s attribute cannot be modified after being set -- workspace must be deleted", constants.WorkspaceSCCAttribute) + } + } + + if err := h.validateOpenShiftSCC(ctx, req, newSCCAttr); err != nil { + return err + } + + return nil +} + +func (h *WebhookHandler) validateOpenShiftSCC(ctx context.Context, req admission.Request, scc string) error { + if !infrastructure.IsOpenShift() { + // We can only check the appropriate capability on OpenShift currently (via securitycontextconstraints.security.openshift.io) + // so forbid using this attribute + return fmt.Errorf("specifying additional SCCs is only permitted on OpenShift") + } + + if scc == "" { + return fmt.Errorf("empty value for attribute %s is invalid", constants.WorkspaceSCCAttribute) + } + + sar := &v1.LocalSubjectAccessReview{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: req.Namespace, + }, + Spec: v1.SubjectAccessReviewSpec{ + ResourceAttributes: &v1.ResourceAttributes{ + Namespace: req.Namespace, + Verb: "use", + Group: "security.openshift.io", + Resource: "securitycontextconstraints", + Name: scc, + }, + User: req.UserInfo.Username, + Groups: req.UserInfo.Groups, + UID: req.UserInfo.UID, + }, + } + + err := h.Client.Create(ctx, sar) + if err != nil { + return fmt.Errorf("failed to create subjectaccessreview for request: %w", err) + } + + if !sar.Status.Allowed { + if sar.Status.Reason != "" { + return fmt.Errorf("user is not permitted to use the %s SCC: %s", scc, sar.Status.Reason) + } + return fmt.Errorf("user is not permitted to use the %s SCC", scc) + } + + return nil +} diff --git a/webhook/workspace/handler/workspace.go b/webhook/workspace/handler/workspace.go index 35e11722b..9f3d2d94b 100644 --- a/webhook/workspace/handler/workspace.go +++ b/webhook/workspace/handler/workspace.go @@ -40,7 +40,7 @@ func (h *WebhookHandler) MutateWorkspaceV1alpha1OnCreate(_ context.Context, req return h.returnPatched(req, wksp) } -func (h *WebhookHandler) MutateWorkspaceV1alpha2OnCreate(_ context.Context, req admission.Request) admission.Response { +func (h *WebhookHandler) MutateWorkspaceV1alpha2OnCreate(ctx context.Context, req admission.Request) admission.Response { wksp := &dwv2.DevWorkspace{} err := h.Decoder.Decode(req, wksp) if err != nil { @@ -49,6 +49,10 @@ func (h *WebhookHandler) MutateWorkspaceV1alpha2OnCreate(_ context.Context, req wksp.Labels = maputils.Append(wksp.Labels, constants.DevWorkspaceCreatorLabel, req.UserInfo.UID) + if err := h.validateUserPermissions(ctx, req, wksp, nil); err != nil { + return admission.Denied(err.Error()) + } + return h.returnPatched(req, wksp) } @@ -85,7 +89,7 @@ func (h *WebhookHandler) MutateWorkspaceV1alpha1OnUpdate(_ context.Context, req return admission.Allowed("new devworkspace has the same devworkspace creator as old one") } -func (h *WebhookHandler) MutateWorkspaceV1alpha2OnUpdate(_ context.Context, req admission.Request) admission.Response { +func (h *WebhookHandler) MutateWorkspaceV1alpha2OnUpdate(ctx context.Context, req admission.Request)
admission.Response { newWksp := &dwv2.DevWorkspace{} oldWksp := &dwv2.DevWorkspace{} err := h.parse(req, oldWksp, newWksp) if err != nil { @@ -97,6 +101,10 @@ func (h *WebhookHandler) MutateWorkspaceV1alpha2OnUpdate(_ context.Context, req return admission.Denied(msg) } + if err := h.validateUserPermissions(ctx, req, newWksp, oldWksp); err != nil { + return admission.Denied(err.Error()) + } + oldCreator, found := oldWksp.Labels[constants.DevWorkspaceCreatorLabel] if !found { return admission.Denied(fmt.Sprintf("label '%s' is missing. Please recreate devworkspace to get it initialized", constants.DevWorkspaceCreatorLabel)) From e682bf1f3ca3c62044c037b5828b5d176af8a0e2 Mon Sep 17 00:00:00 2001 From: Angel Misevski Date: Wed, 17 Nov 2021 16:27:00 -0500 Subject: [PATCH 4/7] Add SCC to workspace ServiceAccount when specified in DevWorkspace. Look up the 'controller.devfile.io/scc' attribute on DevWorkspaces and attempt to add the SCC listed there to the workspace ServiceAccount. This requires additional privileges for the DW controller, which are not provided by default (must be granted explicitly by a cluster-admin). Signed-off-by: Angel Misevski --- main.go | 4 ++ pkg/provision/workspace/serviceaccount.go | 55 +++++++++++++++++++++++ 2 files changed, 59 insertions(+) diff --git a/main.go b/main.go index 0648dbf5c..82305eaa4 100644 --- a/main.go +++ b/main.go @@ -38,6 +38,7 @@ import ( oauthv1 "github.com/openshift/api/oauth/v1" routev1 "github.com/openshift/api/route/v1" + securityv1 "github.com/openshift/api/security/v1" templatev1 "github.com/openshift/api/template/v1" corev1 "k8s.io/api/core/v1" k8sruntime "k8s.io/apimachinery/pkg/runtime" @@ -75,6 +76,9 @@ func init() { utilruntime.Must(routev1.Install(scheme)) utilruntime.Must(templatev1.Install(scheme)) utilruntime.Must(oauthv1.Install(scheme)) + // Enable controller to manage SCCs in OpenShift; permissions to do this are not requested + // by default and must be added by a cluster-admin.
+ utilruntime.Must(securityv1.Install(scheme)) } // +kubebuilder:scaffold:scheme diff --git a/pkg/provision/workspace/serviceaccount.go b/pkg/provision/workspace/serviceaccount.go index 6f2858c75..1cfb46b6e 100644 --- a/pkg/provision/workspace/serviceaccount.go +++ b/pkg/provision/workspace/serviceaccount.go @@ -16,11 +16,16 @@ package workspace import ( + "fmt" + dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" "github.com/devfile/devworkspace-operator/pkg/constants" "github.com/devfile/devworkspace-operator/pkg/provision/sync" + securityv1 "github.com/openshift/api/security/v1" corev1 "k8s.io/api/core/v1" + k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "github.com/devfile/devworkspace-operator/pkg/common" @@ -79,6 +84,17 @@ func SyncServiceAccount( return ServiceAcctProvisioningStatus{ProvisioningStatus: ProvisioningStatus{Err: err}} } + if workspace.Spec.Template.Attributes.Exists(constants.WorkspaceSCCAttribute) { + sccName := workspace.Spec.Template.Attributes.GetString(constants.WorkspaceSCCAttribute, nil) + retry, err := addSCCToServiceAccount(specSA.Name, specSA.Namespace, sccName, clusterAPI) + if err != nil { + return ServiceAcctProvisioningStatus{ProvisioningStatus: ProvisioningStatus{FailStartup: true, Err: err}} + } + if retry { + return ServiceAcctProvisioningStatus{ProvisioningStatus: ProvisioningStatus{Requeue: true}} + } + } + return ServiceAcctProvisioningStatus{ ProvisioningStatus: ProvisioningStatus{ Continue: true, @@ -86,3 +102,42 @@ ServiceAccountName: saName, } } + +func addSCCToServiceAccount(saName, namespace, sccName string, clusterAPI sync.ClusterAPI) (retry bool, err error) { + serviceaccount := fmt.Sprintf("system:serviceaccount:%s:%s", namespace, saName) + + // TODO: Check if we can access the SCC + + scc := &securityv1.SecurityContextConstraints{} + if err := clusterAPI.Client.Get(clusterAPI.Ctx, types.NamespacedName{Name: sccName}, scc); err != nil { + switch { + case k8sErrors.IsUnauthorized(err): + return false, fmt.Errorf("operator does not have permissions to get the '%s' SecurityContextConstraint", sccName) + case k8sErrors.IsNotFound(err): + return false, fmt.Errorf("requested SCC '%s' not found on cluster", sccName) + default: + return false, err + } + } + + for _, user := range scc.Users { + if user == serviceaccount { + // This serviceaccount is already added to the SCC + return false, nil + } + } + + scc.Users = append(scc.Users, serviceaccount) + if err := clusterAPI.Client.Update(clusterAPI.Ctx, scc); err != nil { + switch { + case k8sErrors.IsUnauthorized(err): + return false, fmt.Errorf("operator does not have permissions to update the '%s' SecurityContextConstraint", sccName) + case k8sErrors.IsConflict(err): + return true, nil + default: + return false, err + } + } + + return false, nil +} From 193caf3d3d7dbd25c4d1f2c33f026f5aac53912e Mon Sep 17 00:00:00 2001 From: Angel Misevski Date: Thu, 18 Nov 2021 13:10:45 -0500 Subject: [PATCH 5/7] Check list/watch access to SCCs before attempting to use them The way that controller-runtime caching works means that errors in listing and watching a resource result in cluster interactions hanging on the cache, with the errors only being logged asynchronously. In order to use the cached client, we have to check for list/watch access first.
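As a rough sketch of the failure mode (assuming controller-runtime's default delegating client, which serves reads from an informer-backed cache):

    // The first cached read of a GVK lazily starts an informer, which needs
    // list/watch RBAC on that resource. Without it the informer never syncs,
    // so this call blocks and the RBAC errors only show up in the logs.
    scc := &securityv1.SecurityContextConstraints{}
    err := mgr.GetClient().Get(ctx, client.ObjectKey{Name: "anyuid"}, scc)

Checking list/watch access up front with SelfSubjectAccessReviews lets us fail the workspace fast instead.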
Signed-off-by: Angel Misevski --- .../workspace/devworkspace_controller.go | 4 +- pkg/provision/workspace/serviceaccount.go | 70 +++++++++++++++++-- 2 files changed, 67 insertions(+), 7 deletions(-) diff --git a/controllers/workspace/devworkspace_controller.go b/controllers/workspace/devworkspace_controller.go index e030dc825..aa9d661f7 100644 --- a/controllers/workspace/devworkspace_controller.go +++ b/controllers/workspace/devworkspace_controller.go @@ -355,7 +355,9 @@ func (r *DevWorkspaceReconciler) Reconcile(ctx context.Context, req ctrl.Request } serviceAcctStatus := wsprovision.SyncServiceAccount(workspace, saAnnotations, clusterAPI) if !serviceAcctStatus.Continue { - // FailStartup is not possible for generating the serviceaccount + if serviceAcctStatus.FailStartup { + return r.failWorkspace(workspace, serviceAcctStatus.Message, metrics.ReasonBadRequest, reqLogger, &reconcileStatus) + } reqLogger.Info("Waiting for workspace ServiceAccount") reconcileStatus.setConditionFalse(dw.DevWorkspaceServiceAccountReady, "Waiting for DevWorkspace ServiceAccount") if !serviceAcctStatus.Requeue && serviceAcctStatus.Err == nil { diff --git a/pkg/provision/workspace/serviceaccount.go b/pkg/provision/workspace/serviceaccount.go index 1cfb46b6e..50f17593f 100644 --- a/pkg/provision/workspace/serviceaccount.go +++ b/pkg/provision/workspace/serviceaccount.go @@ -22,6 +22,7 @@ import ( "github.com/devfile/devworkspace-operator/pkg/constants" "github.com/devfile/devworkspace-operator/pkg/provision/sync" securityv1 "github.com/openshift/api/security/v1" + v1 "k8s.io/api/authorization/v1" corev1 "k8s.io/api/core/v1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -79,7 +80,7 @@ func SyncServiceAccount( case *sync.NotInSyncError: return ServiceAcctProvisioningStatus{ProvisioningStatus: ProvisioningStatus{Requeue: true}} case *sync.UnrecoverableSyncError: - return ServiceAcctProvisioningStatus{ProvisioningStatus: ProvisioningStatus{FailStartup: true, Err: t.Cause}} + return ServiceAcctProvisioningStatus{ProvisioningStatus: ProvisioningStatus{FailStartup: true, Message: t.Cause.Error()}} default: return ServiceAcctProvisioningStatus{ProvisioningStatus: ProvisioningStatus{Err: err}} } @@ -88,7 +89,7 @@ func SyncServiceAccount( sccName := workspace.Spec.Template.Attributes.GetString(constants.WorkspaceSCCAttribute, nil) retry, err := addSCCToServiceAccount(specSA.Name, specSA.Namespace, sccName, clusterAPI) if err != nil { - return ServiceAcctProvisioningStatus{ProvisioningStatus: ProvisioningStatus{FailStartup: true, Err: err}} + return ServiceAcctProvisioningStatus{ProvisioningStatus: ProvisioningStatus{FailStartup: true, Message: err.Error()}} } if retry { return ServiceAcctProvisioningStatus{ProvisioningStatus: ProvisioningStatus{Requeue: true}} @@ -106,15 +107,22 @@ func SyncServiceAccount( func addSCCToServiceAccount(saName, namespace, sccName string, clusterAPI sync.ClusterAPI) (retry bool, err error) { serviceaccount := fmt.Sprintf("system:serviceaccount:%s:%s", namespace, saName) - // TODO: Check if we can access the SCC + canList, canWatch, err := checkControllerSCCAccess(sccName, clusterAPI) + if err != nil { + return false, fmt.Errorf("failed to check access to %s SecurityContextConstraints: %w", sccName, err) + } else if !canList { + return false, fmt.Errorf("controller is not permitted to list SecurityContextConstraints") + } else if !canWatch { + return false, fmt.Errorf("controller is not permitted to watch SecurityContextConstraints") + } scc := 
&securityv1.SecurityContextConstraints{} if err := clusterAPI.Client.Get(clusterAPI.Ctx, types.NamespacedName{Name: sccName}, scc); err != nil { switch { case k8sErrors.IsUnauthorized(err): - return false, fmt.Errorf("operator does not have permissions to get the '%s' SecurityContextConstraint", sccName) + return false, fmt.Errorf("operator does not have permissions to get the '%s' SecurityContextConstraints", sccName) case k8sErrors.IsNotFound(err): - return false, fmt.Errorf("requested SCC '%s' not found on cluster", sccName) + return false, fmt.Errorf("requested SecurityContextConstraints '%s' not found on cluster", sccName) default: return false, err } @@ -131,7 +139,7 @@ func addSCCToServiceAccount(saName, namespace, sccName string, clusterAPI sync.C if err := clusterAPI.Client.Update(clusterAPI.Ctx, scc); err != nil { switch { case k8sErrors.IsUnauthorized(err): - return false, fmt.Errorf("operator does not have permissions to update the '%s' SecurityContextConstraint", sccName) + return false, fmt.Errorf("operator does not have permissions to update the '%s' SecurityContextConstraints", sccName) case k8sErrors.IsConflict(err): return true, nil default: @@ -141,3 +149,53 @@ return false, nil } + +// checkControllerSCCAccess checks RBAC *prerequisites* for managing SecurityContextConstraints on OpenShift. +// Checking this specifically is required because the controller does not have these permissions by default, and the +// internal cache in controller-runtime attempts to use listWatches under the hood. Attempting to use SCCs in +// workspaces without the additional RBAC will lock up the reconcile, as any client actions that depend on the +// cache will hang rather than return an error. +func checkControllerSCCAccess(sccName string, clusterAPI sync.ClusterAPI) (canList, canWatch bool, err error) { + // Controller caching functionality depends on being able to list and watch resources. Errors due to these + // RBAC verbs not being available will be thrown out-of-band with the reconcile and cannot be detected by + // e.g. a failed Get() + listSSAR := &v1.SelfSubjectAccessReview{ + Spec: v1.SelfSubjectAccessReviewSpec{ + ResourceAttributes: &v1.ResourceAttributes{ + Verb: "list", + Group: "security.openshift.io", + Version: "v1", + Resource: "securitycontextconstraints", + Name: sccName, + }, + }, + } + if err := clusterAPI.Client.Create(clusterAPI.Ctx, listSSAR); err != nil { + return false, false, err + } + + if !listSSAR.Status.Allowed { + return false, false, nil + } + + watchSSAR := &v1.SelfSubjectAccessReview{ + Spec: v1.SelfSubjectAccessReviewSpec{ + ResourceAttributes: &v1.ResourceAttributes{ + Verb: "watch", + Group: "security.openshift.io", + Version: "v1", + Resource: "securitycontextconstraints", + Name: sccName, + }, + }, + } + if err := clusterAPI.Client.Create(clusterAPI.Ctx, watchSSAR); err != nil { + return true, false, err + } + + if !watchSSAR.Status.Allowed { + return true, false, nil + } + + return true, true, nil +} From 4940238c54e1ea2ed2dd4173773ab7deb0949fa2 Mon Sep 17 00:00:00 2001 From: Angel Misevski Date: Thu, 18 Nov 2021 13:26:48 -0500 Subject: [PATCH 6/7] Use non-caching client for managing SCCs to limit RBAC required.
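Reads through the non-caching client go straight to the API server, so the controller only needs RBAC for the verbs it actually uses on SCCs (get/update), at the cost of an extra API call per reconcile that uses the attribute. A sketch of the construction (client.New is the stock controller-runtime constructor; the full wiring is in main.go below):

    // Bypasses the manager's informer cache entirely, so no list/watch
    // permissions are needed for types read through this client.
    nonCachingClient, err := client.New(mgr.GetConfig(), client.Options{Scheme: scheme})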
Signed-off-by: Angel Misevski --- .../workspace/devworkspace_controller.go | 14 ++-- main.go | 13 +++- pkg/provision/sync/cluster_api.go | 9 +-- pkg/provision/workspace/serviceaccount.go | 68 ++----------------- 4 files changed, 27 insertions(+), 77 deletions(-) diff --git a/controllers/workspace/devworkspace_controller.go b/controllers/workspace/devworkspace_controller.go index aa9d661f7..af50194a1 100644 --- a/controllers/workspace/devworkspace_controller.go +++ b/controllers/workspace/devworkspace_controller.go @@ -70,8 +70,9 @@ const ( // DevWorkspaceReconciler reconciles a DevWorkspace object type DevWorkspaceReconciler struct { client.Client - Log logr.Logger - Scheme *runtime.Scheme + NonCachingClient client.Client + Log logr.Logger + Scheme *runtime.Scheme } /////// CRD-related RBAC roles @@ -99,10 +100,11 @@ type DevWorkspaceReconciler struct { func (r *DevWorkspaceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (reconcileResult ctrl.Result, err error) { reqLogger := r.Log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) clusterAPI := sync.ClusterAPI{ - Client: r.Client, - Scheme: r.Scheme, - Logger: reqLogger, - Ctx: ctx, + Client: r.Client, + NonCachingClient: r.NonCachingClient, + Scheme: r.Scheme, + Logger: reqLogger, + Ctx: ctx, } // Fetch the Workspace instance diff --git a/main.go b/main.go index 82305eaa4..2d1c0210a 100644 --- a/main.go +++ b/main.go @@ -126,6 +126,12 @@ func main() { os.Exit(1) } + nonCachingClient, err := client.New(mgr.GetConfig(), client.Options{Scheme: scheme}) + if err != nil { + setupLog.Error(err, "unable to initialize non-caching client") + os.Exit(1) + } + // Index Events on involvedObject.name to allow us to get events involving a DevWorkspace's pod(s). This is used to // check for issues that prevent the pod from starting, so that DevWorkspaces aren't just hanging indefinitely. 
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Event{}, "involvedObject.name", func(obj client.Object) []string { @@ -147,9 +153,10 @@ func main() { os.Exit(1) } if err = (&workspacecontroller.DevWorkspaceReconciler{ - Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("DevWorkspace"), - Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + NonCachingClient: nonCachingClient, + Log: ctrl.Log.WithName("controllers").WithName("DevWorkspace"), + Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "DevWorkspace") os.Exit(1) diff --git a/pkg/provision/sync/cluster_api.go b/pkg/provision/sync/cluster_api.go index a8a7bbdf9..85ce31f31 100644 --- a/pkg/provision/sync/cluster_api.go +++ b/pkg/provision/sync/cluster_api.go @@ -24,10 +24,11 @@ import ( ) type ClusterAPI struct { - Client crclient.Client - Scheme *runtime.Scheme - Logger logr.Logger - Ctx context.Context + Client crclient.Client + NonCachingClient crclient.Client + Scheme *runtime.Scheme + Logger logr.Logger + Ctx context.Context } // NotInSyncError is returned when a spec object is out-of-sync with its cluster counterpart diff --git a/pkg/provision/workspace/serviceaccount.go b/pkg/provision/workspace/serviceaccount.go index 50f17593f..d9db10fbe 100644 --- a/pkg/provision/workspace/serviceaccount.go +++ b/pkg/provision/workspace/serviceaccount.go @@ -22,7 +22,6 @@ import ( "github.com/devfile/devworkspace-operator/pkg/constants" "github.com/devfile/devworkspace-operator/pkg/provision/sync" securityv1 "github.com/openshift/api/security/v1" - v1 "k8s.io/api/authorization/v1" corev1 "k8s.io/api/core/v1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -107,19 +106,10 @@ func SyncServiceAccount( func addSCCToServiceAccount(saName, namespace, sccName string, clusterAPI sync.ClusterAPI) (retry bool, err error) { serviceaccount := fmt.Sprintf("system:serviceaccount:%s:%s", namespace, saName) - canList, canWatch, err := checkControllerSCCAccess(sccName, clusterAPI) - if err != nil { - return false, fmt.Errorf("failed to check access to %s SecurityContextConstraints: %w", sccName, err) - } else if !canList { - return false, fmt.Errorf("controller is not permitted to list SecurityContextConstraints") - } else if !canWatch { - return false, fmt.Errorf("controller is not permitted to watch SecurityContextConstraints") - } - scc := &securityv1.SecurityContextConstraints{} - if err := clusterAPI.Client.Get(clusterAPI.Ctx, types.NamespacedName{Name: sccName}, scc); err != nil { + if err := clusterAPI.NonCachingClient.Get(clusterAPI.Ctx, types.NamespacedName{Name: sccName}, scc); err != nil { switch { - case k8sErrors.IsUnauthorized(err): + case k8sErrors.IsForbidden(err): return false, fmt.Errorf("operator does not have permissions to get the '%s' SecurityContextConstraints", sccName) case k8sErrors.IsNotFound(err): return false, fmt.Errorf("requested SecurityContextConstraints '%s' not found on cluster", sccName) @@ -136,9 +126,9 @@ func addSCCToServiceAccount(saName, namespace, sccName string, clusterAPI sync.C } scc.Users = append(scc.Users, serviceaccount) - if err := clusterAPI.Client.Update(clusterAPI.Ctx, scc); err != nil { + if err := clusterAPI.NonCachingClient.Update(clusterAPI.Ctx, scc); err != nil { switch { - case k8sErrors.IsUnauthorized(err): + case k8sErrors.IsForbidden(err): return false, fmt.Errorf("operator does not have permissions to update the '%s' 
SecurityContextConstraints", sccName) case k8sErrors.IsConflict(err): return true, nil @@ -149,53 +139,3 @@ func addSCCToServiceAccount(saName, namespace, sccName string, clusterAPI sync.C return false, nil } - -// checkControllerSCCAccess checks RBAC *prerequisites* for managing SecurityContextConstraints on OpenShift. -// Checking this specifically is required the controller does not have these permissions by default, and the -// internal cache in controller-runtime attempts to use listWatches under the hood. Attempting to use SCCs in -// workspaces without the additional RBAC will lock the reconcile as any client actions that depend on the -// cache will fail to return an error. -func checkControllerSCCAccess(sccName string, clusterAPI sync.ClusterAPI) (canList, canWatch bool, err error) { - // Controller caching functionality depends on being able to list and watch resources. Errors due to these - // RBAC verbs not being available will be thrown out-of-band with the reconcile and cannot be detected by - // e.g. a failed Get() - listSSAR := &v1.SelfSubjectAccessReview{ - Spec: v1.SelfSubjectAccessReviewSpec{ - ResourceAttributes: &v1.ResourceAttributes{ - Verb: "list", - Group: "security.openshift.io", - Version: "v1", - Resource: "securitycontextconstraints", - Name: sccName, - }, - }, - } - if err := clusterAPI.Client.Create(clusterAPI.Ctx, listSSAR); err != nil { - return false, false, err - } - - if !listSSAR.Status.Allowed { - return false, false, nil - } - - watchSSAR := &v1.SelfSubjectAccessReview{ - Spec: v1.SelfSubjectAccessReviewSpec{ - ResourceAttributes: &v1.ResourceAttributes{ - Verb: "watch", - Group: "security.openshift.io", - Version: "v1", - Resource: "securitycontextconstraints", - Name: sccName, - }, - }, - } - if err := clusterAPI.Client.Create(clusterAPI.Ctx, watchSSAR); err != nil { - return true, false, err - } - - if !watchSSAR.Status.Allowed { - return true, false, nil - } - - return true, true, nil -} From 74780ccb36b3ac0f60c8fbd8983b7299d54d0c2d Mon Sep 17 00:00:00 2001 From: Angel Misevski Date: Fri, 19 Nov 2021 15:55:20 -0500 Subject: [PATCH 7/7] Add finalizer for SA SCCs when they're used and clean up on deletion Signed-off-by: Angel Misevski --- .../workspace/devworkspace_controller.go | 9 ++- controllers/workspace/finalize.go | 46 +++++++++++-- pkg/provision/workspace/serviceaccount.go | 67 +++++++++++++++++++ 3 files changed, 115 insertions(+), 7 deletions(-) diff --git a/controllers/workspace/devworkspace_controller.go b/controllers/workspace/devworkspace_controller.go index af50194a1..9a317ec5a 100644 --- a/controllers/workspace/devworkspace_controller.go +++ b/controllers/workspace/devworkspace_controller.go @@ -260,7 +260,7 @@ func (r *DevWorkspaceReconciler) Reconcile(ctx context.Context, req ctrl.Request // Set finalizer on DevWorkspace if necessary // Note: we need to check the flattened workspace to see if a finalizer is needed, as plugins could require storage - if isFinalizerNecessary(workspace, storageProvisioner) { + if storageProvisioner.NeedsStorage(&workspace.Spec.Template) { coputil.AddFinalizer(clusterWorkspace, storageCleanupFinalizer) if err := r.Update(ctx, clusterWorkspace); err != nil { return reconcile.Result{}, err @@ -367,6 +367,13 @@ func (r *DevWorkspaceReconciler) Reconcile(ctx context.Context, req ctrl.Request } return reconcile.Result{Requeue: serviceAcctStatus.Requeue}, serviceAcctStatus.Err } + if wsprovision.NeedsServiceAccountFinalizer(&workspace.Spec.Template) { + coputil.AddFinalizer(clusterWorkspace, 
serviceAccountCleanupFinalizer) + if err := r.Update(ctx, clusterWorkspace); err != nil { + return reconcile.Result{}, err + } + } + serviceAcctName := serviceAcctStatus.ServiceAccountName reconcileStatus.setConditionTrue(dw.DevWorkspaceServiceAccountReady, "DevWorkspace serviceaccount ready") diff --git a/controllers/workspace/finalize.go b/controllers/workspace/finalize.go index d28a17723..a7c1d3275 100644 --- a/controllers/workspace/finalize.go +++ b/controllers/workspace/finalize.go @@ -33,13 +33,23 @@ import ( ) const ( - storageCleanupFinalizer = "storage.controller.devfile.io" + storageCleanupFinalizer = "storage.controller.devfile.io" + serviceAccountCleanupFinalizer = "serviceaccount.controller.devfile.io" ) -func (r *DevWorkspaceReconciler) finalize(ctx context.Context, log logr.Logger, workspace *dw.DevWorkspace) (reconcile.Result, error) { - if !coputil.HasFinalizer(workspace, storageCleanupFinalizer) { - return reconcile.Result{}, nil +func (r *DevWorkspaceReconciler) workspaceNeedsFinalize(workspace *dw.DevWorkspace) bool { + for _, finalizer := range workspace.Finalizers { + if finalizer == storageCleanupFinalizer { + return true + } + if finalizer == serviceAccountCleanupFinalizer { + return true + } } + return false +} + +func (r *DevWorkspaceReconciler) finalize(ctx context.Context, log logr.Logger, workspace *dw.DevWorkspace) (reconcile.Result, error) { workspace.Status.Message = "Cleaning up resources for deletion" workspace.Status.Phase = devworkspacePhaseTerminating err := r.Client.Status().Update(ctx, workspace) @@ -47,6 +57,18 @@ func (r *DevWorkspaceReconciler) finalize(ctx context.Context, log logr.Logger, return reconcile.Result{}, err } + for _, finalizer := range workspace.Finalizers { + switch finalizer { + case storageCleanupFinalizer: + return r.finalizeStorage(ctx, log, workspace) + case serviceAccountCleanupFinalizer: + return r.finalizeServiceAccount(ctx, log, workspace) + } + } + return reconcile.Result{}, nil +} + +func (r *DevWorkspaceReconciler) finalizeStorage(ctx context.Context, log logr.Logger, workspace *dw.DevWorkspace) (reconcile.Result, error) { // Need to make sure Deployment is cleaned up before starting job to avoid mounting issues for RWO PVCs wait, err := wsprovision.DeleteWorkspaceDeployment(ctx, workspace, r.Client) if err != nil { @@ -98,8 +120,20 @@ func (r *DevWorkspaceReconciler) finalize(ctx context.Context, log logr.Logger, return reconcile.Result{}, r.Update(ctx, workspace) } -func isFinalizerNecessary(workspace *dw.DevWorkspace, provisioner storage.Provisioner) bool { - return provisioner.NeedsStorage(&workspace.Spec.Template) +func (r *DevWorkspaceReconciler) finalizeServiceAccount(ctx context.Context, log logr.Logger, workspace *dw.DevWorkspace) (reconcile.Result, error) { + retry, err := wsprovision.FinalizeServiceAccount(workspace, ctx, r.NonCachingClient) + if err != nil { + log.Error(err, "Failed to finalize workspace ServiceAccount") + failedStatus := currentStatus{phase: dw.DevWorkspaceStatusError} + failedStatus.setConditionTrue(dw.DevWorkspaceError, err.Error()) + return r.updateWorkspaceStatus(workspace, r.Log, &failedStatus, reconcile.Result{}, nil) + } + if retry { + return reconcile.Result{Requeue: true}, nil + } + log.Info("ServiceAccount clean up successful; clearing finalizer") + coputil.RemoveFinalizer(workspace, serviceAccountCleanupFinalizer) + return reconcile.Result{}, r.Update(ctx, workspace) } func (r *DevWorkspaceReconciler) namespaceIsTerminating(ctx context.Context, namespace string) (bool, error) { 
diff --git a/pkg/provision/workspace/serviceaccount.go b/pkg/provision/workspace/serviceaccount.go index d9db10fbe..3aafddaf0 100644 --- a/pkg/provision/workspace/serviceaccount.go +++ b/pkg/provision/workspace/serviceaccount.go @@ -16,6 +16,7 @@ package workspace import ( + "context" "fmt" dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" @@ -26,6 +27,7 @@ import ( k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + crclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "github.com/devfile/devworkspace-operator/pkg/common" @@ -103,6 +105,27 @@ func SyncServiceAccount( } } +func NeedsServiceAccountFinalizer(workspace *dw.DevWorkspaceTemplateSpec) bool { + if !workspace.Attributes.Exists(constants.WorkspaceSCCAttribute) { + return false + } + if workspace.Attributes.GetString(constants.WorkspaceSCCAttribute, nil) == "" { + return false + } + return true +} + +func FinalizeServiceAccount(workspace *dw.DevWorkspace, ctx context.Context, nonCachingClient crclient.Client) (retry bool, err error) { + saName := common.ServiceAccountName(workspace.Status.DevWorkspaceId) + namespace := workspace.Namespace + if !workspace.Spec.Template.Attributes.Exists(constants.WorkspaceSCCAttribute) { + return false, nil + } + sccName := workspace.Spec.Template.Attributes.GetString(constants.WorkspaceSCCAttribute, nil) + + return removeSCCFromServiceAccount(saName, namespace, sccName, ctx, nonCachingClient) +} + func addSCCToServiceAccount(saName, namespace, sccName string, clusterAPI sync.ClusterAPI) (retry bool, err error) { serviceaccount := fmt.Sprintf("system:serviceaccount:%s:%s", namespace, saName) @@ -139,3 +162,47 @@ return false, nil } + +func removeSCCFromServiceAccount(saName, namespace, sccName string, ctx context.Context, nonCachingClient crclient.Client) (retry bool, err error) { + serviceaccount := fmt.Sprintf("system:serviceaccount:%s:%s", namespace, saName) + + scc := &securityv1.SecurityContextConstraints{} + if err := nonCachingClient.Get(ctx, types.NamespacedName{Name: sccName}, scc); err != nil { + switch { + case k8sErrors.IsForbidden(err): + return false, fmt.Errorf("operator does not have permissions to get the '%s' SecurityContextConstraints", sccName) + case k8sErrors.IsNotFound(err): + return false, fmt.Errorf("requested SecurityContextConstraints '%s' not found on cluster", sccName) + default: + return false, err + } + } + + found := false + var newUsers []string + for _, user := range scc.Users { + if user == serviceaccount { + found = true + continue + } + newUsers = append(newUsers, user) + } + if !found { + return false, nil + } + + scc.Users = newUsers + + if err := nonCachingClient.Update(ctx, scc); err != nil { + switch { + case k8sErrors.IsForbidden(err): + return false, fmt.Errorf("operator does not have permissions to update the '%s' SecurityContextConstraints", sccName) + case k8sErrors.IsConflict(err): + return true, nil + default: + return false, err + } + } + + return false, nil +}
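Note on deploying this series: the patches above intentionally do not grant the controller any rights on SecurityContextConstraints, so a cluster-admin has to add them before the 'controller.devfile.io/scc' attribute can be used. A minimal sketch of such a grant, covering the get/update verbs the non-caching client uses (the role/binding names are illustrative, and the ServiceAccount name and namespace are assumptions to be replaced with the actual install layout):

    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      name: devworkspace-scc-editor   # hypothetical name
    rules:
    - apiGroups:
      - security.openshift.io
      resources:
      - securitycontextconstraints
      verbs:
      - get
      - update
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: devworkspace-scc-editor   # hypothetical name
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: devworkspace-scc-editor
    subjects:
    - kind: ServiceAccount
      name: devworkspace-controller-serviceaccount   # assumed SA name
      namespace: openshift-operators                 # assumed install namespace

With the grant in place, a user who has the "use" permission on the named SCC can opt in per workspace via the attribute from patch 3 (e.g. controller.devfile.io/scc: "anyuid").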