From dba663eb22d8e5d972a4495b2bc99f396f442d01 Mon Sep 17 00:00:00 2001
From: Sebastian Widmer
Date: Wed, 19 Jun 2024 16:19:01 +0200
Subject: [PATCH] Implement `UpgradeSuspensionWindow`

---
 api/v1beta1/upgradejob_types.go               |   2 +
 api/v1beta1/upgradejobhook_types.go           |   1 +
 api/v1beta1/upgradesuspensionwindow_types.go  |  60 +++++++
 api/v1beta1/zz_generated.deepcopy.go          | 101 +++++++++++
 ...agedupgrade.appuio.io_upgradejobhooks.yaml |   1 +
 ...de.appuio.io_upgradesuspensionwindows.yaml | 168 ++++++++++++++++++
 config/crd/kustomization.yaml                 |   3 +
 ...injection_in_upgradesuspensionwindows.yaml |   7 +
 .../webhook_in_upgradesuspensionwindows.yaml  |  16 ++
 config/rbac/role.yaml                         |  14 ++
 .../upgradesuspensionwindow_editor_role.yaml  |  31 ++++
 .../upgradesuspensionwindow_viewer_role.yaml  |  27 +++
 controllers/upgradeconfig_controller.go       |  52 +++++-
 controllers/upgradeconfig_controller_test.go  | 159 ++++++++++++++++-
 controllers/upgradejob_controller.go          |  69 +++++--
 controllers/upgradejob_controller_test.go     | 135 +++++++++++++-
 main.go                                       |   5 +-
 17 files changed, 834 insertions(+), 17 deletions(-)
 create mode 100644 api/v1beta1/upgradesuspensionwindow_types.go
 create mode 100644 config/crd/bases/managedupgrade.appuio.io_upgradesuspensionwindows.yaml
 create mode 100644 config/crd/patches/cainjection_in_upgradesuspensionwindows.yaml
 create mode 100644 config/crd/patches/webhook_in_upgradesuspensionwindows.yaml
 create mode 100644 config/rbac/upgradesuspensionwindow_editor_role.yaml
 create mode 100644 config/rbac/upgradesuspensionwindow_viewer_role.yaml

diff --git a/api/v1beta1/upgradejob_types.go b/api/v1beta1/upgradejob_types.go
index 9667ab9..33502a3 100644
--- a/api/v1beta1/upgradejob_types.go
+++ b/api/v1beta1/upgradejob_types.go
@@ -45,6 +45,8 @@ const (
 	UpgradeJobReasonStarted = "Started"
 	// UpgradeJobReasonSucceeded is used when a step of the upgrade job did succeed
 	UpgradeJobReasonSucceeded = "Succeeded"
+	// UpgradeJobReasonSkipped is used when the upgrade job was skipped due to a suspension window
+	UpgradeJobReasonSkipped = "Skipped"
 	// UpgradeJobReasonCompleted is used when a step of the upgrade job did succeed
 	UpgradeJobReasonCompleted = "Completed"
 	// UpgradeJobReasonInProgress is used when the pre health check was done
diff --git a/api/v1beta1/upgradejobhook_types.go b/api/v1beta1/upgradejobhook_types.go
index ee8e5a9..cfb5a70 100644
--- a/api/v1beta1/upgradejobhook_types.go
+++ b/api/v1beta1/upgradejobhook_types.go
@@ -56,6 +56,7 @@ var eventsInfluencingOutcome = []UpgradeEvent{
 // UpgradeJobHookSpec defines the desired state of UpgradeJobHook
 type UpgradeJobHookSpec struct {
 	// Events is the list of events to trigger the hook to be executed.
+	// Events should be idempotent and not assume any prior events have been executed.
 	// `Create`, `Start`, and `UpgradeComplete` are the events that influence the outcome of the upgrade.
 	// `Finish`, `Success`, and `Failure` do not influence the outcome of the upgrade,
 	// Job completion will not be checked, they are only used for informational purposes.
diff --git a/api/v1beta1/upgradesuspensionwindow_types.go b/api/v1beta1/upgradesuspensionwindow_types.go
new file mode 100644
index 0000000..b08dbbc
--- /dev/null
+++ b/api/v1beta1/upgradesuspensionwindow_types.go
@@ -0,0 +1,60 @@
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// UpgradeSuspensionWindowSpec defines the desired state of UpgradeSuspensionWindow
+type UpgradeSuspensionWindowSpec struct {
+	// Start is the time when the suspension window starts.
+	// +kubebuilder:validation:Required
+	// +required
+	Start metav1.Time `json:"start"`
+	// End is the time when the suspension window ends.
+	// +kubebuilder:validation:Required
+	// +required
+	End metav1.Time `json:"end"`
+	Reason string `json:"reason"`
+
+	// ConfigSelector is the selector for UpgradeConfigs to suspend upgrades for.
+	// An empty label selector matches all objects. A null label selector matches no objects.
+	// Matching UpgradeConfig objects won’t create UpgradeJob objects during the time window.
+	ConfigSelector *metav1.LabelSelector `json:"configSelector,omitempty"`
+	// JobSelector is the selector for UpgradeJobs to suspend upgrades for.
+	// An empty label selector matches all objects. A null label selector matches no objects.
+	// Matching UpgradeJob objects won’t start the upgrade during the time window.
+	// Skipped jobs will be marked as successful with reason `Skipped`.
+	// Success and Finish hooks will be executed as normal.
+	// If the job is owned by an UpgradeConfig object, the owning config creates a new job with the current (possibly same) version in the next non-suspended time window.
+	// Already running jobs will be allowed to finish.
+	JobSelector *metav1.LabelSelector `json:"jobSelector,omitempty"`
+}
+
+// UpgradeSuspensionWindowStatus defines the observed state of UpgradeSuspensionWindow
+type UpgradeSuspensionWindowStatus struct {
+}
+
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+
+// UpgradeSuspensionWindow is the Schema for the upgradesuspensionwindows API
+type UpgradeSuspensionWindow struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   UpgradeSuspensionWindowSpec   `json:"spec,omitempty"`
+	Status UpgradeSuspensionWindowStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// UpgradeSuspensionWindowList contains a list of UpgradeSuspensionWindow
+type UpgradeSuspensionWindowList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []UpgradeSuspensionWindow `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&UpgradeSuspensionWindow{}, &UpgradeSuspensionWindowList{})
+}
diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go
index 92603b2..30a5ce3 100644
--- a/api/v1beta1/zz_generated.deepcopy.go
+++ b/api/v1beta1/zz_generated.deepcopy.go
@@ -688,3 +688,104 @@ func (in *UpgradeJobStatus) DeepCopy() *UpgradeJobStatus {
 	in.DeepCopyInto(out)
 	return out
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UpgradeSuspensionWindow) DeepCopyInto(out *UpgradeSuspensionWindow) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	out.Status = in.Status
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradeSuspensionWindow.
+func (in *UpgradeSuspensionWindow) DeepCopy() *UpgradeSuspensionWindow {
+	if in == nil {
+		return nil
+	}
+	out := new(UpgradeSuspensionWindow)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *UpgradeSuspensionWindow) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UpgradeSuspensionWindowList) DeepCopyInto(out *UpgradeSuspensionWindowList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]UpgradeSuspensionWindow, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradeSuspensionWindowList.
+func (in *UpgradeSuspensionWindowList) DeepCopy() *UpgradeSuspensionWindowList {
+	if in == nil {
+		return nil
+	}
+	out := new(UpgradeSuspensionWindowList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *UpgradeSuspensionWindowList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UpgradeSuspensionWindowSpec) DeepCopyInto(out *UpgradeSuspensionWindowSpec) {
+	*out = *in
+	in.Start.DeepCopyInto(&out.Start)
+	in.End.DeepCopyInto(&out.End)
+	if in.ConfigSelector != nil {
+		in, out := &in.ConfigSelector, &out.ConfigSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.JobSelector != nil {
+		in, out := &in.JobSelector, &out.JobSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradeSuspensionWindowSpec.
+func (in *UpgradeSuspensionWindowSpec) DeepCopy() *UpgradeSuspensionWindowSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(UpgradeSuspensionWindowSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UpgradeSuspensionWindowStatus) DeepCopyInto(out *UpgradeSuspensionWindowStatus) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradeSuspensionWindowStatus.
+func (in *UpgradeSuspensionWindowStatus) DeepCopy() *UpgradeSuspensionWindowStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(UpgradeSuspensionWindowStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/config/crd/bases/managedupgrade.appuio.io_upgradejobhooks.yaml b/config/crd/bases/managedupgrade.appuio.io_upgradejobhooks.yaml
index 4abdf07..0e0b888 100644
--- a/config/crd/bases/managedupgrade.appuio.io_upgradejobhooks.yaml
+++ b/config/crd/bases/managedupgrade.appuio.io_upgradejobhooks.yaml
@@ -49,6 +49,7 @@ spec:
               events:
                 description: |-
                   Events is the list of events to trigger the hook to be executed.
+                  Events should be idempotent and not assume any prior events have been executed.
                   `Create`, `Start`, and `UpgradeComplete` are the events that influence the outcome of the upgrade.
                   `Finish`, `Success`, and `Failure` do not influence the outcome of the upgrade,
                   Job completion will not be checked, they are only used for informational purposes.
diff --git a/config/crd/bases/managedupgrade.appuio.io_upgradesuspensionwindows.yaml b/config/crd/bases/managedupgrade.appuio.io_upgradesuspensionwindows.yaml
new file mode 100644
index 0000000..41d5ac0
--- /dev/null
+++ b/config/crd/bases/managedupgrade.appuio.io_upgradesuspensionwindows.yaml
@@ -0,0 +1,168 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.15.0
+  name: upgradesuspensionwindows.managedupgrade.appuio.io
+spec:
+  group: managedupgrade.appuio.io
+  names:
+    kind: UpgradeSuspensionWindow
+    listKind: UpgradeSuspensionWindowList
+    plural: upgradesuspensionwindows
+    singular: upgradesuspensionwindow
+  scope: Namespaced
+  versions:
+  - name: v1beta1
+    schema:
+      openAPIV3Schema:
+        description: UpgradeSuspensionWindow is the Schema for the upgradesuspensionwindows API
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: UpgradeSuspensionWindowSpec defines the desired state of
+              UpgradeSuspensionWindow
+            properties:
+              configSelector:
+                description: |-
+                  ConfigSelector is the selector for UpgradeConfigs to suspend upgrades for.
+                  An empty label selector matches all objects. A null label selector matches no objects.
+                  Matching UpgradeConfig objects won’t create UpgradeJob objects during the time window.
+                properties:
+                  matchExpressions:
+                    description: matchExpressions is a list of label selector requirements.
+                      The requirements are ANDed.
+                    items:
+                      description: |-
+                        A label selector requirement is a selector that contains values, a key, and an operator that
+                        relates the key and values.
+                      properties:
+                        key:
+                          description: key is the label key that the selector applies
+                            to.
+                          type: string
+                        operator:
+                          description: |-
+                            operator represents a key's relationship to a set of values.
+                            Valid operators are In, NotIn, Exists and DoesNotExist.
+                          type: string
+                        values:
+                          description: |-
+                            values is an array of string values. If the operator is In or NotIn,
+                            the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                            the values array must be empty. This array is replaced during a strategic
+                            merge patch.
+                          items:
+                            type: string
+                          type: array
+                          x-kubernetes-list-type: atomic
+                      required:
+                      - key
+                      - operator
+                      type: object
+                    type: array
+                    x-kubernetes-list-type: atomic
+                  matchLabels:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                      map is equivalent to an element of matchExpressions, whose key field is "key", the
+                      operator is "In", and the values array contains only "value". The requirements are ANDed.
+                    type: object
+                type: object
+                x-kubernetes-map-type: atomic
+              end:
+                description: End is the time when the suspension window ends.
+                format: date-time
+                type: string
+              jobSelector:
+                description: |-
+                  JobSelector is the selector for UpgradeJobs to suspend upgrades for.
+                  An empty label selector matches all objects. A null label selector matches no objects.
+                  Matching UpgradeJob objects won’t start the upgrade during the time window.
+                  Skipped jobs will be marked as successful with reason `Skipped`.
+                  Success and Finish hooks will be executed as normal.
+                  If the job is owned by an UpgradeConfig object, the owning config creates a new job with the current (possibly same) version in the next non-suspended time window.
+                  Already running jobs will be allowed to finish.
+                properties:
+                  matchExpressions:
+                    description: matchExpressions is a list of label selector requirements.
+                      The requirements are ANDed.
+                    items:
+                      description: |-
+                        A label selector requirement is a selector that contains values, a key, and an operator that
+                        relates the key and values.
+                      properties:
+                        key:
+                          description: key is the label key that the selector applies
+                            to.
+                          type: string
+                        operator:
+                          description: |-
+                            operator represents a key's relationship to a set of values.
+                            Valid operators are In, NotIn, Exists and DoesNotExist.
+                          type: string
+                        values:
+                          description: |-
+                            values is an array of string values. If the operator is In or NotIn,
+                            the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                            the values array must be empty. This array is replaced during a strategic
+                            merge patch.
+                          items:
+                            type: string
+                          type: array
+                          x-kubernetes-list-type: atomic
+                      required:
+                      - key
+                      - operator
+                      type: object
+                    type: array
+                    x-kubernetes-list-type: atomic
+                  matchLabels:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                      map is equivalent to an element of matchExpressions, whose key field is "key", the
+                      operator is "In", and the values array contains only "value". The requirements are ANDed.
+                    type: object
+                type: object
+                x-kubernetes-map-type: atomic
+              reason:
+                type: string
+              start:
+                description: Start is the time when the suspension window starts.
+                format: date-time
+                type: string
+            required:
+            - end
+            - reason
+            - start
+            type: object
+          status:
+            description: UpgradeSuspensionWindowStatus defines the observed state
+              of UpgradeSuspensionWindow
+            type: object
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
index 50803ae..a68a2f6 100644
--- a/config/crd/kustomization.yaml
+++ b/config/crd/kustomization.yaml
@@ -6,6 +6,7 @@ resources:
 - bases/managedupgrade.appuio.io_upgradejobs.yaml
 - bases/managedupgrade.appuio.io_upgradeconfigs.yaml
 - bases/managedupgrade.appuio.io_upgradejobhooks.yaml
+- bases/managedupgrade.appuio.io_upgradesuspensionwindows.yaml
 #+kubebuilder:scaffold:crdkustomizeresource
 
 patchesStrategicMerge:
@@ -15,6 +16,7 @@ patchesStrategicMerge:
 #- patches/webhook_in_upgradejobs.yaml
 #- patches/webhook_in_upgradeconfigs.yaml
 #- patches/webhook_in_upgradejobhooks.yaml
+#- patches/webhook_in_upgradesuspensionwindows.yaml
 #+kubebuilder:scaffold:crdkustomizewebhookpatch
 
 # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
@@ -23,6 +25,7 @@ patchesStrategicMerge:
 #- patches/cainjection_in_upgradejobs.yaml
 #- patches/cainjection_in_upgradeconfigs.yaml
 #- patches/cainjection_in_upgradejobhooks.yaml
+#- patches/cainjection_in_upgradesuspensionwindows.yaml
 #+kubebuilder:scaffold:crdkustomizecainjectionpatch
 
 # the following config is for teaching kustomize how to do kustomization for CRDs.
diff --git a/config/crd/patches/cainjection_in_upgradesuspensionwindows.yaml b/config/crd/patches/cainjection_in_upgradesuspensionwindows.yaml
new file mode 100644
index 0000000..b384bcc
--- /dev/null
+++ b/config/crd/patches/cainjection_in_upgradesuspensionwindows.yaml
@@ -0,0 +1,7 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
+  name: upgradesuspensionwindows.managedupgrade.appuio.io
diff --git a/config/crd/patches/webhook_in_upgradesuspensionwindows.yaml b/config/crd/patches/webhook_in_upgradesuspensionwindows.yaml
new file mode 100644
index 0000000..a82c662
--- /dev/null
+++ b/config/crd/patches/webhook_in_upgradesuspensionwindows.yaml
@@ -0,0 +1,16 @@
+# The following patch enables a conversion webhook for the CRD
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: upgradesuspensionwindows.managedupgrade.appuio.io
+spec:
+  conversion:
+    strategy: Webhook
+    webhook:
+      clientConfig:
+        service:
+          namespace: system
+          name: webhook-service
+          path: /convert
+      conversionReviewVersions:
+      - v1
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 290f407..c9808b8 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -142,3 +142,17 @@ rules:
   - get
   - patch
   - update
+- apiGroups:
+  - managedupgrade.appuio.io
+  resources:
+  - upgradesuspensionwindows
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - managedupgrade.appuio.io
+  resources:
+  - upgradesuspensionwindows/status
+  verbs:
+  - get
diff --git a/config/rbac/upgradesuspensionwindow_editor_role.yaml b/config/rbac/upgradesuspensionwindow_editor_role.yaml
new file mode 100644
index 0000000..f69bb1d
--- /dev/null
+++ b/config/rbac/upgradesuspensionwindow_editor_role.yaml
@@ -0,0 +1,31 @@
+# permissions for end users to edit upgradesuspensionwindows.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/name: clusterrole
+    app.kubernetes.io/instance: upgradesuspensionwindow-editor-role
+    app.kubernetes.io/component: rbac
+    app.kubernetes.io/created-by: openshift-upgrade-controller
+    app.kubernetes.io/part-of: openshift-upgrade-controller
+    app.kubernetes.io/managed-by: kustomize
+  name: upgradesuspensionwindow-editor-role
+rules:
+- apiGroups:
+  - managedupgrade.appuio.io
+  resources:
+  - upgradesuspensionwindows
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - managedupgrade.appuio.io
+  resources:
+  - upgradesuspensionwindows/status
+  verbs:
+  - get
diff --git a/config/rbac/upgradesuspensionwindow_viewer_role.yaml b/config/rbac/upgradesuspensionwindow_viewer_role.yaml
new file mode 100644
index 0000000..b6a3338
--- /dev/null
+++ b/config/rbac/upgradesuspensionwindow_viewer_role.yaml
@@ -0,0 +1,27 @@
+# permissions for end users to view upgradesuspensionwindows.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/name: clusterrole
+    app.kubernetes.io/instance: upgradesuspensionwindow-viewer-role
+    app.kubernetes.io/component: rbac
+    app.kubernetes.io/created-by: openshift-upgrade-controller
+    app.kubernetes.io/part-of: openshift-upgrade-controller
+    app.kubernetes.io/managed-by: kustomize
+  name: upgradesuspensionwindow-viewer-role
+rules:
+- apiGroups:
+  - managedupgrade.appuio.io
+  resources:
+  - upgradesuspensionwindows
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - managedupgrade.appuio.io
+  resources:
+  - upgradesuspensionwindows/status
+  verbs:
+  - get
diff --git a/controllers/upgradeconfig_controller.go b/controllers/upgradeconfig_controller.go
index 8bade0d..73ecc4f 100644
--- a/controllers/upgradeconfig_controller.go
+++ b/controllers/upgradeconfig_controller.go
@@ -11,8 +11,10 @@ import (
 	"github.com/robfig/cron/v3"
 	apimeta "k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/tools/record"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/log"
@@ -21,6 +23,11 @@ import (
 	"github.com/appuio/openshift-upgrade-controller/pkg/clusterversion"
 )
 
+const (
+	EventReasonUpgradeConfigSuspended                   = "UpgradeConfigSuspended"
+	EventReasonUpgradeConfigSuspendedBySuspensionWindow = "UpgradeConfigSuspendedBySuspensionWindow"
+)
+
 type Clock interface {
 	Now() time.Time
 }
@@ -28,7 +35,8 @@ type Clock interface {
 // UpgradeConfigReconciler reconciles a UpgradeConfig object
 type UpgradeConfigReconciler struct {
 	client.Client
-	Scheme *runtime.Scheme
+	Scheme   *runtime.Scheme
+	Recorder record.EventRecorder
 
 	Clock Clock
 
@@ -41,6 +49,9 @@ type UpgradeConfigReconciler struct {
 //+kubebuilder:rbac:groups=managedupgrade.appuio.io,resources=upgradeconfigs/status,verbs=get;update;patch
 //+kubebuilder:rbac:groups=managedupgrade.appuio.io,resources=upgradeconfigs/finalizers,verbs=update
 
+//+kubebuilder:rbac:groups=managedupgrade.appuio.io,resources=upgradesuspensionwindows,verbs=get;list;watch
+//+kubebuilder:rbac:groups=managedupgrade.appuio.io,resources=upgradesuspensionwindows/status,verbs=get
+
 // Reconcile implements the reconcile loop for UpgradeConfig.
 // It schedules UpgradeJobs based on the UpgradeConfig's schedule - if an update is available.
 func (r *UpgradeConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
@@ -143,7 +154,18 @@ findNextRun:
 	// Schedule is suspended, do nothing
 	if uc.Spec.Schedule.Suspend {
-		l.Info("would schedule job, but schedule is suspended")
+		l.Info("would schedule job, but schedule is suspended by .spec.schedule.suspend")
+		r.Recorder.Event(&uc, "Normal", EventReasonUpgradeConfigSuspended, "Upgrade scheduling is suspended by .spec.schedule.suspend")
+		return ctrl.Result{}, r.setLastScheduledUpgrade(ctx, &uc, nextRun)
+	}
+	// Check if we are in a suspension window
+	window, err := r.matchingUpgradeSuspensionWindow(ctx, uc, now)
+	if err != nil {
+		return ctrl.Result{}, fmt.Errorf("could not search matching upgrade suspension window: %w", err)
+	}
+	if window != nil {
+		l.Info("would schedule job, but schedule is suspended by UpgradeSuspensionWindow", "window", window.Name, "reason", window.Spec.Reason, "start", window.Spec.Start.Time, "end", window.Spec.End.Time)
+		r.Recorder.Eventf(&uc, "Normal", EventReasonUpgradeConfigSuspendedBySuspensionWindow, "Upgrade scheduling is suspended by UpgradeSuspensionWindow %s: %s", window.Name, window.Spec.Reason)
 		return ctrl.Result{}, r.setLastScheduledUpgrade(ctx, &uc, nextRun)
 	}
 
@@ -215,6 +237,32 @@ func (r *UpgradeConfigReconciler) getControlledJobs(ctx context.Context, uc mana
 	return filterControlledJobs(uc, jobs.Items), nil
 }
 
+// matchingUpgradeSuspensionWindow returns the UpgradeSuspensionWindow that matches the UpgradeConfig and time, or nil if none matches.
+func (r *UpgradeConfigReconciler) matchingUpgradeSuspensionWindow(ctx context.Context, ucw managedupgradev1beta1.UpgradeConfig, t time.Time) (*managedupgradev1beta1.UpgradeSuspensionWindow, error) {
+	l := log.FromContext(ctx)
+
+	var allWindows managedupgradev1beta1.UpgradeSuspensionWindowList
+	if err := r.List(ctx, &allWindows, client.InNamespace(ucw.Namespace)); err != nil {
+		return nil, fmt.Errorf("failed to list upgrade suspension windows: %w", err)
+	}
+	for _, window := range allWindows.Items {
+		sel, err := metav1.LabelSelectorAsSelector(window.Spec.ConfigSelector)
+		if err != nil {
+			l.Error(err, "failed to parse selector from window", "window", window.Name, "selector", "configSelector")
+			continue
+		}
+		if !sel.Matches(labels.Set(ucw.Labels)) {
+			continue
+		}
+		if t.Before(window.Spec.Start.Time) || t.After(window.Spec.End.Time) {
+			continue
+		}
+		return &window, nil
+	}
+
+	return nil, nil
+}
+
 func latestScheduledJob(jobs []managedupgradev1beta1.UpgradeJob) *managedupgradev1beta1.UpgradeJob {
 	var latest *managedupgradev1beta1.UpgradeJob
 	for _, job := range jobs {
diff --git a/controllers/upgradeconfig_controller_test.go b/controllers/upgradeconfig_controller_test.go
index 963ca99..d487eee 100644
--- a/controllers/upgradeconfig_controller_test.go
+++ b/controllers/upgradeconfig_controller_test.go
@@ -3,6 +3,7 @@ package controllers
 import (
 	"context"
 	"strconv"
+	"strings"
 	"testing"
 	"time"
 
@@ -10,6 +11,7 @@ import (
 	"github.com/stretchr/testify/require"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/tools/record"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
@@ -55,7 +57,32 @@ func Test_UpgradeConfigReconciler_Reconcile_E2E(t *testing.T) {
 		},
 	}
 
-	client := controllerClient(t, ucv, upgradeConfig)
+	expiredSuspensionWindow := &managedupgradev1beta1.UpgradeSuspensionWindow{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "expired-window",
+			Namespace: "appuio-openshift-upgrade-controller",
+		},
+		Spec: managedupgradev1beta1.UpgradeSuspensionWindowSpec{
+			Start: metav1.NewTime(clock.Now().Add(-7 * 24 * time.Hour)),
+			End:   metav1.NewTime(clock.Now().Add(-24 * time.Hour)),
+			JobSelector: &metav1.LabelSelector{
+				MatchLabels: upgradeConfig.Labels,
+			},
+		},
+	}
+	unrelatedSuspensionWindow := &managedupgradev1beta1.UpgradeSuspensionWindow{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "unrelated-window",
+			Namespace: "appuio-openshift-upgrade-controller",
+		},
+		Spec: managedupgradev1beta1.UpgradeSuspensionWindowSpec{
+			Start:       metav1.NewTime(clock.Now().Add(-24 * time.Hour)),
+			End:         metav1.NewTime(clock.Now().Add(90 * 24 * time.Hour)),
+			JobSelector: &metav1.LabelSelector{},
+		},
+	}
+
+	client := controllerClient(t, ucv, upgradeConfig, expiredSuspensionWindow, unrelatedSuspensionWindow)
 
 	subject := &UpgradeConfigReconciler{
 		Client: client,
@@ -182,6 +209,136 @@ func Test_UpgradeConfigReconciler_Reconcile_E2E(t *testing.T) {
 	})
 }
 
+func Test_UpgradeConfigReconciler_Reconcile_SuspendedByWindow(t *testing.T) {
+	ctx := context.Background()
+	clock := mockClock{now: time.Date(2022, time.April, 4, 8, 0, 0, 0, time.UTC)}
+	t.Log("Now: ", clock.Now())
+	require.Equal(t, 14, func() int { _, isoweek := clock.Now().ISOWeek(); return isoweek }())
+	require.Equal(t, time.Monday, clock.Now().Weekday())
+
+	ucv := &configv1.ClusterVersion{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "version",
+		},
+		Spec: configv1.ClusterVersionSpec{
+			ClusterID: "9b588658-9671-429c-a762-34106da5795f",
+		},
+		Status: configv1.ClusterVersionStatus{
+			AvailableUpdates: []configv1.Release{
+				{
+					Version: "4.5.13",
+					Image:   "quay.io/openshift-release-dev/ocp-release@sha256:d094f1952995b3c5fd8e0b19b128905931e1e8fdb4b6cb377857ab0dfddcff47",
+				},
+			},
+		},
+	}
+
+	upgradeConfig := &managedupgradev1beta1.UpgradeConfig{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:              "daily-maintenance",
+			Namespace:         "appuio-openshift-upgrade-controller",
+			CreationTimestamp: metav1.Time{Time: clock.Now().Add(-time.Hour)},
+		},
+		Spec: managedupgradev1beta1.UpgradeConfigSpec{
+			Schedule: managedupgradev1beta1.UpgradeConfigSchedule{
+				Cron:     "0 22 * * *", // At 22:00 every day
+				Location: "UTC",
+			},
+			PinVersionWindow:     metav1.Duration{Duration: time.Hour},
+			MaxSchedulingDelay:   metav1.Duration{Duration: time.Minute},
+			MaxUpgradeStartDelay: metav1.Duration{Duration: time.Hour},
+			JobTemplate: managedupgradev1beta1.UpgradeConfigJobTemplate{
+				Metadata: metav1.ObjectMeta{
+					Labels: map[string]string{"app": "openshift-upgrade-controller"},
+				},
+			},
+		},
+	}
+
+	suspensionWindow := &managedupgradev1beta1.UpgradeSuspensionWindow{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "suspension-window",
+			Namespace: "appuio-openshift-upgrade-controller",
+		},
+		Spec: managedupgradev1beta1.UpgradeSuspensionWindowSpec{
+			Start:          metav1.NewTime(clock.Now().Add(-time.Hour)),
+			End:            metav1.NewTime(clock.Now().Add(24 * time.Hour)),
+			ConfigSelector: &metav1.LabelSelector{},
+		},
+	}
+
+	client := controllerClient(t, ucv, upgradeConfig, suspensionWindow)
+
+	recorder := record.NewFakeRecorder(5)
+	subject := &UpgradeConfigReconciler{
+		Client:   client,
+		Scheme:   client.Scheme(),
+		Recorder: recorder,
+
+		Clock: &clock,
+
+		ManagedUpstreamClusterVersionName: "version",
+	}
+
+	step(t, "not in job creation window", func(t *testing.T) {
+		res, err := subject.Reconcile(ctx, requestForObject(upgradeConfig))
+		require.NoError(t, err)
+		// 14 hours (08:00->22:00) - 1 hour (version pin window)
+		require.Equal(t, (14*time.Hour)-upgradeConfig.Spec.PinVersionWindow.Duration, res.RequeueAfter)
+		clock.Advance(res.RequeueAfter)
+	})
+
+	step(t, "suspended by window", func(t *testing.T) {
+		reconcileNTimes(t, subject, ctx, requestForObject(upgradeConfig), 3)
+		jobs := listJobs(t, client, upgradeConfig.Namespace)
+		require.Len(t, jobs, 0)
+		requireEventMatches(t, recorder, EventReasonUpgradeConfigSuspendedBySuspensionWindow, suspensionWindow.Name)
+	})
+
+	step(t, "window expired, create job", func(t *testing.T) {
+		clock.Advance(24 * time.Hour)
+
+		_, err := subject.Reconcile(ctx, requestForObject(upgradeConfig))
+		require.NoError(t, err)
+
+		jobs := listJobs(t, client, upgradeConfig.Namespace)
+		require.Len(t, jobs, 1)
+	})
+}
+
+// requireEventMatches asserts that an event with the given substrings is present in the recorder.
+// It consumes all events from the recorder.
+func requireEventMatches(t *testing.T, recorder *record.FakeRecorder, substrings ...string) {
+	t.Helper()
+
+	events := make([]string, 0, len(recorder.Events))
+	for end := false; !end; {
+		select {
+		case event := <-recorder.Events:
+			events = append(events, event)
+		default:
+			end = true
+		}
+	}
+
+	var matchingEvent string
+	for _, event := range events {
+		matches := true
+		for _, substr := range substrings {
+			if !strings.Contains(event, substr) {
+				matches = false
+				break
+			}
+		}
+		if matches {
+			matchingEvent = event
+			break
+		}
+	}
+
+	require.NotEmpty(t, matchingEvent, "no event matches %v, got %v", substrings, events)
+}
+
 // requireTimeEqual asserts that two times are equal, ignoring their timezones.
 func requireTimeEqual(t *testing.T, expected, actual time.Time, msgAndArgs ...any) {
 	t.Helper()
diff --git a/controllers/upgradejob_controller.go b/controllers/upgradejob_controller.go
index 3ce9f51..bd64a81 100644
--- a/controllers/upgradejob_controller.go
+++ b/controllers/upgradejob_controller.go
@@ -67,6 +67,9 @@ const (
 //+kubebuilder:rbac:groups=managedupgrade.appuio.io,resources=upgradejobhooks,verbs=get;list;watch
 //+kubebuilder:rbac:groups=managedupgrade.appuio.io,resources=upgradejobhooks/status,verbs=get;update;patch
 
+//+kubebuilder:rbac:groups=managedupgrade.appuio.io,resources=upgradesuspensionwindows,verbs=get;list;watch
+//+kubebuilder:rbac:groups=managedupgrade.appuio.io,resources=upgradesuspensionwindows/status,verbs=get
+
 //+kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=batch,resources=jobs/finalizers,verbs=update
 
@@ -91,20 +94,20 @@ func (r *UpgradeJobReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	if sc != nil && sc.Status == metav1.ConditionTrue {
 		// Ignore hooks status, they can't influence the upgrade anymore.
 		// Don't execute hooks created after the job was finished.
-		_, eserr := r.executeHooks(ctx, &uj, managedupgradev1beta1.EventSuccess, sc.LastTransitionTime.Time)
-		_, eferr := r.executeHooks(ctx, &uj, managedupgradev1beta1.EventFinish, sc.LastTransitionTime.Time)
+		_, eserr := r.executeHooks(ctx, &uj, managedupgradev1beta1.EventSuccess, sc.Reason, sc.LastTransitionTime.Time)
+		_, eferr := r.executeHooks(ctx, &uj, managedupgradev1beta1.EventFinish, sc.Reason, sc.LastTransitionTime.Time)
 		return ctrl.Result{}, multierr.Combine(eserr, eferr, r.cleanupLock(ctx, &uj))
 	}
 	fc := apimeta.FindStatusCondition(uj.Status.Conditions, managedupgradev1beta1.UpgradeJobConditionFailed)
 	if fc != nil && fc.Status == metav1.ConditionTrue {
 		// Ignore hooks status, they can't influence the upgrade anymore.
 		// Don't execute hooks created after the job was finished.
-		_, efaerr := r.executeHooks(ctx, &uj, managedupgradev1beta1.EventFailure, fc.LastTransitionTime.Time)
-		_, efierr := r.executeHooks(ctx, &uj, managedupgradev1beta1.EventFinish, fc.LastTransitionTime.Time)
+		_, efaerr := r.executeHooks(ctx, &uj, managedupgradev1beta1.EventFailure, fc.Reason, fc.LastTransitionTime.Time)
+		_, efierr := r.executeHooks(ctx, &uj, managedupgradev1beta1.EventFinish, fc.Reason, fc.LastTransitionTime.Time)
 		return ctrl.Result{}, multierr.Combine(efaerr, efierr, r.cleanupLock(ctx, &uj))
 	}
 
-	cont, err := r.executeHooks(ctx, &uj, managedupgradev1beta1.EventCreate, time.Time{})
+	cont, err := r.executeHooks(ctx, &uj, managedupgradev1beta1.EventCreate, "", time.Time{})
 	if err != nil {
 		return ctrl.Result{}, err
 	}
@@ -118,6 +121,21 @@ func (r *UpgradeJobReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 
 	now := r.Clock.Now()
 
+	window, err := r.matchingUpgradeSuspensionWindow(ctx, uj, now)
+	if err != nil {
+		return ctrl.Result{}, fmt.Errorf("failed to search for matching upgrade suspension window: %w", err)
+	}
+	if window != nil {
+		l.Info("Upgrade job skipped by UpgradeSuspensionWindow", "window", window.Name, "reason", window.Spec.Reason, "start", window.Spec.Start.Time, "end", window.Spec.End.Time)
+		r.setStatusCondition(&uj.Status.Conditions, metav1.Condition{
+			Type:    managedupgradev1beta1.UpgradeJobConditionSucceeded,
+			Status:  metav1.ConditionTrue,
+			Reason:  managedupgradev1beta1.UpgradeJobReasonSkipped,
+			Message: fmt.Sprintf("Upgrade job skipped by UpgradeSuspensionWindow %q, reason: %q", window.Name, window.Spec.Reason),
+		})
+		return ctrl.Result{}, r.Status().Update(ctx, &uj)
+	}
+
 	if now.After(uj.Spec.StartBefore.Time) {
 		r.setStatusCondition(&uj.Status.Conditions, metav1.Condition{
 			Type:   managedupgradev1beta1.UpgradeJobConditionFailed,
@@ -146,7 +164,7 @@ func (r *UpgradeJobReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 func (r *UpgradeJobReconciler) reconcileStartedJob(ctx context.Context, uj *managedupgradev1beta1.UpgradeJob) (ctrl.Result, error) {
 	l := log.FromContext(ctx).WithName("UpgradeJobReconciler.reconcileStartedJob")
 
-	cont, err := r.executeHooks(ctx, uj, managedupgradev1beta1.EventStart, time.Time{})
+	cont, err := r.executeHooks(ctx, uj, managedupgradev1beta1.EventStart, managedupgradev1beta1.UpgradeJobReasonStarted, time.Time{})
 	if err != nil {
 		return ctrl.Result{}, err
 	}
@@ -262,7 +280,7 @@ func (r *UpgradeJobReconciler) reconcileStartedJob(ctx context.Context, uj *mana
 		return ctrl.Result{}, nil
 	}
 
-	cont, err = r.executeHooks(ctx, uj, managedupgradev1beta1.EventUpgradeComplete, time.Time{})
+	cont, err = r.executeHooks(ctx, uj, managedupgradev1beta1.EventUpgradeComplete, managedupgradev1beta1.UpgradeJobReasonCompleted, time.Time{})
 	if err != nil {
 		return ctrl.Result{}, err
 	}
@@ -461,7 +479,7 @@ func (r *UpgradeJobReconciler) tryLockClusterVersion(ctx context.Context, versio
 	return nil
 }
 
-func (r *UpgradeJobReconciler) executeHooks(ctx context.Context, uj *managedupgradev1beta1.UpgradeJob, event managedupgradev1beta1.UpgradeEvent, cutoffTime time.Time) (bool, error) {
+func (r *UpgradeJobReconciler) executeHooks(ctx context.Context, uj *managedupgradev1beta1.UpgradeJob, event managedupgradev1beta1.UpgradeEvent, reason string, cutoffTime time.Time) (bool, error) {
 	l := log.FromContext(ctx)
 
 	var allHooks managedupgradev1beta1.UpgradeJobHookList
@@ -505,7 +523,7 @@ func (r *UpgradeJobReconciler) executeHooks(ctx context.Context, uj *managedupgr
 	errors := []error{}
 	failedJobs := []string{}
 	for _, hook := range hooks {
-		jobs, err := r.jobForUpgradeJobAndHook(ctx, uj, hook, event, hookJobMeta{MatchesDisruptiveHooks: hasMatchingDisruptiveHook})
+		jobs, err := r.jobForUpgradeJobAndHook(ctx, uj, hook, event, reason, hookJobMeta{MatchesDisruptiveHooks: hasMatchingDisruptiveHook})
 		if err != nil {
 			errors = append(errors, err)
 			continue
 		}
@@ -557,6 +575,7 @@ func (r *UpgradeJobReconciler) jobForUpgradeJobAndHook(
 	uj *managedupgradev1beta1.UpgradeJob,
 	hook managedupgradev1beta1.UpgradeJobHook,
 	event managedupgradev1beta1.UpgradeEvent,
+	reason string,
 	meta hookJobMeta,
 ) ([]managedupgradev1beta1.HookJobTracker, error) {
 	var jobs batchv1.JobList
@@ -580,7 +599,7 @@ func (r *UpgradeJobReconciler) jobForUpgradeJobAndHook(
 		return jobStatus, nil
 	}
 
-	_, err := r.createHookJob(ctx, hook, uj, event, meta)
+	_, err := r.createHookJob(ctx, hook, uj, event, reason, meta)
 	return []managedupgradev1beta1.HookJobTracker{{
 		HookEvent:          string(event),
 		UpgradeJobHookName: hook.Name,
@@ -593,6 +612,7 @@ func (r *UpgradeJobReconciler) createHookJob(
 	hook managedupgradev1beta1.UpgradeJobHook,
 	uj *managedupgradev1beta1.UpgradeJob,
 	event managedupgradev1beta1.UpgradeEvent,
+	reason string,
 	meta hookJobMeta,
 ) (batchv1.Job, error) {
 	l := log.FromContext(ctx)
@@ -603,7 +623,8 @@ func (r *UpgradeJobReconciler) createHookJob(
 	maps.Copy(ll, jobLabels(uj.Name, hook.Name, event))
 
 	normalizedEvent := map[string]any{
-		"name": string(event),
+		"name":   string(event),
+		"reason": reason,
 	}
 
 	normalizedUJ, err := normalizeAsJson(uj)
@@ -963,3 +984,29 @@ func boolToStatus(b bool) metav1.ConditionStatus {
 	}
 	return metav1.ConditionFalse
 }
+
+// matchingUpgradeSuspensionWindow returns the UpgradeSuspensionWindow that matches the UpgradeJob and time, or nil if none matches.
+func (r *UpgradeJobReconciler) matchingUpgradeSuspensionWindow(ctx context.Context, uj managedupgradev1beta1.UpgradeJob, t time.Time) (*managedupgradev1beta1.UpgradeSuspensionWindow, error) {
+	l := log.FromContext(ctx)
+
+	var allWindows managedupgradev1beta1.UpgradeSuspensionWindowList
+	if err := r.List(ctx, &allWindows, client.InNamespace(uj.Namespace)); err != nil {
+		return nil, fmt.Errorf("failed to list upgrade suspension windows: %w", err)
+	}
+	for _, window := range allWindows.Items {
+		sel, err := metav1.LabelSelectorAsSelector(window.Spec.JobSelector)
+		if err != nil {
+			l.Error(err, "failed to parse selector from window", "window", window.Name, "selector", "jobSelector")
+			continue
+		}
+		if !sel.Matches(labels.Set(uj.Labels)) {
+			continue
+		}
+		if t.Before(window.Spec.Start.Time) || t.After(window.Spec.End.Time) {
+			continue
+		}
+		return &window, nil
+	}
+
+	return nil, nil
+}
diff --git a/controllers/upgradejob_controller_test.go b/controllers/upgradejob_controller_test.go
index bcdbceb..f73d7b5 100644
--- a/controllers/upgradejob_controller_test.go
+++ b/controllers/upgradejob_controller_test.go
@@ -95,6 +95,30 @@ func Test_UpgradeJobReconciler_Reconcile_E2E_Upgrade(t *testing.T) {
 			},
 		},
 	}
+	expiredSuspensionWindow := &managedupgradev1beta1.UpgradeSuspensionWindow{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "expired-window",
+			Namespace: "appuio-openshift-upgrade-controller",
+		},
+		Spec: managedupgradev1beta1.UpgradeSuspensionWindowSpec{
+			Start: metav1.NewTime(clock.Now().Add(-7 * 24 * time.Hour)),
+			End:   metav1.NewTime(clock.Now().Add(-24 * time.Hour)),
+			JobSelector: &metav1.LabelSelector{
+				MatchLabels: upgradeJob.Labels,
+			},
+		},
+	}
+	unrelatedSuspensionWindow := &managedupgradev1beta1.UpgradeSuspensionWindow{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "unrelated-window",
"appuio-openshift-upgrade-controller", + }, + Spec: managedupgradev1beta1.UpgradeSuspensionWindowSpec{ + Start: metav1.NewTime(clock.Now().Add(-7 * 24 * time.Hour)), + End: metav1.NewTime(clock.Now().Add(7 * 24 * time.Hour)), + ConfigSelector: &metav1.LabelSelector{}, + }, + } masterPool := &machineconfigurationv1.MachineConfigPool{ ObjectMeta: metav1.ObjectMeta{ @@ -106,7 +130,7 @@ func Test_UpgradeJobReconciler_Reconcile_E2E_Upgrade(t *testing.T) { }, } - c := controllerClient(t, ucv, upgradeJob, upgradeJobHook, masterPool) + c := controllerClient(t, ucv, upgradeJob, upgradeJobHook, masterPool, expiredSuspensionWindow, unrelatedSuspensionWindow) subject := &UpgradeJobReconciler{ Client: c, @@ -298,6 +322,114 @@ func Test_UpgradeJobReconciler_Reconcile_E2E_Upgrade(t *testing.T) { }) } +func Test_UpgradeJobReconciler_Reconcile_Skipped_Job(t *testing.T) { + ctx := context.Background() + clock := mockClock{now: time.Date(2022, 12, 4, 22, 45, 0, 0, time.UTC)} + + ucv := &configv1.ClusterVersion{ + ObjectMeta: metav1.ObjectMeta{ + Name: "version", + }, + } + upgradeJob := &managedupgradev1beta1.UpgradeJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: "upgrade-1234-4-5-13", + Namespace: "appuio-openshift-upgrade-controller", + Labels: map[string]string{ + "name": "upgrade-1234-4-5-13", + }, + }, + Spec: managedupgradev1beta1.UpgradeJobSpec{ + StartBefore: metav1.NewTime(clock.Now().Add(10 * time.Hour)), + StartAfter: metav1.NewTime(clock.Now().Add(time.Hour)), + DesiredVersion: &configv1.Update{ + Version: "4.5.13", + Image: "quay.io/openshift-release-dev/ocp-release@sha256:d094f1952995b3c5fd8e0b19b128905931e1e8fdb4b6cb377857ab0dfddcff47", + }, + UpgradeJobConfig: managedupgradev1beta1.UpgradeJobConfig{ + UpgradeTimeout: metav1.Duration{Duration: 12 * time.Hour}, + }, + }, + } + upgradeJobHook := &managedupgradev1beta1.UpgradeJobHook{ + ObjectMeta: metav1.ObjectMeta{ + Name: "notify", + Namespace: "appuio-openshift-upgrade-controller", + }, + Spec: managedupgradev1beta1.UpgradeJobHookSpec{ + Selector: metav1.LabelSelector{ + MatchLabels: upgradeJob.Labels, + }, + Events: []managedupgradev1beta1.UpgradeEvent{ + managedupgradev1beta1.EventFinish, + managedupgradev1beta1.EventSuccess, + }, + Template: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + Image: "test", + }, + }, + }, + }, + }, + }, + }, + } + suspensionWindow := &managedupgradev1beta1.UpgradeSuspensionWindow{ + ObjectMeta: metav1.ObjectMeta{ + Name: "holidays", + Namespace: "appuio-openshift-upgrade-controller", + }, + Spec: managedupgradev1beta1.UpgradeSuspensionWindowSpec{ + Start: metav1.NewTime(clock.Now().Add(-time.Hour)), + End: metav1.NewTime(clock.Now().Add(time.Hour)), + Reason: "End of year holidays", + JobSelector: &metav1.LabelSelector{ + MatchLabels: upgradeJob.Labels, + }, + }, + } + + c := controllerClient(t, ucv, upgradeJob, upgradeJobHook, suspensionWindow) + + subject := &UpgradeJobReconciler{ + Client: c, + Scheme: c.Scheme(), + + Clock: &clock, + + ManagedUpstreamClusterVersionName: "version", + } + + step(t, "Upgrade Skipped because of window", func(t *testing.T) { + reconcileNTimes(t, subject, ctx, requestForObject(upgradeJob), 10) + require.NoError(t, c.Get(ctx, requestForObject(upgradeJob).NamespacedName, upgradeJob)) + sc := apimeta.FindStatusCondition(upgradeJob.Status.Conditions, managedupgradev1beta1.UpgradeJobConditionSucceeded) + require.NotNil(t, sc) + require.Equal(t, 
+		require.Equal(t, managedupgradev1beta1.UpgradeJobReasonSkipped, sc.Reason)
+		require.Equal(t, metav1.ConditionTrue, sc.Status)
+		require.Contains(t, sc.Message, suspensionWindow.Name)
+		require.Contains(t, sc.Message, suspensionWindow.Spec.Reason)
+
+		require.NoError(t, c.Get(ctx, requestForObject(ucv).NamespacedName, ucv))
+		require.Empty(t, ucv.Spec.DesiredUpdate, "cluster version should not be updated")
+		require.Empty(t, ucv.Annotations[ClusterVersionLockAnnotation], "cluster version should not be locked")
+	})
+
+	step(t, "`Success` and `Finish` hooks", func(t *testing.T) {
+		for _, event := range []managedupgradev1beta1.UpgradeEvent{managedupgradev1beta1.EventSuccess, managedupgradev1beta1.EventFinish} {
+			job := checkAndCompleteHook(t, c, subject, upgradeJob, upgradeJobHook, event, true)
+			require.Len(t, job.Spec.Template.Spec.Containers, 1)
+			requireEnv(t, job.Spec.Template.Spec.Containers[0].Env, "EVENT_reason", func(v string) (bool, error) { return v == `"Skipped"`, nil })
+		}
+	})
+}
+
 func Test_UpgradeJobReconciler_Reconcile_EmptyDesiredVersion(t *testing.T) {
 	ctx := context.Background()
 	clock := mockClock{now: time.Date(2022, 12, 4, 22, 45, 0, 0, time.UTC)}
@@ -1458,6 +1590,7 @@ func controllerClient(t *testing.T, initObjs ...client.Object) client.WithWatch
 		&managedupgradev1beta1.UpgradeJob{},
 		&managedupgradev1beta1.UpgradeJobHook{},
 		&managedupgradev1beta1.ClusterVersion{},
+		&managedupgradev1beta1.UpgradeSuspensionWindow{},
 		&configv1.ClusterVersion{},
 		&batchv1.Job{},
 		&machineconfigurationv1.MachineConfigPool{},
diff --git a/main.go b/main.go
index 41b2430..918324a 100644
--- a/main.go
+++ b/main.go
@@ -154,8 +154,9 @@ func main() {
 		os.Exit(1)
 	}
 	if err = (&controllers.UpgradeConfigReconciler{
-		Client: mgr.GetClient(),
-		Scheme: mgr.GetScheme(),
+		Client:   mgr.GetClient(),
+		Scheme:   mgr.GetScheme(),
+		Recorder: mgr.GetEventRecorderFor("upgrade-config-controller"),
 
 		Clock: realClock{},
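
---
For reference, a minimal manifest for the new resource; this is a reviewer's sketch, not part of the patch, and the object name, namespace, dates, and label values below are illustrative only:

apiVersion: managedupgrade.appuio.io/v1beta1
kind: UpgradeSuspensionWindow
metadata:
  name: end-of-year-holidays
  namespace: appuio-openshift-upgrade-controller
spec:
  start: "2024-12-23T00:00:00Z"
  end: "2025-01-06T00:00:00Z"
  reason: End of year holidays
  # An empty selector ({}) matches all objects; omitting a selector matches none.
  # Matching UpgradeConfigs create no new UpgradeJobs while the window is active.
  configSelector: {}
  # Matching UpgradeJobs that have not started yet are marked Succeeded with
  # reason `Skipped`; already running jobs are allowed to finish.
  jobSelector:
    matchLabels:
      app: openshift-upgrade-controller

Skipped jobs still run their `Success` and `Finish` hooks, and hook jobs receive the skip reason through the normalized event; per the test above, the `EVENT_reason` environment variable then contains `"Skipped"`.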