From c0c35fe885c9a6a5d4465aa8e38135d062866766 Mon Sep 17 00:00:00 2001
From: Andrew Bayer
Date: Thu, 18 Aug 2022 09:41:52 -0400
Subject: [PATCH] Switch PipelineRun timeout -> TaskRun logic to instead signal the TaskRuns to stop

Fixes #5127

As noted in #5127, the logic around calculating a timeout for a `PipelineRun`'s `TaskRun`s, to make sure that each `TaskRun`'s timeout always ends before the `PipelineRun`'s timeout, is problematic. It can result in race conditions where a `TaskRun` gets timed out, immediately followed by the `PipelineRun` being reconciled before it has reached the end of its own timeout.

This change gets rid of that behavior. Instead, when a `PipelineRun` times out, its still-running `TaskRun`s get `TaskRun.Spec.Status` set to `TaskRunCancelled` with a new `TaskRun.Spec.StatusMessage`, `TaskRunCancelledByPipelineTimeoutMsg`, and the `TaskRun` reconciler handles that the same way it handles any other cancellation, while the message records that the cancellation was caused by the `PipelineRun` timing out. By doing this, we unify the behavior for both `TaskRun`s and `Run`s upon `PipelineRun` timeout, given that we already cancel `Run`s in that case, and we eliminate a number of flaky outcomes for `PipelineRun`s.

Also, rather than setting a 1s timeout on tasks which are created after the `.timeouts.pipeline`, `.timeouts.tasks`, or `.timeouts.finally` timeouts have elapsed, this change skips creation of those `TaskRun`s or `Run`s entirely. However, it still reports those "tasks" as failed, not skipped, since attempting to create tasks after the appropriate timeout has been reached is a failure case, and that's the existing expected behavior for `PipelineTask`s which try to start after timeouts have elapsed.

Huge thanks to @jerop for suggesting this approach!

Signed-off-by: Andrew Bayer
---
 docs/taskruns.md | 27 +-
 pkg/apis/pipeline/v1alpha1/run_types.go | 2 +
 .../pipeline/v1beta1/openapi_generated.go | 12 +
 .../pipeline/v1beta1/pipelinerun_types.go | 44 +
 .../v1beta1/pipelinerun_types_test.go | 31 +
 pkg/apis/pipeline/v1beta1/swagger.json | 8 +
 pkg/apis/pipeline/v1beta1/taskrun_types.go | 2 +
 .../pipeline/v1beta1/taskrun_types_test.go | 8 +-
 .../pipeline/v1beta1/zz_generated.deepcopy.go | 4 +
 pkg/reconciler/pipelinerun/cancel.go | 43 +-
 pkg/reconciler/pipelinerun/cancel_test.go | 22 +-
 pkg/reconciler/pipelinerun/pipelinerun.go | 206 ++--
 .../pipelinerun/pipelinerun_test.go | 889 +++++++++---------
 .../resources/pipelinerunresolution.go | 107 ++-
 .../resources/pipelinerunresolution_test.go | 251 ++++-
 .../pipelinerun/resources/pipelinerunstate.go | 63 +-
 .../resources/pipelinerunstate_test.go | 127 ++-
 pkg/reconciler/pipelinerun/timeout.go | 139 +++
 pkg/reconciler/pipelinerun/timeout_test.go | 252 +++++
 pkg/reconciler/taskrun/taskrun_test.go | 73 ++
 test/timeout_test.go | 16 +-
 21 files changed, 1678 insertions(+), 648 deletions(-)
 create mode 100644 pkg/reconciler/pipelinerun/timeout.go
 create mode 100644 pkg/reconciler/pipelinerun/timeout_test.go

diff --git a/docs/taskruns.md b/docs/taskruns.md
index 46b32f179c3..c5845708d49 100644
--- a/docs/taskruns.md
+++ b/docs/taskruns.md
@@ -582,19 +582,20 @@ steps:
 The following tables shows how to read the overall status of a `TaskRun`:
-`status`|`reason`|`completionTime` is set|Description
-:-------|:-------|:---------------------:|--------------:
-Unknown|Started|No|The TaskRun has just been picked up by the controller.
-Unknown|Pending|No|The TaskRun is waiting on a Pod in status Pending.
-Unknown|Running|No|The TaskRun has been validated and started to perform its work.
-Unknown|TaskRunCancelled|No|The user requested the TaskRun to be cancelled.
Cancellation has not been done yet. -True|Succeeded|Yes|The TaskRun completed successfully. -False|Failed|Yes|The TaskRun failed because one of the steps failed. -False|\[Error message\]|No|The TaskRun encountered a non-permanent error, and it's still running. It may ultimately succeed. -False|\[Error message\]|Yes|The TaskRun failed with a permanent error (usually validation). -False|TaskRunCancelled|Yes|The TaskRun was cancelled successfully. -False|TaskRunTimeout|Yes|The TaskRun timed out. -False|TaskRunImagePullFailed|Yes|The TaskRun failed due to one of its steps not being able to pull the image. +`status`|`reason`|`message`|`completionTime` is set|Description +:-------|:-------|:--|:---------------------:|--------------: +Unknown|Started|n/a|No|The TaskRun has just been picked up by the controller. +Unknown|Pending|n/a|No|The TaskRun is waiting on a Pod in status Pending. +Unknown|Running|n/a|No|The TaskRun has been validated and started to perform its work. +Unknown|TaskRunCancelled|n/a|No|The user requested the TaskRun to be cancelled. Cancellation has not been done yet. +True|Succeeded|n/a|Yes|The TaskRun completed successfully. +False|Failed|n/a|Yes|The TaskRun failed because one of the steps failed. +False|\[Error message\]|n/a|No|The TaskRun encountered a non-permanent error, and it's still running. It may ultimately succeed. +False|\[Error message\]|n/a|Yes|The TaskRun failed with a permanent error (usually validation). +False|TaskRunCancelled|n/a|Yes|The TaskRun was cancelled successfully. +False|TaskRunCancelled|TaskRun cancelled as the PipelineRun it belongs to has timed out.|Yes|The TaskRun was cancelled because the PipelineRun timed out. +False|TaskRunTimeout|n/a|Yes|The TaskRun timed out. +False|TaskRunImagePullFailed|n/a|Yes|The TaskRun failed due to one of its steps not being able to pull the image. When a `TaskRun` changes status, [events](events.md#taskruns) are triggered accordingly. diff --git a/pkg/apis/pipeline/v1alpha1/run_types.go b/pkg/apis/pipeline/v1alpha1/run_types.go index d7ca61a05cb..8470bd687e2 100644 --- a/pkg/apis/pipeline/v1alpha1/run_types.go +++ b/pkg/apis/pipeline/v1alpha1/run_types.go @@ -102,6 +102,8 @@ const ( // RunCancelledByPipelineMsg indicates that the PipelineRun of which part this Run was // has been cancelled. RunCancelledByPipelineMsg RunSpecStatusMessage = "Run cancelled as the PipelineRun it belongs to has been cancelled." + // RunCancelledByPipelineTimeoutMsg indicates that the Run was cancelled because the PipelineRun running it timed out. + RunCancelledByPipelineTimeoutMsg RunSpecStatusMessage = "Run cancelled as the PipelineRun it belongs to has timed out." 
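+ // Note that a Run stopped because its owning PipelineRun timed out still has spec.status patched to RunCancelled;
+ // only the spec.statusMessage distinguishes a timeout-driven cancellation from a user-requested one.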
) // GetParam gets the Param from the RunSpec with the given name diff --git a/pkg/apis/pipeline/v1beta1/openapi_generated.go b/pkg/apis/pipeline/v1beta1/openapi_generated.go index e669b1041d7..ce651b1752d 100644 --- a/pkg/apis/pipeline/v1beta1/openapi_generated.go +++ b/pkg/apis/pipeline/v1beta1/openapi_generated.go @@ -1764,6 +1764,12 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRunStatus(ref common.ReferenceCall }, }, }, + "finallyStartTime": { + SchemaProps: spec.SchemaProps{ + Description: "FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, }, }, }, @@ -1882,6 +1888,12 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRunStatusFields(ref common.Referen }, }, }, + "finallyStartTime": { + SchemaProps: spec.SchemaProps{ + Description: "FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, }, }, }, diff --git a/pkg/apis/pipeline/v1beta1/pipelinerun_types.go b/pkg/apis/pipeline/v1beta1/pipelinerun_types.go index 6810a852ed2..e3f8bffc25f 100644 --- a/pkg/apis/pipeline/v1beta1/pipelinerun_types.go +++ b/pkg/apis/pipeline/v1beta1/pipelinerun_types.go @@ -174,6 +174,40 @@ func (pr *PipelineRun) HasTimedOut(ctx context.Context, c clock.PassiveClock) bo return false } +// HaveTasksTimedOut returns true if a pipelinerun has exceeded its spec.Timeouts.Tasks +func (pr *PipelineRun) HaveTasksTimedOut(ctx context.Context, c clock.PassiveClock) bool { + timeout := pr.TasksTimeout() + startTime := pr.Status.StartTime + + if !startTime.IsZero() && timeout != nil { + if timeout.Duration == config.NoTimeoutDuration { + return false + } + runtime := c.Since(startTime.Time) + if runtime > timeout.Duration { + return true + } + } + return false +} + +// HasFinallyTimedOut returns true if a pipelinerun has exceeded its spec.Timeouts.Finally, based on status.FinallyStartTime +func (pr *PipelineRun) HasFinallyTimedOut(ctx context.Context, c clock.PassiveClock) bool { + timeout := pr.FinallyTimeout() + startTime := pr.Status.FinallyStartTime + + if startTime != nil && !startTime.IsZero() && timeout != nil { + if timeout.Duration == config.NoTimeoutDuration { + return false + } + runtime := c.Since(startTime.Time) + if runtime > timeout.Duration { + return true + } + } + return false +} + // HasVolumeClaimTemplate returns true if PipelineRun contains volumeClaimTemplates that is // used for creating PersistentVolumeClaims with an OwnerReference for each run func (pr *PipelineRun) HasVolumeClaimTemplate() bool { @@ -418,6 +452,10 @@ type PipelineRunStatusFields struct { // +optional // +listType=atomic ChildReferences []ChildStatusReference `json:"childReferences,omitempty"` + + // FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed. + // +optional + FinallyStartTime *metav1.Time `json:"finallyStartTime,omitempty"` } // SkippedTask is used to describe the Tasks that were skipped due to their When Expressions @@ -450,6 +488,12 @@ const ( GracefullyStoppedSkip SkippingReason = "PipelineRun was gracefully stopped" // MissingResultsSkip means the task was skipped because it's missing necessary results MissingResultsSkip SkippingReason = "Results were missing" + // PipelineTimedOutSkip means the task was skipped because the PipelineRun has passed its overall timeout. 
+ PipelineTimedOutSkip SkippingReason = "PipelineRun timeout has been reached" + // TasksTimedOutSkip means the task was skipped because the PipelineRun has passed its Timeouts.Tasks. + TasksTimedOutSkip SkippingReason = "PipelineRun Tasks timeout has been reached" + // FinallyTimedOutSkip means the task was skipped because the PipelineRun has passed its Timeouts.Finally. + FinallyTimedOutSkip SkippingReason = "PipelineRun Finally timeout has been reached" // None means the task was not skipped None SkippingReason = "None" ) diff --git a/pkg/apis/pipeline/v1beta1/pipelinerun_types_test.go b/pkg/apis/pipeline/v1beta1/pipelinerun_types_test.go index 5b4a0055b21..211d5fb52db 100644 --- a/pkg/apis/pipeline/v1beta1/pipelinerun_types_test.go +++ b/pkg/apis/pipeline/v1beta1/pipelinerun_types_test.go @@ -286,6 +286,37 @@ func TestPipelineRunHasTimedOut(t *testing.T) { t.Errorf("Expected HasTimedOut to be %t when using pipeline.timeouts.pipeline", tc.expected) } }) + t.Run("pipeline.timeouts.tasks "+tc.name, func(t *testing.T) { + pr := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + Spec: v1beta1.PipelineRunSpec{ + Timeouts: &v1beta1.TimeoutFields{Tasks: &metav1.Duration{Duration: tc.timeout}}, + }, + Status: v1beta1.PipelineRunStatus{PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + StartTime: &metav1.Time{Time: tc.starttime}, + }}, + } + + if pr.HaveTasksTimedOut(context.Background(), testClock) != tc.expected { + t.Errorf("Expected HasTimedOut to be %t when using pipeline.timeouts.pipeline", tc.expected) + } + }) + t.Run("pipeline.timeouts.finally "+tc.name, func(t *testing.T) { + pr := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + Spec: v1beta1.PipelineRunSpec{ + Timeouts: &v1beta1.TimeoutFields{Finally: &metav1.Duration{Duration: tc.timeout}}, + }, + Status: v1beta1.PipelineRunStatus{PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + StartTime: &metav1.Time{Time: tc.starttime}, + FinallyStartTime: &metav1.Time{Time: tc.starttime}, + }}, + } + + if pr.HasFinallyTimedOut(context.Background(), testClock) != tc.expected { + t.Errorf("Expected HasTimedOut to be %t when using pipeline.timeouts.pipeline", tc.expected) + } + }) } } diff --git a/pkg/apis/pipeline/v1beta1/swagger.json b/pkg/apis/pipeline/v1beta1/swagger.json index fe343f0e272..374fab49e65 100644 --- a/pkg/apis/pipeline/v1beta1/swagger.json +++ b/pkg/apis/pipeline/v1beta1/swagger.json @@ -1015,6 +1015,10 @@ "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, + "finallyStartTime": { + "description": "FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.", + "$ref": "#/definitions/v1.Time" + }, "observedGeneration": { "description": "ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.", "type": "integer", @@ -1079,6 +1083,10 @@ "description": "CompletionTime is the time the PipelineRun completed.", "$ref": "#/definitions/v1.Time" }, + "finallyStartTime": { + "description": "FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.", + "$ref": "#/definitions/v1.Time" + }, "pipelineResults": { "description": "PipelineResults are the list of results written out by the pipeline task's containers", "type": "array", diff --git a/pkg/apis/pipeline/v1beta1/taskrun_types.go b/pkg/apis/pipeline/v1beta1/taskrun_types.go index 98cd2327e2c..1e6cffefd20 100644 --- a/pkg/apis/pipeline/v1beta1/taskrun_types.go 
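The taskrun_types.go hunk below adds the TaskRun-side status message constant. As a point of reference, here is a minimal, self-contained sketch (not part of this patch; the standalone program and the `markTimedOut` helper name are illustrative only) of the two spec fields the PipelineRun reconciler sets on a still-running child TaskRun once the owning PipelineRun has timed out:

```go
package main

import (
	"fmt"

	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
)

// markTimedOut is an illustrative helper: it mirrors the effect of the patch the
// PipelineRun reconciler applies to a still-running child TaskRun when the owning
// PipelineRun times out. The TaskRun reconciler then tears the TaskRun down the
// same way it would for a user-requested cancellation, while the status message
// records that the cancellation was caused by the PipelineRun timeout.
func markTimedOut(tr *v1beta1.TaskRun) {
	tr.Spec.Status = v1beta1.TaskRunSpecStatusCancelled
	tr.Spec.StatusMessage = v1beta1.TaskRunCancelledByPipelineTimeoutMsg
}

func main() {
	tr := &v1beta1.TaskRun{}
	markTimedOut(tr)
	fmt.Printf("spec.status=%s spec.statusMessage=%q\n", tr.Spec.Status, tr.Spec.StatusMessage)
}
```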
+++ b/pkg/apis/pipeline/v1beta1/taskrun_types.go @@ -101,6 +101,8 @@ const ( // TaskRunCancelledByPipelineMsg indicates that the PipelineRun of which this // TaskRun was a part of has been cancelled. TaskRunCancelledByPipelineMsg TaskRunSpecStatusMessage = "TaskRun cancelled as the PipelineRun it belongs to has been cancelled." + // TaskRunCancelledByPipelineTimeoutMsg indicates that the TaskRun was cancelled because the PipelineRun running it timed out. + TaskRunCancelledByPipelineTimeoutMsg TaskRunSpecStatusMessage = "TaskRun cancelled as the PipelineRun it belongs to has timed out." ) // TaskRunDebug defines the breakpoint config for a particular TaskRun diff --git a/pkg/apis/pipeline/v1beta1/taskrun_types_test.go b/pkg/apis/pipeline/v1beta1/taskrun_types_test.go index 32bc6702c34..26b99bbb6d0 100644 --- a/pkg/apis/pipeline/v1beta1/taskrun_types_test.go +++ b/pkg/apis/pipeline/v1beta1/taskrun_types_test.go @@ -120,7 +120,7 @@ func TestTaskRunIsDone(t *testing.T) { }, } if !tr.IsDone() { - t.Fatal("Expected pipelinerun status to be done") + t.Fatal("Expected taskrun status to be done") } } @@ -131,11 +131,7 @@ func TestTaskRunIsCancelled(t *testing.T) { }, } if !tr.IsCancelled() { - t.Fatal("Expected pipelinerun status to be cancelled") - } - expected := "" - if string(tr.Spec.StatusMessage) != expected { - t.Fatalf("Expected StatusMessage is %s but got %s", expected, tr.Spec.StatusMessage) + t.Fatal("Expected taskrun status to be cancelled") } } diff --git a/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go b/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go index 3cd690fef66..5570ac2fd65 100644 --- a/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go @@ -709,6 +709,10 @@ func (in *PipelineRunStatusFields) DeepCopyInto(out *PipelineRunStatusFields) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.FinallyStartTime != nil { + in, out := &in.FinallyStartTime, &out.FinallyStartTime + *out = (*in).DeepCopy() + } return } diff --git a/pkg/reconciler/pipelinerun/cancel.go b/pkg/reconciler/pipelinerun/cancel.go index 4e0faf59d47..7a9f3f02fb6 100644 --- a/pkg/reconciler/pipelinerun/cancel.go +++ b/pkg/reconciler/pipelinerun/cancel.go @@ -25,7 +25,6 @@ import ( "time" "github.com/tektoncd/pipeline/pkg/apis/config" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" @@ -34,6 +33,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" "knative.dev/pkg/apis" ) @@ -108,9 +108,14 @@ func cancelPipelineRun(ctx context.Context, logger *zap.SugaredLogger, pr *v1bet // cancelPipelineTaskRuns patches `TaskRun` and `Run` with canceled status func cancelPipelineTaskRuns(ctx context.Context, logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, clientSet clientset.Interface) []string { + return cancelPipelineTaskRunsForTaskNames(ctx, logger, pr, clientSet, sets.NewString()) +} + +// cancelPipelineTaskRunsForTaskNames patches `TaskRun`s and `Run`s for the given task names, or all if no task names are given, with canceled status +func cancelPipelineTaskRunsForTaskNames(ctx context.Context, logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, clientSet clientset.Interface, taskNames sets.String) []string { errs := []string{} - trNames, runNames, err := getChildObjectsFromPRStatus(ctx, pr.Status) + trNames, runNames, err := 
getChildObjectsFromPRStatusForTaskNames(ctx, pr.Status, taskNames) if err != nil { errs = append(errs, err.Error()) } @@ -136,9 +141,9 @@ func cancelPipelineTaskRuns(ctx context.Context, logger *zap.SugaredLogger, pr * return errs } -// getChildObjectsFromPRStatus returns taskruns and runs in the PipelineRunStatus's ChildReferences or TaskRuns/Runs, -// based on the value of the embedded status flag. -func getChildObjectsFromPRStatus(ctx context.Context, prs v1beta1.PipelineRunStatus) ([]string, []string, error) { +// getChildObjectsFromPRStatusForTaskNames returns taskruns and runs in the PipelineRunStatus's ChildReferences or TaskRuns/Runs, +// based on the value of the embedded status flag and the given set of PipelineTask names. If that set is empty, all are returned. +func getChildObjectsFromPRStatusForTaskNames(ctx context.Context, prs v1beta1.PipelineRunStatus, taskNames sets.String) ([]string, []string, error) { cfg := config.FromContextOrDefaults(ctx) var trNames []string @@ -147,21 +152,27 @@ func getChildObjectsFromPRStatus(ctx context.Context, prs v1beta1.PipelineRunSta if cfg.FeatureFlags.EmbeddedStatus != config.FullEmbeddedStatus { for _, cr := range prs.ChildReferences { - switch cr.Kind { - case "TaskRun": - trNames = append(trNames, cr.Name) - case "Run": - runNames = append(runNames, cr.Name) - default: - unknownChildKinds[cr.Name] = cr.Kind + if taskNames.Len() == 0 || taskNames.Has(cr.PipelineTaskName) { + switch cr.Kind { + case "TaskRun": + trNames = append(trNames, cr.Name) + case "Run": + runNames = append(runNames, cr.Name) + default: + unknownChildKinds[cr.Name] = cr.Kind + } } } } else { - for trName := range prs.TaskRuns { - trNames = append(trNames, trName) + for trName, trs := range prs.TaskRuns { + if taskNames.Len() == 0 || taskNames.Has(trs.PipelineTaskName) { + trNames = append(trNames, trName) + } } - for runName := range prs.Runs { - runNames = append(runNames, runName) + for runName, runStatus := range prs.Runs { + if taskNames.Len() == 0 || taskNames.Has(runStatus.PipelineTaskName) { + runNames = append(runNames, runName) + } } } diff --git a/pkg/reconciler/pipelinerun/cancel_test.go b/pkg/reconciler/pipelinerun/cancel_test.go index 4bd9b3ac438..9e18f0c4082 100644 --- a/pkg/reconciler/pipelinerun/cancel_test.go +++ b/pkg/reconciler/pipelinerun/cancel_test.go @@ -20,6 +20,8 @@ import ( "context" "testing" + "k8s.io/apimachinery/pkg/util/sets" + "github.com/google/go-cmp/cmp" "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/test/diff" @@ -274,11 +276,12 @@ func TestCancelPipelineRun(t *testing.T) { } } -func TestGetChildObjectsFromPRStatus(t *testing.T) { +func TestGetChildObjectsFromPRStatusForTaskNames(t *testing.T) { testCases := []struct { name string embeddedStatus string prStatus v1beta1.PipelineRunStatus + taskNames sets.String expectedTRNames []string expectedRunNames []string hasError bool @@ -319,6 +322,21 @@ func TestGetChildObjectsFromPRStatus(t *testing.T) { expectedTRNames: []string{"t1"}, expectedRunNames: []string{"r1"}, hasError: false, + }, { + name: "taskrun and run, default embedded, just want taskrun", + embeddedStatus: config.DefaultEmbeddedStatus, + prStatus: v1beta1.PipelineRunStatus{PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{ + "t1": {PipelineTaskName: "task-1"}, + }, + Runs: map[string]*v1beta1.PipelineRunRunStatus{ + "r1": {PipelineTaskName: "run-1"}, + }, + }}, + taskNames: sets.NewString("task-1"), + expectedTRNames: 
[]string{"t1"}, + expectedRunNames: nil, + hasError: false, }, { name: "full embedded", embeddedStatus: config.FullEmbeddedStatus, @@ -402,7 +420,7 @@ func TestGetChildObjectsFromPRStatus(t *testing.T) { cfg.OnConfigChanged(withCustomTasks(withEmbeddedStatus(newFeatureFlagsConfigMap(), tc.embeddedStatus))) ctx = cfg.ToContext(ctx) - trNames, runNames, err := getChildObjectsFromPRStatus(ctx, tc.prStatus) + trNames, runNames, err := getChildObjectsFromPRStatusForTaskNames(ctx, tc.prStatus, tc.taskNames) if tc.hasError { if err == nil { diff --git a/pkg/reconciler/pipelinerun/pipelinerun.go b/pkg/reconciler/pipelinerun/pipelinerun.go index c4343069f39..941dcf30682 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun.go +++ b/pkg/reconciler/pipelinerun/pipelinerun.go @@ -25,11 +25,11 @@ import ( "reflect" "strconv" "strings" - "time" + + "k8s.io/apimachinery/pkg/util/sets" "github.com/hashicorp/go-multierror" "github.com/tektoncd/pipeline/pkg/apis/config" - apisconfig "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" @@ -110,6 +110,9 @@ const ( // ReasonCouldntCancel indicates that a PipelineRun was cancelled but attempting to update // all of the running TaskRuns as cancelled failed. ReasonCouldntCancel = "PipelineRunCouldntCancel" + // ReasonCouldntTimeOut indicates that a PipelineRun was timed out but attempting to update + // all of the running TaskRuns as timed out failed. + ReasonCouldntTimeOut = "PipelineRunCouldntTimeOut" // ReasonInvalidTaskResultReference indicates a task result was declared // but was not initialized by that task ReasonInvalidTaskResultReference = "InvalidTaskResultReference" @@ -236,8 +239,17 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, pr *v1beta1.PipelineRun) if pr.Status.StartTime != nil { // Compute the time since the task started. elapsed := c.Clock.Since(pr.Status.StartTime.Time) - // Snooze this resource until the timeout has elapsed. - return controller.NewRequeueAfter(pr.PipelineTimeout(ctx) - elapsed) + // Snooze this resource until the appropriate timeout has elapsed. 
+ waitTime := pr.PipelineTimeout(ctx) - elapsed + if pr.Status.FinallyStartTime == nil && pr.TasksTimeout() != nil { + waitTime = pr.TasksTimeout().Duration - elapsed + } else if pr.FinallyTimeout() != nil { + finallyWaitTime := pr.FinallyTimeout().Duration - c.Clock.Since(pr.Status.FinallyStartTime.Time) + if finallyWaitTime < waitTime { + waitTime = finallyWaitTime + } + } + return controller.NewRequeueAfter(waitTime) } return nil } @@ -513,6 +525,24 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun, get SpecStatus: pr.Spec.Status, TasksGraph: d, FinalTasksGraph: dfinally, + TimeoutsState: resources.PipelineRunTimeoutsState{ + Clock: c.Clock, + }, + } + if pr.Status.StartTime != nil { + pipelineRunFacts.TimeoutsState.StartTime = &pr.Status.StartTime.Time + } + if pr.Status.FinallyStartTime != nil { + pipelineRunFacts.TimeoutsState.FinallyStartTime = &pr.Status.FinallyStartTime.Time + } + if tasksTimeout := pr.TasksTimeout(); tasksTimeout != nil { + pipelineRunFacts.TimeoutsState.TasksTimeout = &tasksTimeout.Duration + } + if finallyTimeout := pr.FinallyTimeout(); finallyTimeout != nil { + pipelineRunFacts.TimeoutsState.FinallyTimeout = &finallyTimeout.Duration + } + if pipelineTimeout := pr.PipelineTimeout(ctx); pipelineTimeout != 0 { + pipelineRunFacts.TimeoutsState.PipelineTimeout = &pipelineTimeout } for _, rpt := range pipelineRunFacts.State { @@ -584,6 +614,41 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun, get return controller.NewPermanentError(err) } + if pr.Status.FinallyStartTime == nil { + if pr.HaveTasksTimedOut(ctx, c.Clock) { + tasksToTimeOut := sets.NewString() + for _, pt := range pipelineRunFacts.State { + if !pt.IsFinalTask(pipelineRunFacts) && pt.IsRunning() { + tasksToTimeOut.Insert(pt.PipelineTask.Name) + } + } + if tasksToTimeOut.Len() > 0 { + logger.Infof("PipelineRun tasks timeout of %s reached, cancelling tasks", pr.Spec.Timeouts.Tasks.Duration.String()) + errs := timeoutPipelineTasksForTaskNames(ctx, logger, pr, c.PipelineClientSet, tasksToTimeOut) + if len(errs) > 0 { + errString := strings.Join(errs, "\n") + logger.Errorf("Failed to timeout tasks for PipelineRun %s/%s: %s", pr.Name, pr.Name, errString) + return fmt.Errorf("error(s) from cancelling TaskRun(s) from PipelineRun %s: %s", pr.Name, errString) + } + } + } + } else if pr.HasFinallyTimedOut(ctx, c.Clock) { + tasksToTimeOut := sets.NewString() + for _, pt := range pipelineRunFacts.State { + if pt.IsFinalTask(pipelineRunFacts) && pt.IsRunning() { + tasksToTimeOut.Insert(pt.PipelineTask.Name) + } + } + if tasksToTimeOut.Len() > 0 { + logger.Infof("PipelineRun finally timeout of %s reached, cancelling finally tasks", pr.Spec.Timeouts.Finally.Duration.String()) + errs := timeoutPipelineTasksForTaskNames(ctx, logger, pr, c.PipelineClientSet, tasksToTimeOut) + if len(errs) > 0 { + errString := strings.Join(errs, "\n") + logger.Errorf("Failed to timeout finally tasks for PipelineRun %s/%s: %s", pr.Name, pr.Name, errString) + return fmt.Errorf("error(s) from cancelling TaskRun(s) from PipelineRun %s: %s", pr.Name, errString) + } + } + } if err := c.runNextSchedulableTask(ctx, pr, pipelineRunFacts, as); err != nil { return err } @@ -595,6 +660,13 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun, get // Reset the skipped status to trigger recalculation pipelineRunFacts.ResetSkippedCache() + // If the pipelinerun has timed out, mark tasks as timed out and update status + if pr.HasTimedOut(ctx, c.Clock) { + if err := 
timeoutPipelineRun(ctx, logger, pr, c.PipelineClientSet); err != nil { + return err + } + } + after := pipelineRunFacts.GetPipelineConditionStatus(ctx, pr, logger, c.Clock) switch after.Status { case corev1.ConditionTrue: @@ -631,8 +703,9 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun, get return nil } -// processRunTimeouts custom tasks are requested to cancel, if they have timed out. Custom tasks can do any cleanup -// during this step. +// processRunTimeouts custom tasks are requested to cancel, if they have timed out. Since there is no guarantee that a +// custom task reconciler has its own logic for timing out a Run, this needs to be handled by the PipelineRun reconciler. +// Custom tasks can do any cleanup during this step. func (c *Reconciler) processRunTimeouts(ctx context.Context, pr *v1beta1.PipelineRun, pipelineState resources.PipelineRunState) error { errs := []string{} logger := logging.FromContext(ctx) @@ -641,7 +714,7 @@ func (c *Reconciler) processRunTimeouts(ctx context.Context, pr *v1beta1.Pipelin } for _, rpt := range pipelineState { if rpt.IsCustomTask() { - if rpt.Run != nil && !rpt.Run.IsCancelled() && (pr.HasTimedOut(ctx, c.Clock) || (rpt.Run.HasTimedOut(c.Clock) && !rpt.Run.IsDone())) { + if rpt.Run != nil && !rpt.Run.IsCancelled() && rpt.Run.HasTimedOut(c.Clock) && !rpt.Run.IsDone() { logger.Infof("Cancelling run task: %s due to timeout.", rpt.RunName) err := cancelRun(ctx, rpt.RunName, pr.Namespace, c.PipelineClientSet) if err != nil { @@ -705,46 +778,34 @@ func (c *Reconciler) runNextSchedulableTask(ctx context.Context, pr *v1beta1.Pip } for _, rpt := range nextRpts { + if rpt.IsFinalTask(pipelineRunFacts) { + c.setFinallyStartedTimeIfNeeded(pr, pipelineRunFacts) + } if rpt == nil || rpt.Skip(pipelineRunFacts).IsSkipped || rpt.IsFinallySkipped(pipelineRunFacts).IsSkipped { continue } + switch { case rpt.IsCustomTask() && rpt.IsMatrixed(): - if rpt.IsFinalTask(pipelineRunFacts) { - rpt.Runs, err = c.createRuns(ctx, rpt, pr, getFinallyTaskRunTimeout) - } else { - rpt.Runs, err = c.createRuns(ctx, rpt, pr, getTaskRunTimeout) - } + rpt.Runs, err = c.createRuns(ctx, rpt, pr) if err != nil { recorder.Eventf(pr, corev1.EventTypeWarning, "RunsCreationFailed", "Failed to create Runs %q: %v", rpt.RunNames, err) return fmt.Errorf("error creating Runs called %s for PipelineTask %s from PipelineRun %s: %w", rpt.RunNames, rpt.PipelineTask.Name, pr.Name, err) } case rpt.IsCustomTask(): - if rpt.IsFinalTask(pipelineRunFacts) { - rpt.Run, err = c.createRun(ctx, rpt.RunName, nil, rpt, pr, getFinallyTaskRunTimeout) - } else { - rpt.Run, err = c.createRun(ctx, rpt.RunName, nil, rpt, pr, getTaskRunTimeout) - } + rpt.Run, err = c.createRun(ctx, rpt.RunName, nil, rpt, pr) if err != nil { recorder.Eventf(pr, corev1.EventTypeWarning, "RunCreationFailed", "Failed to create Run %q: %v", rpt.RunName, err) return fmt.Errorf("error creating Run called %s for PipelineTask %s from PipelineRun %s: %w", rpt.RunName, rpt.PipelineTask.Name, pr.Name, err) } case rpt.IsMatrixed(): - if rpt.IsFinalTask(pipelineRunFacts) { - rpt.TaskRuns, err = c.createTaskRuns(ctx, rpt, pr, as.StorageBasePath(pr), getFinallyTaskRunTimeout) - } else { - rpt.TaskRuns, err = c.createTaskRuns(ctx, rpt, pr, as.StorageBasePath(pr), getTaskRunTimeout) - } + rpt.TaskRuns, err = c.createTaskRuns(ctx, rpt, pr, as.StorageBasePath(pr)) if err != nil { recorder.Eventf(pr, corev1.EventTypeWarning, "TaskRunsCreationFailed", "Failed to create TaskRuns %q: %v", rpt.TaskRunNames, err) return 
fmt.Errorf("error creating TaskRuns called %s for PipelineTask %s from PipelineRun %s: %w", rpt.TaskRunNames, rpt.PipelineTask.Name, pr.Name, err) } default: - if rpt.IsFinalTask(pipelineRunFacts) { - rpt.TaskRun, err = c.createTaskRun(ctx, rpt.TaskRunName, nil, rpt, pr, as.StorageBasePath(pr), getFinallyTaskRunTimeout) - } else { - rpt.TaskRun, err = c.createTaskRun(ctx, rpt.TaskRunName, nil, rpt, pr, as.StorageBasePath(pr), getTaskRunTimeout) - } + rpt.TaskRun, err = c.createTaskRun(ctx, rpt.TaskRunName, nil, rpt, pr, as.StorageBasePath(pr)) if err != nil { recorder.Eventf(pr, corev1.EventTypeWarning, "TaskRunCreationFailed", "Failed to create TaskRun %q: %v", rpt.TaskRunName, err) return fmt.Errorf("error creating TaskRun called %s for PipelineTask %s from PipelineRun %s: %w", rpt.TaskRunName, rpt.PipelineTask.Name, pr.Name, err) @@ -755,6 +816,16 @@ func (c *Reconciler) runNextSchedulableTask(ctx context.Context, pr *v1beta1.Pip return nil } +// setFinallyStartedTimeIfNeeded sets the PipelineRun.Status.FinallyStartedTime to the current time if it's nil. +func (c *Reconciler) setFinallyStartedTimeIfNeeded(pr *v1beta1.PipelineRun, facts *resources.PipelineRunFacts) { + if pr.Status.FinallyStartTime == nil { + pr.Status.FinallyStartTime = &metav1.Time{Time: c.Clock.Now()} + } + if facts.TimeoutsState.FinallyStartTime == nil { + facts.TimeoutsState.FinallyStartTime = &pr.Status.FinallyStartTime.Time + } +} + // updateTaskRunsStatusDirectly is used with "full" or "both" set as the value for the "embedded-status" feature flag. // When the "full" and "both" options are removed, updateTaskRunsStatusDirectly can be removed. func (c *Reconciler) updateTaskRunsStatusDirectly(pr *v1beta1.PipelineRun) error { @@ -790,14 +861,12 @@ func (c *Reconciler) updateRunsStatusDirectly(pr *v1beta1.PipelineRun) error { return nil } -type getTimeoutFunc func(ctx context.Context, pr *v1beta1.PipelineRun, rpt *resources.ResolvedPipelineTask, c clock.PassiveClock) *metav1.Duration - -func (c *Reconciler) createTaskRuns(ctx context.Context, rpt *resources.ResolvedPipelineTask, pr *v1beta1.PipelineRun, storageBasePath string, getTimeoutFunc getTimeoutFunc) ([]*v1beta1.TaskRun, error) { +func (c *Reconciler) createTaskRuns(ctx context.Context, rpt *resources.ResolvedPipelineTask, pr *v1beta1.PipelineRun, storageBasePath string) ([]*v1beta1.TaskRun, error) { var taskRuns []*v1beta1.TaskRun matrixCombinations := matrix.FanOut(rpt.PipelineTask.Matrix).ToMap() for i, taskRunName := range rpt.TaskRunNames { params := matrixCombinations[strconv.Itoa(i)] - taskRun, err := c.createTaskRun(ctx, taskRunName, params, rpt, pr, storageBasePath, getTimeoutFunc) + taskRun, err := c.createTaskRun(ctx, taskRunName, params, rpt, pr, storageBasePath) if err != nil { return nil, err } @@ -806,7 +875,7 @@ func (c *Reconciler) createTaskRuns(ctx context.Context, rpt *resources.Resolved return taskRuns, nil } -func (c *Reconciler) createTaskRun(ctx context.Context, taskRunName string, params []v1beta1.Param, rpt *resources.ResolvedPipelineTask, pr *v1beta1.PipelineRun, storageBasePath string, getTimeoutFunc getTimeoutFunc) (*v1beta1.TaskRun, error) { +func (c *Reconciler) createTaskRun(ctx context.Context, taskRunName string, params []v1beta1.Param, rpt *resources.ResolvedPipelineTask, pr *v1beta1.PipelineRun, storageBasePath string) (*v1beta1.TaskRun, error) { logger := logging.FromContext(ctx) tr, _ := c.taskRunLister.TaskRuns(pr.Namespace).Get(taskRunName) @@ -839,13 +908,16 @@ func (c *Reconciler) createTaskRun(ctx context.Context, 
taskRunName string, para Spec: v1beta1.TaskRunSpec{ Params: params, ServiceAccountName: taskRunSpec.TaskServiceAccountName, - Timeout: getTimeoutFunc(ctx, pr, rpt, c.Clock), PodTemplate: taskRunSpec.TaskPodTemplate, StepOverrides: taskRunSpec.StepOverrides, SidecarOverrides: taskRunSpec.SidecarOverrides, ComputeResources: taskRunSpec.ComputeResources, }} + if rpt.PipelineTask.Timeout != nil { + tr.Spec.Timeout = rpt.PipelineTask.Timeout + } + if rpt.ResolvedTaskResources.TaskName != "" { // We pass the entire, original task ref because it may contain additional references like a Bundle url. tr.Spec.TaskRef = rpt.PipelineTask.TaskRef @@ -869,12 +941,12 @@ func (c *Reconciler) createTaskRun(ctx context.Context, taskRunName string, para return c.PipelineClientSet.TektonV1beta1().TaskRuns(pr.Namespace).Create(ctx, tr, metav1.CreateOptions{}) } -func (c *Reconciler) createRuns(ctx context.Context, rpt *resources.ResolvedPipelineTask, pr *v1beta1.PipelineRun, getTimeoutFunc getTimeoutFunc) ([]*v1alpha1.Run, error) { +func (c *Reconciler) createRuns(ctx context.Context, rpt *resources.ResolvedPipelineTask, pr *v1beta1.PipelineRun) ([]*v1alpha1.Run, error) { var runs []*v1alpha1.Run matrixCombinations := matrix.FanOut(rpt.PipelineTask.Matrix).ToMap() for i, runName := range rpt.RunNames { params := matrixCombinations[strconv.Itoa(i)] - run, err := c.createRun(ctx, runName, params, rpt, pr, getTimeoutFunc) + run, err := c.createRun(ctx, runName, params, rpt, pr) if err != nil { return nil, err } @@ -883,7 +955,7 @@ func (c *Reconciler) createRuns(ctx context.Context, rpt *resources.ResolvedPipe return runs, nil } -func (c *Reconciler) createRun(ctx context.Context, runName string, params []v1beta1.Param, rpt *resources.ResolvedPipelineTask, pr *v1beta1.PipelineRun, getTimeoutFunc getTimeoutFunc) (*v1alpha1.Run, error) { +func (c *Reconciler) createRun(ctx context.Context, runName string, params []v1beta1.Param, rpt *resources.ResolvedPipelineTask, pr *v1beta1.PipelineRun) (*v1alpha1.Run, error) { logger := logging.FromContext(ctx) taskRunSpec := pr.GetTaskRunSpec(rpt.PipelineTask.Name) params = append(params, rpt.PipelineTask.Params...) @@ -900,11 +972,14 @@ func (c *Reconciler) createRun(ctx context.Context, runName string, params []v1b Ref: rpt.PipelineTask.TaskRef, Params: params, ServiceAccountName: taskRunSpec.TaskServiceAccountName, - Timeout: getTimeoutFunc(ctx, pr, rpt, c.Clock), PodTemplate: taskRunSpec.TaskPodTemplate, }, } + if rpt.PipelineTask.Timeout != nil { + r.Spec.Timeout = rpt.PipelineTask.Timeout + } + if rpt.PipelineTask.TaskSpec != nil { j, err := json.Marshal(rpt.PipelineTask.TaskSpec.Spec) if err != nil { @@ -1123,63 +1198,6 @@ func addMetadataByPrecedence(metadata map[string]string, addedMetadata map[strin } } -// getFinallyTaskRunTimeout returns the timeout to set when creating the ResolvedPipelineTask, which is a finally Task. -// If there is no timeout for the finally TaskRun, returns 0. -// If pipeline level timeouts have already been exceeded, returns 1 second. 
-func getFinallyTaskRunTimeout(ctx context.Context, pr *v1beta1.PipelineRun, rpt *resources.ResolvedPipelineTask, c clock.PassiveClock) *metav1.Duration { - taskRunTimeout := calculateTaskRunTimeout(pr.PipelineTimeout(ctx), pr, rpt, c) - finallyTimeout := pr.FinallyTimeout() - // Return the smaller of taskRunTimeout and finallyTimeout - // This works because all finally tasks run in parallel, so there is no need to consider time spent by other finally tasks - // TODO(#4071): Account for time spent since finally task was first started (i.e. retries) - if finallyTimeout == nil || finallyTimeout.Duration == apisconfig.NoTimeoutDuration { - return taskRunTimeout - } - if finallyTimeout.Duration < taskRunTimeout.Duration { - return finallyTimeout - } - return taskRunTimeout -} - -// getTaskRunTimeout returns the timeout to set when creating the ResolvedPipelineTask. -// If there is no timeout for the TaskRun, returns 0. -// If pipeline level timeouts have already been exceeded, returns 1 second. -func getTaskRunTimeout(ctx context.Context, pr *v1beta1.PipelineRun, rpt *resources.ResolvedPipelineTask, c clock.PassiveClock) *metav1.Duration { - var timeout time.Duration - if pr.TasksTimeout() != nil { - timeout = pr.TasksTimeout().Duration - } else { - timeout = pr.PipelineTimeout(ctx) - } - return calculateTaskRunTimeout(timeout, pr, rpt, c) -} - -// calculateTaskRunTimeout returns the timeout to set when creating the ResolvedPipelineTask. -// `timeout` is: -// - If ResolvedPipelineTask is a Task, `timeout` is the minimum of Tasks Timeout and Pipeline Timeout -// - If ResolvedPipelineTask is a Finally Task, `timeout` is the Pipeline Timeout -// If there is no timeout for the TaskRun, returns 0. -// If pipeline level timeouts have already been exceeded, returns 1 second. 
-func calculateTaskRunTimeout(timeout time.Duration, pr *v1beta1.PipelineRun, rpt *resources.ResolvedPipelineTask, c clock.PassiveClock) *metav1.Duration { - if timeout != apisconfig.NoTimeoutDuration { - pElapsedTime := c.Since(pr.Status.StartTime.Time) - if pElapsedTime > timeout { - return &metav1.Duration{Duration: 1 * time.Second} - } - timeRemaining := (timeout - pElapsedTime) - // Return the smaller of timeRemaining and rpt.pipelineTask.timeout - if rpt.PipelineTask.Timeout != nil && rpt.PipelineTask.Timeout.Duration < timeRemaining { - return &metav1.Duration{Duration: rpt.PipelineTask.Timeout.Duration} - } - return &metav1.Duration{Duration: timeRemaining} - } - - if timeout == apisconfig.NoTimeoutDuration && rpt.PipelineTask.Timeout != nil { - return &metav1.Duration{Duration: rpt.PipelineTask.Timeout.Duration} - } - return &metav1.Duration{Duration: apisconfig.NoTimeoutDuration} -} - func (c *Reconciler) updateLabelsAndAnnotations(ctx context.Context, pr *v1beta1.PipelineRun) (*v1beta1.PipelineRun, error) { newPr, err := c.pipelineRunLister.PipelineRuns(pr.Namespace).Get(pr.Name) if err != nil { diff --git a/pkg/reconciler/pipelinerun/pipelinerun_test.go b/pkg/reconciler/pipelinerun/pipelinerun_test.go index a427530fc34..b02a0ec8bad 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun_test.go +++ b/pkg/reconciler/pipelinerun/pipelinerun_test.go @@ -88,6 +88,7 @@ var ( ignoreLastTransitionTime = cmpopts.IgnoreFields(apis.Condition{}, "LastTransitionTime.Inner.Time") ignoreStartTime = cmpopts.IgnoreFields(v1beta1.PipelineRunStatusFields{}, "StartTime") ignoreCompletionTime = cmpopts.IgnoreFields(v1beta1.PipelineRunStatusFields{}, "CompletionTime") + ignoreFinallyStartTime = cmpopts.IgnoreFields(v1beta1.PipelineRunStatusFields{}, "FinallyStartTime") trueb = true simpleHelloWorldTask = &v1beta1.Task{ObjectMeta: baseObjectMeta("hello-world", "foo")} simpleSomeTask = &v1beta1.Task{ObjectMeta: baseObjectMeta("some-task", "foo")} @@ -568,7 +569,6 @@ spec: taskRef: name: unit-test-task kind: Task - timeout: 1h0m0s `) // ignore IgnoreUnexported ignore both after and before steps fields if d := cmp.Diff(expectedTaskRun, actual, ignoreTypeMeta, cmpopts.SortSlices(lessTaskResourceBindings)); d != "" { @@ -640,7 +640,6 @@ spec: kind: Example retries: 3 serviceAccountName: default - timeout: 1h0m0s ` tcs := []struct { @@ -702,7 +701,6 @@ spec: spec: field1: 123 field2: value - timeout: 1h0m0s `), }, { name: "custom task with workspace", @@ -740,7 +738,6 @@ spec: apiVersion: example.dev/v0 kind: Example serviceAccountName: default - timeout: 1h0m0s workspaces: - name: taskws persistentVolumeClaim: @@ -874,7 +871,6 @@ spec: - name: mystep image: myimage serviceAccountName: %s - timeout: 1h0m0s `, config.DefaultServiceAccountValue)) expectedTaskRun.ObjectMeta = taskRunObjectMeta("test-pipeline-run-success-unit-test-task-spec", "foo", "test-pipeline-run-success", "test-pipeline", "unit-test-task-spec", false) @@ -1696,7 +1692,7 @@ status: }, { Operation: "add", Path: "/spec/statusMessage", - Value: "Run cancelled as the PipelineRun it belongs to has been cancelled.", + Value: string(v1alpha1.RunCancelledByPipelineMsg), }} if d := cmp.Diff(got, want); d != "" { t.Fatalf("Expected cancel patch operation, but got a mismatch %s", diff.PrintWantGot(d)) @@ -1743,16 +1739,44 @@ spec: name: test-pipeline serviceAccountName: test-sa status: + conditions: + - message: running... 
+ reason: Running + status: Unknown + type: Succeeded startTime: "2021-12-31T00:00:00Z" + runs: + test-pipeline-run-custom-task-hello-world-1: + pipelineTaskName: hello-world-1 + status: + conditions: + - status: Unknown + type: Succeeded `)} prs[0].Spec.Timeout = tc.timeout prs[0].Spec.Timeouts = tc.timeouts + runs := []*v1alpha1.Run{mustParseRunWithObjectMeta(t, + taskRunObjectMeta("test-pipeline-run-custom-task-hello-world-1", "test", "test-pipeline-run-custom-task", + "test-pipeline", "hello-world-1", true), + ` +spec: + ref: + apiVersion: example.dev/v0 + kind: Example +status: + conditions: + - status: Unknown + type: Succeeded + startTime: "2021-12-31T11:58:59Z" +`)} + cms := []*corev1.ConfigMap{withCustomTasks(newFeatureFlagsConfigMap())} d := test.Data{ PipelineRuns: prs, Pipelines: ps, ConfigMaps: cms, + Runs: runs, } prt := newPipelineRunTest(d, t) defer prt.Cancel() @@ -1760,22 +1784,9 @@ status: wantEvents := []string{ fmt.Sprintf("Warning Failed PipelineRun \"%s\" failed to finish within \"12h0m0s\"", prName), } - runName := "test-pipeline-run-custom-task-hello-world-1" reconciledRun, clients := prt.reconcileRun("test", prName, wantEvents, false) - postReconcileRun, err := clients.Pipeline.TektonV1alpha1().Runs("test").Get(prt.TestAssets.Ctx, runName, metav1.GetOptions{}) - if err != nil { - t.Fatalf("Run get request failed, %v", err) - } - - gotTimeoutValue := postReconcileRun.GetTimeout() - expectedTimeoutValue := time.Second - - if d := cmp.Diff(gotTimeoutValue, expectedTimeoutValue); d != "" { - t.Fatalf("Expected timeout for created Run, but got a mismatch %s", diff.PrintWantGot(d)) - } - if reconciledRun.Status.CompletionTime == nil { t.Errorf("Expected a CompletionTime on already timedout PipelineRun but was nil") } @@ -2483,7 +2494,8 @@ spec: func TestReconcileWithTimeoutDeprecated(t *testing.T) { // TestReconcileWithTimeoutDeprecated runs "Reconcile" on a PipelineRun that has timed out. - // It verifies that reconcile is successful, the pipeline status updated and events generated. + // It verifies that reconcile is successful, no TaskRun is created, the PipelineTask is marked as skipped, and the + // pipeline status updated and events generated. ps := []*v1beta1.Pipeline{simpleHelloWorldPipeline} prs := []*v1beta1.PipelineRun{parse.MustParsePipelineRun(t, ` metadata: @@ -2510,7 +2522,7 @@ status: wantEvents := []string{ "Warning Failed PipelineRun \"test-pipeline-run-with-timeout\" failed to finish within \"12h0m0s\"", } - reconciledRun, clients := prt.reconcileRun("foo", "test-pipeline-run-with-timeout", wantEvents, false) + reconciledRun, _ := prt.reconcileRun("foo", "test-pipeline-run-with-timeout", wantEvents, false) if reconciledRun.Status.CompletionTime == nil { t.Errorf("Expected a CompletionTime on invalid PipelineRun but was nil") @@ -2521,19 +2533,31 @@ status: t.Errorf("Expected PipelineRun to be timed out, but condition reason is %s", reconciledRun.Status.GetCondition(apis.ConditionSucceeded)) } - // Check that the expected TaskRun was created - actual := getTaskRunCreations(t, clients.Pipeline.Actions(), 2)[0] - - // The TaskRun timeout should be less than or equal to the PipelineRun timeout. 
- if actual.Spec.Timeout.Duration > prs[0].Spec.Timeout.Duration { - t.Errorf("TaskRun timeout %s should be less than or equal to PipelineRun timeout %s", actual.Spec.Timeout.Duration.String(), prs[0].Spec.Timeout.Duration.String()) + // Check that there is a skipped task for the expected reason + if len(reconciledRun.Status.SkippedTasks) != 1 { + t.Errorf("expected one skipped task, found %d", len(reconciledRun.Status.SkippedTasks)) + } else if reconciledRun.Status.SkippedTasks[0].Reason != v1beta1.PipelineTimedOutSkip { + t.Errorf("expected skipped reason to be '%s', but was '%s", v1beta1.PipelineTimedOutSkip, reconciledRun.Status.SkippedTasks[0].Reason) } } -func TestReconcileWithTimeouts(t *testing.T) { - // TestReconcileWithTimeouts runs "Reconcile" on a PipelineRun that has timed out. - // It verifies that reconcile is successful, the pipeline status updated and events generated. - ps := []*v1beta1.Pipeline{simpleHelloWorldPipeline} +func TestReconcileWithTimeouts_Pipeline(t *testing.T) { + // TestReconcileWithTimeouts_Pipeline runs "Reconcile" on a PipelineRun that has timed out. + // It verifies that reconcile is successful, no TaskRun is created, the PipelineTask is marked as skipped, and the + // pipeline status updated and events generated. + ps := []*v1beta1.Pipeline{parse.MustParsePipeline(t, ` +metadata: + name: test-pipeline + namespace: foo +spec: + tasks: + - name: hello-world-1 + taskRef: + name: hello-world + - name: hello-world-2 + taskRef: + name: hello-world +`)} prs := []*v1beta1.PipelineRun{parse.MustParsePipelineRun(t, ` metadata: name: test-pipeline-run-with-timeout @@ -2546,13 +2570,32 @@ spec: pipeline: 12h0m0s status: startTime: "2021-12-31T00:00:00Z" + taskRuns: + test-pipeline-run-with-timeout-hello-world-1: + pipelineTaskName: hello-world-1 + status: + conditions: + - lastTransitionTime: null + status: "Unknown" + type: Succeeded `)} ts := []*v1beta1.Task{simpleHelloWorldTask} + trs := []*v1beta1.TaskRun{mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("test-pipeline-run-with-timeout-hello-world-1", "foo", "test-pipeline-run-with-timeout", + "test-pipeline", "hello-world-1", false), ` +spec: + resources: {} + serviceAccountName: test-sa + taskRef: + name: hello-world + kind: Task +`)} + d := test.Data{ PipelineRuns: prs, Pipelines: ps, Tasks: ts, + TaskRuns: trs, } prt := newPipelineRunTest(d, t) defer prt.Cancel() @@ -2571,12 +2614,234 @@ status: t.Errorf("Expected PipelineRun to be timed out, but condition reason is %s", reconciledRun.Status.GetCondition(apis.ConditionSucceeded)) } - // Check that the expected TaskRun was created - actual := getTaskRunCreations(t, clients.Pipeline.Actions(), 2)[0] + // Check that there is a skipped task for the expected reason + if len(reconciledRun.Status.SkippedTasks) != 1 { + t.Errorf("expected one skipped task, found %d", len(reconciledRun.Status.SkippedTasks)) + } else if reconciledRun.Status.SkippedTasks[0].Reason != v1beta1.PipelineTimedOutSkip { + t.Errorf("expected skipped reason to be '%s', but was '%s", v1beta1.PipelineTimedOutSkip, reconciledRun.Status.SkippedTasks[0].Reason) + } - // The TaskRun timeout should be less than or equal to the PipelineRun timeout. 
- if actual.Spec.Timeout.Duration > prs[0].Spec.Timeouts.Pipeline.Duration { - t.Errorf("TaskRun timeout %s should be less than or equal to PipelineRun timeout %s", actual.Spec.Timeout.Duration.String(), prs[0].Spec.Timeouts.Pipeline.Duration.String()) + updatedTaskRun, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").Get(context.Background(), trs[0].Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("error getting updated TaskRun: %#v", err) + } + + if updatedTaskRun.Spec.Status != v1beta1.TaskRunSpecStatusCancelled { + t.Errorf("expected existing TaskRun Spec.Status to be set to %s, but was %s", v1beta1.TaskRunSpecStatusCancelled, updatedTaskRun.Spec.Status) + } + if updatedTaskRun.Spec.StatusMessage != v1beta1.TaskRunCancelledByPipelineTimeoutMsg { + t.Errorf("expected existing TaskRun Spec.StatusMessage to be set to %s, but was %s", v1beta1.TaskRunCancelledByPipelineTimeoutMsg, updatedTaskRun.Spec.StatusMessage) + } +} + +func TestReconcileWithTimeouts_Tasks(t *testing.T) { + // TestReconcileWithTimeouts_Tasks runs "Reconcile" on a PipelineRun with timeouts.tasks configured. + // It verifies that reconcile is successful, no TaskRun is created, the PipelineTask is marked as skipped, and the + // pipeline status updated and events generated. + ps := []*v1beta1.Pipeline{parse.MustParsePipeline(t, ` +metadata: + name: test-pipeline + namespace: foo +spec: + tasks: + - name: hello-world-1 + taskRef: + name: hello-world + - name: hello-world-2 + taskRef: + name: hello-world +`)} + prs := []*v1beta1.PipelineRun{parse.MustParsePipelineRun(t, ` +metadata: + name: test-pipeline-run-with-timeout + namespace: foo +spec: + pipelineRef: + name: test-pipeline + serviceAccountName: test-sa + timeouts: + tasks: 2m +status: + startTime: "2021-12-31T23:55:00Z" + taskRuns: + test-pipeline-run-with-timeout-hello-world-1: + pipelineTaskName: hello-world-1 + status: + conditions: + - lastTransitionTime: null + status: "Unknown" + type: Succeeded +`)} + ts := []*v1beta1.Task{simpleHelloWorldTask} + + trs := []*v1beta1.TaskRun{mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("test-pipeline-run-with-timeout-hello-world-1", "foo", "test-pipeline-run-with-timeout", + "test-pipeline", "hello-world-1", false), ` +spec: + resources: {} + serviceAccountName: test-sa + taskRef: + name: hello-world + kind: Task +status: + conditions: + - lastTransitionTime: null + status: "Unknown" + type: Succeeded +`)} + + d := test.Data{ + PipelineRuns: prs, + Pipelines: ps, + Tasks: ts, + TaskRuns: trs, + } + prt := newPipelineRunTest(d, t) + defer prt.Cancel() + + wantEvents := []string{ + "Normal Started", + } + reconciledRun, clients := prt.reconcileRun("foo", "test-pipeline-run-with-timeout", wantEvents, false) + + if reconciledRun.Status.CompletionTime != nil { + t.Errorf("Expected nil CompletionTime on PipelineRun but was %s", reconciledRun.Status.CompletionTime) + } + + // The PipelineRun should be running. 
+ if reconciledRun.Status.GetCondition(apis.ConditionSucceeded).Reason != v1beta1.PipelineRunReasonRunning.String() { + t.Errorf("Expected PipelineRun to be running, but condition reason is %s", reconciledRun.Status.GetCondition(apis.ConditionSucceeded).Reason) + } + + // Check that there is a skipped task for the expected reason + if len(reconciledRun.Status.SkippedTasks) != 1 { + t.Errorf("expected one skipped task, found %d", len(reconciledRun.Status.SkippedTasks)) + } else if reconciledRun.Status.SkippedTasks[0].Reason != v1beta1.TasksTimedOutSkip { + t.Errorf("expected skipped reason to be '%s', but was '%s", v1beta1.TasksTimedOutSkip, reconciledRun.Status.SkippedTasks[0].Reason) + } + + updatedTaskRun, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").Get(context.Background(), trs[0].Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("error getting updated TaskRun: %#v", err) + } + + if updatedTaskRun.Spec.Status != v1beta1.TaskRunSpecStatusCancelled { + t.Errorf("expected existing TaskRun Spec.Status to be set to %s, but was %s", v1beta1.TaskRunSpecStatusCancelled, updatedTaskRun.Spec.Status) + } + if updatedTaskRun.Spec.StatusMessage != v1beta1.TaskRunCancelledByPipelineTimeoutMsg { + t.Errorf("expected existing TaskRun Spec.StatusMessage to be set to %s, but was %s", v1beta1.TaskRunCancelledByPipelineTimeoutMsg, updatedTaskRun.Spec.StatusMessage) + } +} + +func TestReconcileWithTimeouts_Finally(t *testing.T) { + // TestReconcileWithTimeouts_Finally runs "Reconcile" on a PipelineRun with timeouts.finally configured. + // It verifies that reconcile is successful, no TaskRun is created, the PipelineTask is marked as skipped, and the + // pipeline status updated and events generated. + ps := []*v1beta1.Pipeline{parse.MustParsePipeline(t, ` +metadata: + name: test-pipeline-with-finally + namespace: foo +spec: + finally: + - name: finaltask-1 + taskRef: + name: hello-world + - name: finaltask-2 + taskRef: + name: hello-world + tasks: + - name: task1 + taskRef: + name: hello-world +`)} + prs := []*v1beta1.PipelineRun{parse.MustParsePipelineRun(t, ` +metadata: + name: test-pipeline-run-with-timeout + namespace: foo +spec: + pipelineRef: + name: test-pipeline-with-finally + serviceAccountName: test-sa + timeouts: + finally: 15m +status: + finallyStartTime: "2021-12-31T23:44:59Z" + startTime: "2021-12-31T23:40:00Z" + taskRuns: + test-pipeline-run-with-timeout-hello-world: + pipelineTaskName: task1 + status: + conditions: + - lastTransitionTime: null + status: "True" + type: Succeeded + test-pipeline-run-with-timeout-finaltask-1: + pipelineTaskName: finaltask-1 + status: + conditions: + - lastTransitionTime: null + status: "Unknown" + type: Succeeded +`)} + ts := []*v1beta1.Task{simpleHelloWorldTask} + trs := []*v1beta1.TaskRun{ + getTaskRun( + t, + "test-pipeline-run-with-timeout-hello-world", + prs[0].Name, + ps[0].Name, + "hello-world", + corev1.ConditionTrue, + ), + mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("test-pipeline-run-with-timeout-finaltask-1", "foo", "test-pipeline-run-with-timeout", + "test-pipeline", "finaltask-1", false), ` +spec: + resources: {} + serviceAccountName: test-sa + taskRef: + name: hello-world + kind: Task +`)} + + d := test.Data{ + PipelineRuns: prs, + Pipelines: ps, + Tasks: ts, + TaskRuns: trs, + } + prt := newPipelineRunTest(d, t) + defer prt.Cancel() + + wantEvents := []string{ + "Normal Started", + } + reconciledRun, clients := prt.reconcileRun("foo", "test-pipeline-run-with-timeout", wantEvents, false) + + if 
reconciledRun.Status.CompletionTime != nil { + t.Errorf("Expected a nil CompletionTime on running PipelineRun but was %s", reconciledRun.Status.CompletionTime.String()) + } + + // The PipelineRun should be running. + if reconciledRun.Status.GetCondition(apis.ConditionSucceeded).Reason != v1beta1.PipelineRunReasonRunning.String() { + t.Errorf("Expected PipelineRun to be running, but condition reason is %s", reconciledRun.Status.GetCondition(apis.ConditionSucceeded).Reason) + } + + // Check that there is a skipped task for the expected reason + if len(reconciledRun.Status.SkippedTasks) != 1 { + t.Errorf("expected one skipped task, found %d", len(reconciledRun.Status.SkippedTasks)) + } else if reconciledRun.Status.SkippedTasks[0].Reason != v1beta1.FinallyTimedOutSkip { + t.Errorf("expected skipped reason to be '%s', but was '%s", v1beta1.FinallyTimedOutSkip, reconciledRun.Status.SkippedTasks[0].Reason) + } + + updatedTaskRun, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").Get(context.Background(), trs[1].Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("error getting updated TaskRun: %#v", err) + } + + if updatedTaskRun.Spec.Status != v1beta1.TaskRunSpecStatusCancelled { + t.Errorf("expected existing TaskRun Spec.Status to be set to %s, but was %s", v1beta1.TaskRunSpecStatusCancelled, updatedTaskRun.Spec.Status) + } + if updatedTaskRun.Spec.StatusMessage != v1beta1.TaskRunCancelledByPipelineTimeoutMsg { + t.Errorf("expected existing TaskRun Spec.StatusMessage to be set to %s, but was %s", v1beta1.TaskRunCancelledByPipelineTimeoutMsg, updatedTaskRun.Spec.StatusMessage) } } @@ -2766,6 +3031,120 @@ spec: } } +func TestReconcileFailsTaskRunTimeOut(t *testing.T) { + prName := "test-pipeline-fails-to-timeout" + + // TestReconcileFailsTaskRunTimeOut runs "Reconcile" on a PipelineRun with a single TaskRun. + // The TaskRun cannot be timed out. Check that the pipelinerun timeout fails, that reconcile fails and + // an event is generated + names.TestingSeed() + prs := []*v1beta1.PipelineRun{parse.MustParsePipelineRun(t, ` +metadata: + name: test-pipeline-fails-to-timeout + namespace: foo +spec: + pipelineRef: + name: test-pipeline + timeout: 1h0m0s +status: + conditions: + - message: running... + reason: Running + status: Unknown + type: Succeeded + startTime: "2021-12-31T22:59:00Z" + taskRuns: + test-pipeline-fails-to-timeouthello-world-1: + pipelineTaskName: hello-world-1 +`)} + ps := []*v1beta1.Pipeline{parse.MustParsePipeline(t, ` +metadata: + name: test-pipeline + namespace: foo +spec: + tasks: + - name: hello-world-1 + taskRef: + name: hello-world + - name: hello-world-2 + taskRef: + name: hello-world +`)} + tasks := []*v1beta1.Task{simpleHelloWorldTask} + taskRuns := []*v1beta1.TaskRun{ + getTaskRun( + t, + "test-pipeline-fails-to-timeouthello-world-1", + prName, + "test-pipeline", + "hello-world", + corev1.ConditionUnknown, + ), + } + + cms := []*corev1.ConfigMap{withEnabledAlphaAPIFields(newFeatureFlagsConfigMap())} + + d := test.Data{ + PipelineRuns: prs, + Pipelines: ps, + Tasks: tasks, + TaskRuns: taskRuns, + ConfigMaps: cms, + } + + testAssets, cancel := getPipelineRunController(t, d) + defer cancel() + c := testAssets.Controller + clients := testAssets.Clients + failingReactorActivated := true + + // Make the patch call fail, i.e. 
make it so that the controller fails to cancel the TaskRun + clients.Pipeline.PrependReactor("patch", "taskruns", func(action ktesting.Action) (bool, runtime.Object, error) { + return failingReactorActivated, nil, fmt.Errorf("i'm sorry Dave, i'm afraid i can't do that") + }) + + err := c.Reconciler.Reconcile(testAssets.Ctx, "foo/test-pipeline-fails-to-timeout") + if err == nil { + t.Errorf("Expected to see error returned from reconcile after failing to timeout TaskRun but saw none!") + } + + // Check that the PipelineRun is still running with correct error message + reconciledRun, err := clients.Pipeline.TektonV1beta1().PipelineRuns("foo").Get(testAssets.Ctx, "test-pipeline-fails-to-timeout", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Somehow had error getting reconciled run out of fake client: %s", err) + } + + if val, ok := reconciledRun.GetLabels()[pipeline.PipelineLabelKey]; !ok { + t.Fatalf("expected pipeline label") + } else if d := cmp.Diff("test-pipeline", val); d != "" { + t.Errorf("expected to see pipeline label. Diff %s", diff.PrintWantGot(d)) + } + + // The PipelineRun should not be timed out b/c we couldn't timeout the TaskRun + checkPipelineRunConditionStatusAndReason(t, reconciledRun, corev1.ConditionUnknown, ReasonCouldntTimeOut) + // The event here is "Normal" because in case we fail to timeout we leave the condition to unknown + // Further reconcile might converge then the status of the pipeline. + // See https://github.com/tektoncd/pipeline/issues/2647 for further details. + wantEvents := []string{ + "Normal PipelineRunCouldntTimeOut PipelineRun \"test-pipeline-fails-to-timeout\" was timed out but had errors trying to time out TaskRuns and/or Runs", + "Warning InternalError 1 error occurred", + } + err = eventstest.CheckEventsOrdered(t, testAssets.Recorder.Events, prName, wantEvents) + if err != nil { + t.Errorf(err.Error()) + } + + // Turn off failing reactor and retry reconciliation + failingReactorActivated = false + + err = c.Reconciler.Reconcile(testAssets.Ctx, "foo/test-pipeline-fails-to-timeout") + if err == nil { + // No error is ok + } else if ok, _ := controller.IsRequeueKey(err); !ok { // Requeue is also fine. 
+ t.Errorf("Expected to timeout TaskRun successfully!") + } +} + func TestReconcilePropagateLabelsAndAnnotations(t *testing.T) { names.TestingSeed() @@ -2797,7 +3176,6 @@ spec: taskRef: name: hello-world kind: Task - timeout: 1h0m0s `) d := test.Data{ @@ -2935,7 +3313,6 @@ spec: taskRef: name: hello-world-task kind: Task - timeout: 1h0m0s `), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta(taskRunNames[1], "foo", "test-pipeline-run-different-service-accs", "test-pipeline", "hello-world-1", false), @@ -2946,7 +3323,6 @@ spec: taskRef: name: hello-world-task kind: Task - timeout: 1h0m0s `), } @@ -3108,7 +3484,7 @@ status: reconciledRun, _ := prt.reconcileRun("foo", "test-pipeline-retry-run-with-timeout", []string{}, false) if len(reconciledRun.Status.TaskRuns["hello-world-1"].Status.RetriesStatus) != tc.retries { - t.Fatalf(" %d retry expected but %d ", tc.retries, len(reconciledRun.Status.TaskRuns["hello-world-1"].Status.RetriesStatus)) + t.Fatalf(" %d retries expected but got %d ", tc.retries, len(reconciledRun.Status.TaskRuns["hello-world-1"].Status.RetriesStatus)) } if status := reconciledRun.Status.TaskRuns["hello-world-1"].Status.GetCondition(apis.ConditionSucceeded).Status; status != tc.conditionSucceeded { @@ -3118,376 +3494,6 @@ status: } } -func TestGetTaskRunTimeout(t *testing.T) { - prName := "pipelinerun-timeouts" - ns := "foo" - p := "pipeline" - - tcs := []struct { - name string - timeoutDuration *metav1.Duration - timeoutFields *v1beta1.TimeoutFields - startTime time.Time - rpt *resources.ResolvedPipelineTask - expected *metav1.Duration - }{{ - name: "nil timeout duration", - startTime: now, - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{ - Timeout: nil, - }, - }, - expected: &metav1.Duration{Duration: 60 * time.Minute}, - }, { - name: "timeout specified in pr", - timeoutDuration: &metav1.Duration{Duration: 20 * time.Minute}, - startTime: now, - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{ - Timeout: nil, - }, - }, - expected: &metav1.Duration{Duration: 20 * time.Minute}, - }, { - name: "0 timeout duration", - timeoutDuration: &metav1.Duration{Duration: 0 * time.Minute}, - startTime: now, - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{ - Timeout: nil, - }, - }, - expected: &metav1.Duration{Duration: 0 * time.Minute}, - }, { - name: "taskrun being created after timeout expired", - timeoutDuration: &metav1.Duration{Duration: 1 * time.Minute}, - startTime: now.Add(-2 * time.Minute), - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{ - Timeout: nil, - }, - }, - expected: &metav1.Duration{Duration: 1 * time.Second}, - }, { - name: "taskrun being created with timeout for PipelineTask", - timeoutDuration: &metav1.Duration{Duration: 20 * time.Minute}, - startTime: now, - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{ - Timeout: &metav1.Duration{Duration: 2 * time.Minute}, - }, - }, - expected: &metav1.Duration{Duration: 2 * time.Minute}, - }, { - name: "0 timeout duration for PipelineRun, PipelineTask timeout still applied", - timeoutDuration: &metav1.Duration{Duration: 0 * time.Minute}, - startTime: now, - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{ - Timeout: &metav1.Duration{Duration: 2 * time.Minute}, - }, - }, - expected: &metav1.Duration{Duration: 2 * time.Minute}, - }, { - name: "taskstimeout specified in pr", - timeoutFields: &v1beta1.TimeoutFields{ - Tasks: &metav1.Duration{Duration: 20 * 
time.Minute}, - }, - startTime: now, - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{ - Timeout: nil, - }, - }, - expected: &metav1.Duration{Duration: 20 * time.Minute}, - }, { - name: "40m timeout duration, 20m taskstimeout duration", - timeoutFields: &v1beta1.TimeoutFields{ - Pipeline: &metav1.Duration{Duration: 40 * time.Minute}, - Tasks: &metav1.Duration{Duration: 20 * time.Minute}, - }, - startTime: now, - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{ - Timeout: nil, - }, - }, - expected: &metav1.Duration{Duration: 20 * time.Minute}, - }, { - name: "taskrun being created with taskstimeout for PipelineTask", - timeoutFields: &v1beta1.TimeoutFields{ - Tasks: &metav1.Duration{Duration: 20 * time.Minute}, - }, - startTime: now, - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{ - Timeout: &metav1.Duration{Duration: 2 * time.Minute}, - }, - }, - expected: &metav1.Duration{Duration: 2 * time.Minute}, - }, { - name: "tasks.timeout < pipeline.tasks[].timeout", - timeoutFields: &v1beta1.TimeoutFields{ - Tasks: &metav1.Duration{Duration: 1 * time.Minute}, - }, - startTime: now, - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{ - Timeout: &metav1.Duration{Duration: 2 * time.Minute}, - }, - }, - expected: &metav1.Duration{Duration: 1 * time.Minute}, - }, { - name: "taskrun with elapsed time; timeouts.tasks applies", - timeoutFields: &v1beta1.TimeoutFields{ - Tasks: &metav1.Duration{Duration: 20 * time.Minute}, - }, - startTime: now.Add(-10 * time.Minute), - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{}, - TaskRun: &v1beta1.TaskRun{ - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - StartTime: nil, - }, - }, - }, - }, - expected: &metav1.Duration{Duration: 10 * time.Minute}, - }, { - name: "taskrun with elapsed time; task.timeout applies", - timeoutFields: &v1beta1.TimeoutFields{ - Tasks: &metav1.Duration{Duration: 20 * time.Minute}, - }, - startTime: now.Add(-10 * time.Minute), - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{ - Timeout: &metav1.Duration{Duration: 15 * time.Minute}, - }, - TaskRun: &v1beta1.TaskRun{ - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - StartTime: nil, - }, - }, - }, - }, - expected: &metav1.Duration{Duration: 10 * time.Minute}, - }, { - name: "taskrun with elapsed time; timeouts.pipeline applies", - timeoutFields: &v1beta1.TimeoutFields{ - Tasks: &metav1.Duration{Duration: 20 * time.Minute}, - }, - startTime: now.Add(-10 * time.Minute), - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{ - Timeout: &metav1.Duration{Duration: 15 * time.Minute}, - }, - TaskRun: &v1beta1.TaskRun{ - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - StartTime: nil, - }, - }, - }, - }, - expected: &metav1.Duration{Duration: 10 * time.Minute}, - }} - - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - pr := &v1beta1.PipelineRun{ - ObjectMeta: baseObjectMeta(prName, ns), - Spec: v1beta1.PipelineRunSpec{ - PipelineRef: &v1beta1.PipelineRef{Name: p}, - Timeout: tc.timeoutDuration, - Timeouts: tc.timeoutFields, - }, - Status: v1beta1.PipelineRunStatus{ - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - StartTime: &metav1.Time{Time: tc.startTime}, - }, - }, - } - if d := cmp.Diff(getTaskRunTimeout(context.TODO(), pr, tc.rpt, testClock), tc.expected); d != "" { - 
t.Errorf("Unexpected task run timeout. Diff %s", diff.PrintWantGot(d)) - } - }) - } -} - -func TestGetFinallyTaskRunTimeout(t *testing.T) { - prName := "pipelinerun-finallyTimeouts" - ns := "foo" - p := "pipeline" - - tcs := []struct { - name string - timeoutDuration *metav1.Duration - timeoutFields *v1beta1.TimeoutFields - startTime time.Time - pr *v1beta1.PipelineRun - rpt *resources.ResolvedPipelineTask - expected *metav1.Duration - }{{ - name: "nil timeout duration", - startTime: now, - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{}, - }, - expected: &metav1.Duration{Duration: 60 * time.Minute}, - }, { - name: "timeout specified in pr", - timeoutDuration: &metav1.Duration{Duration: 20 * time.Minute}, - startTime: now, - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{}, - }, - expected: &metav1.Duration{Duration: 20 * time.Minute}, - }, { - name: "taskrun being created after timeout expired", - timeoutDuration: &metav1.Duration{Duration: 1 * time.Minute}, - startTime: now.Add(-2 * time.Minute), - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{}, - }, - expected: &metav1.Duration{Duration: 1 * time.Second}, - }, { - name: "40m timeout duration, 20m taskstimeout duration", - timeoutFields: &v1beta1.TimeoutFields{ - Pipeline: &metav1.Duration{Duration: 40 * time.Minute}, - Tasks: &metav1.Duration{Duration: 20 * time.Minute}, - }, - startTime: now, - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{}, - }, - expected: &metav1.Duration{Duration: 20 * time.Minute}, - }, { - name: "only timeouts.finally set", - timeoutFields: &v1beta1.TimeoutFields{ - Finally: &metav1.Duration{Duration: 20 * time.Minute}, - }, - startTime: now, - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{}, - }, - expected: &metav1.Duration{Duration: 20 * time.Minute}, - }, { - name: "40m timeout duration, 20m taskstimeout duration, 20m finallytimeout duration", - timeoutFields: &v1beta1.TimeoutFields{ - Pipeline: &metav1.Duration{Duration: 40 * time.Minute}, - Tasks: &metav1.Duration{Duration: 20 * time.Minute}, - Finally: &metav1.Duration{Duration: 20 * time.Minute}, - }, - startTime: now, - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{}, - }, - expected: &metav1.Duration{Duration: 20 * time.Minute}, - }, { - name: "use pipeline.finally[].timeout", - startTime: now, - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{ - Timeout: &metav1.Duration{Duration: 2 * time.Minute}, - }, - }, - expected: &metav1.Duration{Duration: 2 * time.Minute}, - }, { - name: "finally timeout < pipeline.finally[].timeout", - timeoutFields: &v1beta1.TimeoutFields{ - Pipeline: &metav1.Duration{Duration: 40 * time.Minute}, - Finally: &metav1.Duration{Duration: 1 * time.Minute}, - }, - startTime: now, - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{ - Timeout: &metav1.Duration{Duration: 2 * time.Minute}, - }, - }, - expected: &metav1.Duration{Duration: 1 * time.Minute}, - }, { - name: "finally taskrun with elapsed time; tasks.finally applies", - timeoutFields: &v1beta1.TimeoutFields{ - Finally: &metav1.Duration{Duration: 20 * time.Minute}, - }, - startTime: now.Add(-10 * time.Minute), - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{}, - TaskRun: &v1beta1.TaskRun{ - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - StartTime: nil, - }, - }, - }, - }, - 
expected: &metav1.Duration{Duration: 20 * time.Minute}, - }, { - name: "finally taskrun with elapsed time; task.timeout applies", - timeoutFields: &v1beta1.TimeoutFields{ - Finally: &metav1.Duration{Duration: 20 * time.Minute}, - }, - startTime: now.Add(-10 * time.Minute), - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{ - Timeout: &metav1.Duration{Duration: 15 * time.Minute}, - }, - TaskRun: &v1beta1.TaskRun{ - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - StartTime: nil, - }, - }, - }, - }, - expected: &metav1.Duration{Duration: 15 * time.Minute}, - }, { - name: "finally taskrun with elapsed time; timeouts.pipeline applies", - timeoutFields: &v1beta1.TimeoutFields{ - Pipeline: &metav1.Duration{Duration: 21 * time.Minute}, - Finally: &metav1.Duration{Duration: 20 * time.Minute}, - }, - startTime: now.Add(-10 * time.Minute), - rpt: &resources.ResolvedPipelineTask{ - PipelineTask: &v1beta1.PipelineTask{ - Timeout: &metav1.Duration{Duration: 15 * time.Minute}, - }, - TaskRun: &v1beta1.TaskRun{ - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - StartTime: nil, - }, - }, - }, - }, - expected: &metav1.Duration{Duration: 11 * time.Minute}, - }} - - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - pr := &v1beta1.PipelineRun{ - ObjectMeta: baseObjectMeta(prName, ns), - Spec: v1beta1.PipelineRunSpec{ - PipelineRef: &v1beta1.PipelineRef{Name: p}, - Timeout: tc.timeoutDuration, - Timeouts: tc.timeoutFields, - }, - Status: v1beta1.PipelineRunStatus{ - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - StartTime: &metav1.Time{Time: tc.startTime}, - }, - }, - } - if d := cmp.Diff(tc.expected, getFinallyTaskRunTimeout(context.TODO(), pr, tc.rpt, testClock)); d != "" { - t.Errorf("Unexpected finally task run timeout. Diff %s", diff.PrintWantGot(d)) - } - }) - } -} - // TestReconcileAndPropagateCustomPipelineTaskRunSpec tests that custom PipelineTaskRunSpec declared // in PipelineRun is propagated to created TaskRuns func TestReconcileAndPropagateCustomPipelineTaskRunSpec(t *testing.T) { @@ -3545,10 +3551,9 @@ spec: taskRef: name: hello-world kind: Task - timeout: 1h0m0s `) - if d := cmp.Diff(actual, expectedTaskRun, ignoreTypeMeta); d != "" { + if d := cmp.Diff(expectedTaskRun, actual, ignoreTypeMeta); d != "" { t.Errorf("expected to see propagated custom ServiceAccountName and PodTemplate in TaskRun %v created. Diff %s", expectedTaskRun, diff.PrintWantGot(d)) } } @@ -3774,7 +3779,6 @@ spec: taskRef: name: b-task kind: Task - timeout: 1h0m0s `) // Check that the expected TaskRun was created actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{ @@ -3789,7 +3793,7 @@ spec: t.Fatalf("Expected 1 TaskRuns got %d", len(actual.Items)) } actualTaskRun := actual.Items[0] - if d := cmp.Diff(&actualTaskRun, expectedTaskRun, ignoreResourceVersion, ignoreTypeMeta); d != "" { + if d := cmp.Diff(expectedTaskRun, &actualTaskRun, ignoreResourceVersion, ignoreTypeMeta); d != "" { t.Errorf("expected to see TaskRun %v created. 
Diff %s", expectedTaskRunName, diff.PrintWantGot(d)) } @@ -3966,7 +3970,6 @@ spec: taskRef: name: %s kind: Task - timeout: 1h0m0s `, taskName)) actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{ @@ -3981,7 +3984,7 @@ spec: t.Fatalf("Expected 1 TaskRun got %d", len(actual.Items)) } actualTaskRun := actual.Items[0] - if d := cmp.Diff(&actualTaskRun, expectedTaskRun, ignoreResourceVersion, ignoreTypeMeta); d != "" { + if d := cmp.Diff(expectedTaskRun, &actualTaskRun, ignoreResourceVersion, ignoreTypeMeta); d != "" { t.Errorf("expected to see TaskRun %v created. Diff %s", taskRunName, diff.PrintWantGot(d)) } } @@ -4645,7 +4648,6 @@ spec: taskRef: name: b-task kind: Task - timeout: 1h0m0s `) // Check that the expected TaskRun was created actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{ @@ -4660,7 +4662,7 @@ spec: t.Fatalf("Expected 1 TaskRuns got %d", len(actual.Items)) } actualTaskRun := actual.Items[0] - if d := cmp.Diff(&actualTaskRun, expectedTaskRun, ignoreResourceVersion, ignoreTypeMeta); d != "" { + if d := cmp.Diff(expectedTaskRun, &actualTaskRun, ignoreResourceVersion, ignoreTypeMeta); d != "" { t.Errorf("expected to see TaskRun %v created. Diff %s", expectedTaskRunName, diff.PrintWantGot(d)) } } @@ -4735,7 +4737,6 @@ spec: taskRef: kind: Task name: a-task - timeout: 1h0m0s `) // Check that the expected TaskRun was created (only) actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{}) @@ -6730,7 +6731,7 @@ spec: map[string]string{}, ) - if d := cmp.Diff(actualTaskRun, expectedTaskRun); d != "" { + if d := cmp.Diff(expectedTaskRun, actualTaskRun); d != "" { t.Fatalf("Expected TaskRuns to match, but got a mismatch: %s", d) } @@ -6823,7 +6824,6 @@ spec: taskRef: name: finaltask kind: Task - timeout: 1h0m0s `) // Check that the expected TaskRun was created actual, err := clients.Pipeline.TektonV1beta1().TaskRuns("foo").List(prt.TestAssets.Ctx, metav1.ListOptions{ @@ -6838,7 +6838,7 @@ spec: t.Fatalf("Expected 1 TaskRuns got %d", len(actual.Items)) } actualTaskRun := actual.Items[0] - if d := cmp.Diff(&actualTaskRun, expectedTaskRun, ignoreResourceVersion, ignoreTypeMeta); d != "" { + if d := cmp.Diff(expectedTaskRun, &actualTaskRun, ignoreResourceVersion, ignoreTypeMeta); d != "" { t.Errorf("expected to see TaskRun %v created. 
Diff %s", expectedTaskRunName, diff.PrintWantGot(d)) } } @@ -7003,7 +7003,6 @@ spec: taskRef: name: final-task kind: Task - timeout: 1h0m0s `) // Check that the expected TaskRun was created @@ -7209,7 +7208,6 @@ spec: bundle: %s kind: Task name: unit-test-task - timeout: 1h0m0s `, ref)) if d := cmp.Diff(expectedTaskRun, actual, ignoreTypeMeta, cmpopts.SortSlices(lessTaskResourceBindings)); d != "" { @@ -7315,7 +7313,6 @@ spec: workspaces: - name: ws optional: true - timeout: 1h0m0s `) if d := cmp.Diff(expectedTaskRun, actual, ignoreTypeMeta, cmpopts.SortSlices(lessTaskResourceBindings)); d != "" { @@ -7734,7 +7731,6 @@ func getTaskRunWithTaskSpec(tr, pr, p, t string, labels, annotations map[string] }, ServiceAccountName: config.DefaultServiceAccountValue, Resources: &v1beta1.TaskRunResources{}, - Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, }, } } @@ -8140,10 +8136,9 @@ spec: taskRef: name: hello-world kind: Task - timeout: 1h0m0s `) - if d := cmp.Diff(actual, expectedTaskRun, ignoreTypeMeta); d != "" { + if d := cmp.Diff(expectedTaskRun, actual, ignoreTypeMeta); d != "" { t.Errorf("expected to see propagated metadata from PipelineTaskRunSpec in TaskRun %v created. Diff %s", expectedTaskRun, diff.PrintWantGot(d)) } } @@ -8213,10 +8208,9 @@ spec: steps: - name: foo-step image: foo-image - timeout: 1h0m0s `) - if d := cmp.Diff(actual, expectedTaskRun, ignoreTypeMeta); d != "" { + if d := cmp.Diff(expectedTaskRun, actual, ignoreTypeMeta); d != "" { t.Errorf("expected to see propagated metadata by the precedence from PipelineTaskRunSpec in TaskRun %v created. Diff %s", expectedTaskRun, diff.PrintWantGot(d)) } } @@ -8258,7 +8252,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s `), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-1", "foo", @@ -8277,7 +8270,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s `), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-2", "foo", @@ -8296,7 +8288,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s `), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-3", "foo", @@ -8315,7 +8306,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s `), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-4", "foo", @@ -8334,7 +8324,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s `), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-5", "foo", @@ -8353,7 +8342,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s `), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-6", "foo", @@ -8372,7 +8360,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s `), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-7", "foo", @@ -8391,7 +8378,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s `), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-8", "foo", @@ -8410,7 +8396,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s `), } cms := []*corev1.ConfigMap{withEmbeddedStatus(withEnabledAlphaAPIFields(newFeatureFlagsConfigMap()), config.MinimalEmbeddedStatus)} @@ -8581,7 +8566,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s status: conditions: - type: Succeeded @@ -8732,7 +8716,7 @@ spec: if err != nil { t.Fatalf("Got an error getting reconciled run out of fake client: %s", err) } - if d := cmp.Diff(tt.expectedPipelineRun, 
pipelineRun, ignoreResourceVersion, ignoreTypeMeta, ignoreLastTransitionTime, ignoreStartTime); d != "" { + if d := cmp.Diff(tt.expectedPipelineRun, pipelineRun, ignoreResourceVersion, ignoreTypeMeta, ignoreLastTransitionTime, ignoreStartTime, ignoreFinallyStartTime); d != "" { t.Errorf("expected PipelineRun was not created. Diff %s", diff.PrintWantGot(d)) } }) @@ -8802,7 +8786,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s `), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-1", "foo", @@ -8821,7 +8804,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s `), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-2", "foo", @@ -8840,7 +8822,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s `), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-3", "foo", @@ -8859,7 +8840,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s `), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-4", "foo", @@ -8878,7 +8858,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s `), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-5", "foo", @@ -8897,7 +8876,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s `), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-6", "foo", @@ -8916,7 +8894,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s `), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-7", "foo", @@ -8935,7 +8912,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s `), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-8", "foo", @@ -8954,7 +8930,6 @@ spec: taskRef: name: mytask kind: Task - timeout: 1h0m0s `), } cms := []*corev1.ConfigMap{withEmbeddedStatus(withEnabledAlphaAPIFields(newFeatureFlagsConfigMap()), config.MinimalEmbeddedStatus)} @@ -9345,7 +9320,7 @@ spec: if err != nil { t.Fatalf("Got an error getting reconciled run out of fake client: %s", err) } - if d := cmp.Diff(tt.expectedPipelineRun, pipelineRun, ignoreResourceVersion, ignoreTypeMeta, ignoreLastTransitionTime, ignoreStartTime); d != "" { + if d := cmp.Diff(tt.expectedPipelineRun, pipelineRun, ignoreResourceVersion, ignoreTypeMeta, ignoreLastTransitionTime, ignoreStartTime, ignoreFinallyStartTime); d != "" { t.Errorf("expected PipelineRun was not created. 
Diff %s", diff.PrintWantGot(d)) } }) @@ -9830,7 +9805,6 @@ spec: serviceAccountName: test-sa taskRef: name: mytask - timeout: 1h0m0s `), mustParseRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-1", "foo", @@ -9851,7 +9825,6 @@ spec: serviceAccountName: test-sa taskRef: name: mytask - timeout: 1h0m0s `), mustParseRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-2", "foo", @@ -9872,7 +9845,6 @@ spec: serviceAccountName: test-sa taskRef: name: mytask - timeout: 1h0m0s `), mustParseRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-3", "foo", @@ -9893,7 +9865,6 @@ spec: serviceAccountName: test-sa taskRef: name: mytask - timeout: 1h0m0s `), mustParseRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-4", "foo", @@ -9914,7 +9885,6 @@ spec: serviceAccountName: test-sa taskRef: name: mytask - timeout: 1h0m0s `), mustParseRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-5", "foo", @@ -9935,7 +9905,6 @@ spec: serviceAccountName: test-sa taskRef: name: mytask - timeout: 1h0m0s `), mustParseRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-6", "foo", @@ -9956,7 +9925,6 @@ spec: serviceAccountName: test-sa taskRef: name: mytask - timeout: 1h0m0s `), mustParseRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-7", "foo", @@ -9977,7 +9945,6 @@ spec: serviceAccountName: test-sa taskRef: name: mytask - timeout: 1h0m0s `), mustParseRunWithObjectMeta(t, taskRunObjectMeta("pr-platforms-and-browsers-8", "foo", @@ -9998,7 +9965,6 @@ spec: serviceAccountName: test-sa taskRef: name: mytask - timeout: 1h0m0s `), } cms := []*corev1.ConfigMap{withEmbeddedStatus(withEnabledAlphaAPIFields(newFeatureFlagsConfigMap()), config.MinimalEmbeddedStatus)} @@ -10320,7 +10286,7 @@ spec: if err != nil { t.Fatalf("Got an error getting reconciled run out of fake client: %s", err) } - if d := cmp.Diff(tt.expectedPipelineRun, pipelineRun, ignoreResourceVersion, ignoreTypeMeta, ignoreLastTransitionTime, ignoreStartTime); d != "" { + if d := cmp.Diff(tt.expectedPipelineRun, pipelineRun, ignoreResourceVersion, ignoreTypeMeta, ignoreLastTransitionTime, ignoreStartTime, ignoreFinallyStartTime); d != "" { t.Errorf("expected PipelineRun was not created. Diff %s", diff.PrintWantGot(d)) } }) @@ -10459,7 +10425,6 @@ spec: taskRef: name: unit-test-task kind: Task - timeout: 1h0m0s resources: {} `) // ignore IgnoreUnexported ignore both after and before steps fields diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go index 55fc974bbaf..6176b990aae 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go @@ -77,8 +77,8 @@ func (t ResolvedPipelineTask) isDone(facts *PipelineRunFacts) bool { return t.Skip(facts).IsSkipped || t.isSuccessful() || t.isFailure() } -// isRunning returns true only if the task is neither succeeded, cancelled nor failed -func (t ResolvedPipelineTask) isRunning() bool { +// IsRunning returns true only if the task is neither succeeded, cancelled nor failed +func (t ResolvedPipelineTask) IsRunning() bool { switch { case t.IsCustomTask() && t.IsMatrixed(): if len(t.Runs) == 0 { @@ -145,6 +145,9 @@ func (t ResolvedPipelineTask) isSuccessful() bool { // If the PipelineTask has a Matrix, isFailure returns true if any run has failed (no remaining retries) // and all other runs are done. 
 func (t ResolvedPipelineTask) isFailure() bool {
+	if t.isCancelledForTimeOut() {
+		return true
+	}
 	if t.isCancelled() {
 		return true
 	}
@@ -238,6 +241,59 @@ func (t ResolvedPipelineTask) hasRemainingRetries() bool {
 	return retriesDone < t.PipelineTask.Retries
 }
 
+// isCancelledForTimeOut returns true only if the run is cancelled due to a PipelineRun-controlled timeout.
+// If the PipelineTask has a Matrix, isCancelledForTimeOut returns true if any run is cancelled due to a PipelineRun-controlled timeout and all other runs are done.
+func (t ResolvedPipelineTask) isCancelledForTimeOut() bool {
+	switch {
+	case t.IsCustomTask() && t.IsMatrixed():
+		if len(t.Runs) == 0 {
+			return false
+		}
+		isDone := true
+		atLeastOneCancelled := false
+		for _, run := range t.Runs {
+			isDone = isDone && run.IsDone()
+			c := run.Status.GetCondition(apis.ConditionSucceeded)
+			runCancelled := c.IsFalse() &&
+				c.Reason == v1alpha1.RunReasonCancelled &&
+				run.Spec.StatusMessage == v1alpha1.RunCancelledByPipelineTimeoutMsg
+			atLeastOneCancelled = atLeastOneCancelled || runCancelled
+		}
+		return atLeastOneCancelled && isDone
+	case t.IsCustomTask():
+		if t.Run == nil {
+			return false
+		}
+		c := t.Run.Status.GetCondition(apis.ConditionSucceeded)
+		return c != nil && c.IsFalse() &&
+			c.Reason == v1alpha1.RunReasonCancelled &&
+			t.Run.Spec.StatusMessage == v1alpha1.RunCancelledByPipelineTimeoutMsg
+	case t.IsMatrixed():
+		if len(t.TaskRuns) == 0 {
+			return false
+		}
+		isDone := true
+		atLeastOneCancelled := false
+		for _, taskRun := range t.TaskRuns {
+			isDone = isDone && taskRun.IsDone()
+			c := taskRun.Status.GetCondition(apis.ConditionSucceeded)
+			taskRunCancelled := c.IsFalse() &&
+				c.Reason == v1beta1.TaskRunReasonCancelled.String() &&
+				taskRun.Spec.StatusMessage == v1beta1.TaskRunCancelledByPipelineTimeoutMsg
+			atLeastOneCancelled = atLeastOneCancelled || taskRunCancelled
+		}
+		return atLeastOneCancelled && isDone
+	default:
+		if t.TaskRun == nil {
+			return false
+		}
+		c := t.TaskRun.Status.GetCondition(apis.ConditionSucceeded)
+		return c != nil && c.IsFalse() &&
+			c.Reason == v1beta1.TaskRunReasonCancelled.String() &&
+			t.TaskRun.Spec.StatusMessage == v1beta1.TaskRunCancelledByPipelineTimeoutMsg
+	}
+}
+
 // isCancelled returns true only if the run is cancelled
 // If the PipelineTask has a Matrix, isCancelled returns true if any run is cancelled and all other runs are done.
 func (t ResolvedPipelineTask) isCancelled() bool {
@@ -346,6 +402,10 @@ func (t *ResolvedPipelineTask) skip(facts *PipelineRunFacts) TaskSkipStatus {
 		skippingReason = v1beta1.MissingResultsSkip
 	case t.skipBecauseWhenExpressionsEvaluatedToFalse(facts):
 		skippingReason = v1beta1.WhenExpressionsSkip
+	case t.skipBecausePipelineRunPipelineTimeoutReached(facts):
+		skippingReason = v1beta1.PipelineTimedOutSkip
+	case t.skipBecausePipelineRunTasksTimeoutReached(facts):
+		skippingReason = v1beta1.TasksTimedOutSkip
 	default:
 		skippingReason = v1beta1.None
 	}
@@ -422,6 +482,45 @@ func (t *ResolvedPipelineTask) skipBecauseResultReferencesAreMissing(facts *Pipe
 	return false
 }
 
+// skipBecausePipelineRunPipelineTimeoutReached returns true if the task shouldn't be launched because the elapsed time since
+// the PipelineRun started is greater than the PipelineRun's pipeline timeout
+func (t *ResolvedPipelineTask) skipBecausePipelineRunPipelineTimeoutReached(facts *PipelineRunFacts) bool {
+	if t.checkParentsDone(facts) {
+		if facts.TimeoutsState.PipelineTimeout != nil && *facts.TimeoutsState.PipelineTimeout != config.NoTimeoutDuration && facts.TimeoutsState.StartTime != nil {
+			// If the elapsed time since the PipelineRun's start time is greater than the PipelineRun's Pipeline timeout, skip.
+			return facts.TimeoutsState.Clock.Since(*facts.TimeoutsState.StartTime) > *facts.TimeoutsState.PipelineTimeout
+		}
+	}
+
+	return false
+}
+
+// skipBecausePipelineRunTasksTimeoutReached returns true if the task shouldn't be launched because the elapsed time since
+// the PipelineRun started is greater than the PipelineRun's tasks timeout
+func (t *ResolvedPipelineTask) skipBecausePipelineRunTasksTimeoutReached(facts *PipelineRunFacts) bool {
+	if t.checkParentsDone(facts) && !t.IsFinalTask(facts) {
+		if facts.TimeoutsState.TasksTimeout != nil && *facts.TimeoutsState.TasksTimeout != config.NoTimeoutDuration && facts.TimeoutsState.StartTime != nil {
+			// If the elapsed time since the PipelineRun's start time is greater than the PipelineRun's Tasks timeout, skip.
+			return facts.TimeoutsState.Clock.Since(*facts.TimeoutsState.StartTime) > *facts.TimeoutsState.TasksTimeout
+		}
+	}
+
+	return false
+}
+
+// skipBecausePipelineRunFinallyTimeoutReached returns true if the task shouldn't be launched because the elapsed time since
+// finally tasks started being executed is greater than the PipelineRun's finally timeout
+func (t *ResolvedPipelineTask) skipBecausePipelineRunFinallyTimeoutReached(facts *PipelineRunFacts) bool {
+	if t.checkParentsDone(facts) && t.IsFinalTask(facts) {
+		if facts.TimeoutsState.FinallyTimeout != nil && *facts.TimeoutsState.FinallyTimeout != config.NoTimeoutDuration && facts.TimeoutsState.FinallyStartTime != nil {
+			// If the elapsed time since the PipelineRun's finally start time is greater than the PipelineRun's finally timeout, skip.
+ return facts.TimeoutsState.Clock.Since(*facts.TimeoutsState.FinallyStartTime) > *facts.TimeoutsState.FinallyTimeout + } + } + + return false +} + // IsFinalTask returns true if a task is a finally task func (t *ResolvedPipelineTask) IsFinalTask(facts *PipelineRunFacts) bool { return facts.isFinalTask(t.PipelineTask.Name) @@ -440,6 +539,10 @@ func (t *ResolvedPipelineTask) IsFinallySkipped(facts *PipelineRunFacts) TaskSki skippingReason = v1beta1.MissingResultsSkip case t.skipBecauseWhenExpressionsEvaluatedToFalse(facts): skippingReason = v1beta1.WhenExpressionsSkip + case t.skipBecausePipelineRunPipelineTimeoutReached(facts): + skippingReason = v1beta1.PipelineTimedOutSkip + case t.skipBecausePipelineRunFinallyTimeoutReached(facts): + skippingReason = v1beta1.FinallyTimedOutSkip default: skippingReason = v1beta1.None } diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go index 7be6bbbb9c4..4959c184604 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go @@ -22,6 +22,7 @@ import ( "fmt" "sort" "testing" + "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -285,11 +286,23 @@ func withCancelled(tr *v1beta1.TaskRun) *v1beta1.TaskRun { return tr } +func withCancelledForTimeout(tr *v1beta1.TaskRun) *v1beta1.TaskRun { + tr.Spec.StatusMessage = v1beta1.TaskRunCancelledByPipelineTimeoutMsg + tr.Status.Conditions[0].Reason = v1beta1.TaskRunSpecStatusCancelled + return tr +} + func withRunCancelled(run *v1alpha1.Run) *v1alpha1.Run { run.Status.Conditions[0].Reason = v1alpha1.RunReasonCancelled return run } +func withRunCancelledForTimeout(run *v1alpha1.Run) *v1alpha1.Run { + run.Spec.StatusMessage = v1alpha1.RunCancelledByPipelineTimeoutMsg + run.Status.Conditions[0].Reason = v1alpha1.RunReasonCancelled + return run +} + func withCancelledBySpec(tr *v1beta1.TaskRun) *v1beta1.TaskRun { tr.Spec.Status = v1beta1.TaskRunSpecStatusCancelled return tr @@ -752,9 +765,12 @@ func dagFromState(state PipelineRunState) (*dag.Graph, error) { func TestIsSkipped(t *testing.T) { for _, tc := range []struct { - name string - state PipelineRunState - expected map[string]bool + name string + state PipelineRunState + startTime time.Time + tasksTimeout time.Duration + pipelineTimeout time.Duration + expected map[string]bool }{{ name: "tasks-failed", state: oneFailedState, @@ -1129,6 +1145,42 @@ func TestIsSkipped(t *testing.T) { "mytask22": true, "mytask23": true, }, + }, { + name: "pipeline-tasks-timeout-not-reached", + state: oneStartedState, + startTime: now.Add(-5 * time.Minute), + tasksTimeout: 10 * time.Minute, + expected: map[string]bool{ + "mytask1": false, + "mytask2": false, + }, + }, { + name: "pipeline-timeout-not--reached", + state: oneStartedState, + startTime: now.Add(-5 * time.Minute), + pipelineTimeout: 10 * time.Minute, + expected: map[string]bool{ + "mytask1": false, + "mytask2": false, + }, + }, { + name: "pipeline-tasks-timeout-reached", + state: oneStartedState, + startTime: now.Add(-5 * time.Minute), + tasksTimeout: 4 * time.Minute, + expected: map[string]bool{ + "mytask1": false, + "mytask2": true, + }, + }, { + name: "pipeline-timeout-reached", + state: oneStartedState, + startTime: now.Add(-5 * time.Minute), + pipelineTimeout: 4 * time.Minute, + expected: map[string]bool{ + "mytask1": false, + "mytask2": true, + }, }} { t.Run(tc.name, func(t *testing.T) { d, err := 
dagFromState(tc.state) @@ -1136,10 +1188,24 @@ func TestIsSkipped(t *testing.T) { t.Fatalf("Could not get a dag from the TC state %#v: %v", tc.state, err) } stateMap := tc.state.ToMap() + startTime := tc.startTime + if startTime.IsZero() { + startTime = now + } facts := PipelineRunFacts{ State: tc.state, TasksGraph: d, FinalTasksGraph: &dag.Graph{}, + TimeoutsState: PipelineRunTimeoutsState{ + StartTime: &startTime, + Clock: testClock, + }, + } + if tc.tasksTimeout != 0 { + facts.TimeoutsState.TasksTimeout = &tc.tasksTimeout + } + if tc.pipelineTimeout != 0 { + facts.TimeoutsState.PipelineTimeout = &tc.pipelineTimeout } for taskName, isSkipped := range tc.expected { rpt := stateMap[taskName] @@ -1266,6 +1332,13 @@ func TestIsFailure(t *testing.T) { TaskRun: withCancelled(newTaskRun(trs[0])), }, want: false, + }, { + name: "taskrun cancelled for timeout", + rpt: ResolvedPipelineTask{ + PipelineTask: &v1beta1.PipelineTask{Name: "task"}, + TaskRun: withCancelledForTimeout(makeFailed(trs[0])), + }, + want: true, }, { name: "run cancelled", rpt: ResolvedPipelineTask{ @@ -1274,6 +1347,14 @@ func TestIsFailure(t *testing.T) { CustomTask: true, }, want: true, + }, { + name: "run cancelled for timeout", + rpt: ResolvedPipelineTask{ + PipelineTask: &v1beta1.PipelineTask{Name: "task"}, + Run: withRunCancelledForTimeout(makeRunFailed(runs[0])), + CustomTask: true, + }, + want: true, }, { name: "run cancelled but not failed", rpt: ResolvedPipelineTask{ @@ -1688,6 +1769,9 @@ func TestSkipBecauseParentTaskWasSkipped(t *testing.T) { State: tc.state, TasksGraph: d, FinalTasksGraph: &dag.Graph{}, + TimeoutsState: PipelineRunTimeoutsState{ + Clock: testClock, + }, } for taskName, isSkipped := range tc.expected { rpt := stateMap[taskName] @@ -2851,46 +2935,129 @@ func TestResolvedPipelineRunTask_IsFinallySkipped(t *testing.T) { }, }} - expected := map[string]bool{ - "final-task-1": false, - "final-task-2": true, - "final-task-3": false, - "final-task-4": true, - "final-task-5": false, - "final-task-6": true, + testCases := []struct { + name string + startTime time.Time + finallyStartTime time.Time + finallyTimeout time.Duration + pipelineTimeout time.Duration + expected map[string]bool + }{ + { + name: "no finally timeout", + expected: map[string]bool{ + "final-task-1": false, + "final-task-2": true, + "final-task-3": false, + "final-task-4": true, + "final-task-5": false, + "final-task-6": true, + }, + }, { + name: "finally timeout not yet reached", + finallyStartTime: now.Add(-5 * time.Minute), + finallyTimeout: 10 * time.Minute, + expected: map[string]bool{ + "final-task-1": false, + "final-task-2": true, + "final-task-3": false, + "final-task-4": true, + "final-task-5": false, + "final-task-6": true, + }, + }, { + name: "pipeline timeout not yet reached", + startTime: now.Add(-5 * time.Minute), + pipelineTimeout: 10 * time.Minute, + expected: map[string]bool{ + "final-task-1": false, + "final-task-2": true, + "final-task-3": false, + "final-task-4": true, + "final-task-5": false, + "final-task-6": true, + }, + }, { + name: "finally timeout passed", + finallyStartTime: now.Add(-5 * time.Minute), + finallyTimeout: 4 * time.Minute, + expected: map[string]bool{ + "final-task-1": true, + "final-task-2": true, + "final-task-3": true, + "final-task-4": true, + "final-task-5": true, + "final-task-6": true, + }, + }, { + name: "pipeline timeout passed", + startTime: now.Add(-5 * time.Minute), + pipelineTimeout: 4 * time.Minute, + expected: map[string]bool{ + "final-task-1": true, + "final-task-2": true, + 
"final-task-3": true, + "final-task-4": true, + "final-task-5": true, + "final-task-6": true, + }, + }, } - tasks := v1beta1.PipelineTaskList([]v1beta1.PipelineTask{*state[0].PipelineTask}) - d, err := dag.Build(tasks, tasks.Deps()) - if err != nil { - t.Fatalf("Could not get a dag from the dag tasks %#v: %v", state[0], err) - } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tasks := v1beta1.PipelineTaskList([]v1beta1.PipelineTask{*state[0].PipelineTask}) + d, err := dag.Build(tasks, tasks.Deps()) + if err != nil { + t.Fatalf("Could not get a dag from the dag tasks %#v: %v", state[0], err) + } - // build graph with finally tasks - var pts []v1beta1.PipelineTask - for i := range state { - if i > 0 { // first one is a dag task that produces a result - pts = append(pts, *state[i].PipelineTask) - } - } - dfinally, err := dag.Build(v1beta1.PipelineTaskList(pts), map[string][]string{}) - if err != nil { - t.Fatalf("Could not get a dag from the finally tasks %#v: %v", pts, err) - } + // build graph with finally tasks + var pts []v1beta1.PipelineTask + for i := range state { + if i > 0 { // first one is a dag task that produces a result + pts = append(pts, *state[i].PipelineTask) + } + } + dfinally, err := dag.Build(v1beta1.PipelineTaskList(pts), map[string][]string{}) + if err != nil { + t.Fatalf("Could not get a dag from the finally tasks %#v: %v", pts, err) + } - facts := &PipelineRunFacts{ - State: state, - TasksGraph: d, - FinalTasksGraph: dfinally, - } + finallyStartTime := tc.finallyStartTime + if finallyStartTime.IsZero() { + finallyStartTime = now + } + prStartTime := tc.startTime + if prStartTime.IsZero() { + prStartTime = now + } + facts := &PipelineRunFacts{ + State: state, + TasksGraph: d, + FinalTasksGraph: dfinally, + TimeoutsState: PipelineRunTimeoutsState{ + StartTime: &prStartTime, + FinallyStartTime: &finallyStartTime, + Clock: testClock, + }, + } + if tc.finallyTimeout != 0 { + facts.TimeoutsState.FinallyTimeout = &tc.finallyTimeout + } + if tc.pipelineTimeout != 0 { + facts.TimeoutsState.PipelineTimeout = &tc.pipelineTimeout + } - for i := range state { - if i > 0 { // first one is a dag task that produces a result - finallyTaskName := state[i].PipelineTask.Name - if d := cmp.Diff(expected[finallyTaskName], state[i].IsFinallySkipped(facts).IsSkipped); d != "" { - t.Fatalf("Didn't get expected isFinallySkipped from finally task %s: %s", finallyTaskName, diff.PrintWantGot(d)) + for i := range state { + if i > 0 { // first one is a dag task that produces a result + finallyTaskName := state[i].PipelineTask.Name + if d := cmp.Diff(tc.expected[finallyTaskName], state[i].IsFinallySkipped(facts).IsSkipped); d != "" { + t.Fatalf("Didn't get expected isFinallySkipped from finally task %s: %s", finallyTaskName, diff.PrintWantGot(d)) + } + } } - } + }) } } @@ -3008,6 +3175,9 @@ func TestResolvedPipelineRunTask_IsFinallySkippedByCondition(t *testing.T) { State: tc.state, TasksGraph: d, FinalTasksGraph: dfinally, + TimeoutsState: PipelineRunTimeoutsState{ + Clock: testClock, + }, } for _, state := range tc.state[1:] { @@ -3080,6 +3250,9 @@ func TestResolvedPipelineRunTask_IsFinalTask(t *testing.T) { State: state, TasksGraph: d, FinalTasksGraph: dfinally, + TimeoutsState: PipelineRunTimeoutsState{ + Clock: testClock, + }, } finallyTaskName := state[1].PipelineTask.Name @@ -4484,8 +4657,8 @@ func TestIsRunning(t *testing.T) { want: true, }} { t.Run(tc.name, func(t *testing.T) { - if got := tc.rpt.isRunning(); got != tc.want { - t.Errorf("expected isRunning: %t but 
got %t", tc.want, got) + if got := tc.rpt.IsRunning(); got != tc.want { + t.Errorf("expected IsRunning: %t but got %t", tc.want, got) } }) diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunstate.go b/pkg/reconciler/pipelinerun/resources/pipelinerunstate.go index 0ce78319ca0..2c75f57f880 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelinerunstate.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunstate.go @@ -19,6 +19,7 @@ package resources import ( "context" "fmt" + "time" "github.com/tektoncd/pipeline/pkg/apis/pipeline" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" @@ -55,6 +56,7 @@ type PipelineRunFacts struct { SpecStatus v1beta1.PipelineRunSpecStatus TasksGraph *dag.Graph FinalTasksGraph *dag.Graph + TimeoutsState PipelineRunTimeoutsState // SkipCache is a hash of PipelineTask names that stores whether a task will be // executed or not, because it's either not reachable via the DAG due to the pipeline @@ -67,6 +69,17 @@ type PipelineRunFacts struct { SkipCache map[string]TaskSkipStatus } +// PipelineRunTimeoutsState records information about start times and timeouts for the PipelineRun, so that the PipelineRunFacts +// can reference those values in its functions. +type PipelineRunTimeoutsState struct { + StartTime *time.Time + FinallyStartTime *time.Time + PipelineTimeout *time.Duration + TasksTimeout *time.Duration + FinallyTimeout *time.Duration + Clock clock.PassiveClock +} + // pipelineRunStatusCount holds the count of successful, failed, cancelled, skipped, and incomplete tasks type pipelineRunStatusCount struct { // skipped tasks count @@ -79,6 +92,8 @@ type pipelineRunStatusCount struct { Cancelled int // number of tasks which are still pending, have not executed Incomplete int + // count of tasks skipped due to the relevant timeout having elapsed before the task is launched + SkippedDueToTimeout int } // ResetSkippedCache resets the skipped cache in the facts map @@ -362,7 +377,7 @@ func (facts *PipelineRunFacts) IsStopping() bool { func (facts *PipelineRunFacts) IsRunning() bool { for _, t := range facts.State { if facts.isDAGTask(t.PipelineTask.Name) { - if t.isRunning() { + if t.IsRunning() { return true } } @@ -409,6 +424,30 @@ func (facts *PipelineRunFacts) DAGExecutionQueue() (PipelineRunState, error) { return tasks, nil } +// GetFinalTaskNames returns a list of all final task names +func (facts *PipelineRunFacts) GetFinalTaskNames() sets.String { + names := sets.NewString() + // return list of tasks with all final tasks + for _, t := range facts.State { + if facts.isFinalTask(t.PipelineTask.Name) { + names.Insert(t.PipelineTask.Name) + } + } + return names +} + +// GetTaskNames returns a list of all non-final task names +func (facts *PipelineRunFacts) GetTaskNames() sets.String { + names := sets.NewString() + // return list of tasks with all final tasks + for _, t := range facts.State { + if !facts.isFinalTask(t.PipelineTask.Name) { + names.Insert(t.PipelineTask.Name) + } + } + return names +} + // GetFinalTasks returns a list of final tasks which needs to be executed next // GetFinalTasks returns final tasks only when all DAG tasks have finished executing or have been skipped func (facts *PipelineRunFacts) GetFinalTasks() PipelineRunState { @@ -467,7 +506,7 @@ func (facts *PipelineRunFacts) GetPipelineConditionStatus(ctx context.Context, p } switch { - case s.Failed > 0: + case s.Failed > 0 || s.SkippedDueToTimeout > 0: // Set reason to ReasonFailed - At least one failed reason = v1beta1.PipelineRunReasonFailed.String() status = 
corev1.ConditionFalse @@ -630,23 +669,33 @@ func (facts *PipelineRunFacts) checkFinalTasksDone() bool { // getPipelineTasksCount returns the count of successful tasks, failed tasks, cancelled tasks, skipped task, and incomplete tasks func (facts *PipelineRunFacts) getPipelineTasksCount() pipelineRunStatusCount { s := pipelineRunStatusCount{ - Skipped: 0, - Succeeded: 0, - Failed: 0, - Cancelled: 0, - Incomplete: 0, + Skipped: 0, + Succeeded: 0, + Failed: 0, + Cancelled: 0, + Incomplete: 0, + SkippedDueToTimeout: 0, } for _, t := range facts.State { switch { // increment success counter since the task is successful case t.isSuccessful(): s.Succeeded++ + // increment failure counter since the task is cancelled due to a timeout + case t.isCancelledForTimeOut(): + s.Failed++ // increment cancelled counter since the task is cancelled case t.isCancelled(): s.Cancelled++ // increment failure counter since the task has failed case t.isFailure(): s.Failed++ + // increment skipped and skipped due to timeout counters since the task was skipped due to the pipeline, tasks, or finally timeout being reached before the task was launched + case t.Skip(facts).SkippingReason == v1beta1.PipelineTimedOutSkip || + t.Skip(facts).SkippingReason == v1beta1.TasksTimedOutSkip || + t.IsFinallySkipped(facts).SkippingReason == v1beta1.FinallyTimedOutSkip: + s.Skipped++ + s.SkippedDueToTimeout++ // increment skip counter since the task is skipped case t.Skip(facts).IsSkipped: s.Skipped++ diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go b/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go index b164c222cfc..57e2f33b66c 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go @@ -227,6 +227,9 @@ func TestPipelineRunFacts_CheckDAGTasksDoneDone(t *testing.T) { State: tc.state, TasksGraph: d, FinalTasksGraph: &dag.Graph{}, + TimeoutsState: PipelineRunTimeoutsState{ + Clock: testClock, + }, } isDone := facts.checkTasksDone(d) @@ -1113,6 +1116,9 @@ func TestDAGExecutionQueue(t *testing.T) { SpecStatus: tc.specStatus, TasksGraph: d, FinalTasksGraph: &dag.Graph{}, + TimeoutsState: PipelineRunTimeoutsState{ + Clock: testClock, + }, } queue, err := facts.DAGExecutionQueue() if err != nil { @@ -1199,6 +1205,9 @@ func TestDAGExecutionQueueSequentialTasks(t *testing.T) { SpecStatus: tc.specStatus, TasksGraph: d, FinalTasksGraph: &dag.Graph{}, + TimeoutsState: PipelineRunTimeoutsState{ + Clock: testClock, + }, } queue, err := facts.DAGExecutionQueue() if err != nil { @@ -1288,6 +1297,9 @@ func TestDAGExecutionQueueSequentialRuns(t *testing.T) { SpecStatus: tc.specStatus, TasksGraph: d, FinalTasksGraph: &dag.Graph{}, + TimeoutsState: PipelineRunTimeoutsState{ + Clock: testClock, + }, } queue, err := facts.DAGExecutionQueue() if err != nil { @@ -1377,6 +1389,9 @@ func TestPipelineRunState_CompletedOrSkippedDAGTasks(t *testing.T) { SpecStatus: tc.specStatus, TasksGraph: d, FinalTasksGraph: &dag.Graph{}, + TimeoutsState: PipelineRunTimeoutsState{ + Clock: testClock, + }, } names := facts.completedOrSkippedDAGTasks() if d := cmp.Diff(names, tc.expectedNames); d != "" { @@ -1445,7 +1460,7 @@ func buildPipelineStateWithLargeDepencyGraph(t *testing.T) PipelineRunState { return pipelineRunState } -func TestPipelineRunState_GetFinalTasks(t *testing.T) { +func TestPipelineRunState_GetFinalTasksAndNames(t *testing.T) { tcs := []struct { name string desc string @@ -1453,6 +1468,8 @@ func TestPipelineRunState_GetFinalTasks(t 
*testing.T) { DAGTasks []v1beta1.PipelineTask finalTasks []v1beta1.PipelineTask expectedFinalTasks PipelineRunState + expectedFinalNames sets.String + expectedTaskNames sets.String }{{ // tasks: [ mytask1, mytask2] // none finally @@ -1463,6 +1480,8 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { DAGTasks: []v1beta1.PipelineTask{pts[0], pts[1]}, finalTasks: []v1beta1.PipelineTask{}, expectedFinalTasks: PipelineRunState{}, + expectedFinalNames: nil, + expectedTaskNames: sets.NewString(pts[0].Name, pts[1].Name), }, { // tasks: [ mytask1] // finally: [mytask2] @@ -1472,6 +1491,8 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { DAGTasks: []v1beta1.PipelineTask{pts[0]}, finalTasks: []v1beta1.PipelineTask{pts[1]}, expectedFinalTasks: PipelineRunState{}, + expectedFinalNames: sets.NewString(pts[1].Name), + expectedTaskNames: sets.NewString(pts[0].Name), }, { // tasks: [ mytask1] // finally: [mytask2] @@ -1481,6 +1502,8 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { DAGTasks: []v1beta1.PipelineTask{pts[0]}, finalTasks: []v1beta1.PipelineTask{pts[1]}, expectedFinalTasks: PipelineRunState{}, + expectedFinalNames: sets.NewString(pts[1].Name), + expectedTaskNames: sets.NewString(pts[0].Name), }, { // tasks: [ mytask1] // finally: [mytask2] @@ -1490,6 +1513,8 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { DAGTasks: []v1beta1.PipelineTask{pts[0]}, finalTasks: []v1beta1.PipelineTask{pts[1]}, expectedFinalTasks: PipelineRunState{oneFinishedState[1]}, + expectedFinalNames: sets.NewString(pts[1].Name), + expectedTaskNames: sets.NewString(pts[0].Name), }, { // tasks: [ mytask1] // finally: [mytask2] @@ -1499,6 +1524,8 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { DAGTasks: []v1beta1.PipelineTask{pts[0]}, finalTasks: []v1beta1.PipelineTask{pts[1]}, expectedFinalTasks: PipelineRunState{oneFinishedState[1]}, + expectedFinalNames: sets.NewString(pts[1].Name), + expectedTaskNames: sets.NewString(pts[0].Name), }, { // tasks: [ mytask1] // finally: [mytask2] @@ -1508,6 +1535,8 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { DAGTasks: []v1beta1.PipelineTask{pts[0]}, finalTasks: []v1beta1.PipelineTask{pts[1]}, expectedFinalTasks: PipelineRunState{}, + expectedFinalNames: sets.NewString(pts[1].Name), + expectedTaskNames: sets.NewString(pts[0].Name), }, { // tasks: [ mytask1] // finally: [mytask4] @@ -1517,6 +1546,8 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { DAGTasks: []v1beta1.PipelineTask{pts[0]}, finalTasks: []v1beta1.PipelineTask{pts[3]}, expectedFinalTasks: PipelineRunState{retryableFinalState[1]}, + expectedFinalNames: sets.NewString(pts[3].Name), + expectedTaskNames: sets.NewString(pts[0].Name), }} for _, tc := range tcs { dagGraph, err := dag.Build(v1beta1.PipelineTaskList(tc.DAGTasks), v1beta1.PipelineTaskList(tc.DAGTasks).Deps()) @@ -1532,11 +1563,24 @@ func TestPipelineRunState_GetFinalTasks(t *testing.T) { State: tc.state, TasksGraph: dagGraph, FinalTasksGraph: finalGraph, + TimeoutsState: PipelineRunTimeoutsState{ + Clock: testClock, + }, } next := facts.GetFinalTasks() if d := cmp.Diff(tc.expectedFinalTasks, next); d != "" { t.Errorf("Didn't get expected final Tasks for %s (%s): %s", tc.name, tc.desc, diff.PrintWantGot(d)) } + + finalTaskNames := facts.GetFinalTaskNames() + if d := cmp.Diff(tc.expectedFinalNames, finalTaskNames); d != "" { + t.Errorf("Didn't get expected final Task names for %s (%s): %s", tc.name, tc.desc, diff.PrintWantGot(d)) + } + + nonFinalTaskNames := facts.GetTaskNames() + if d := 
cmp.Diff(tc.expectedTaskNames, nonFinalTaskNames); d != "" { + t.Errorf("Didn't get expected non-final Task names for %s (%s): %s", tc.name, tc.desc, diff.PrintWantGot(d)) + } }) } } @@ -1558,6 +1602,12 @@ func TestGetPipelineConditionStatus(t *testing.T) { TaskRun: withCancelled(makeFailed(trs[0])), }} + var taskCancelledFailedTimedOut = PipelineRunState{{ + PipelineTask: &pts[4], + TaskRunName: "pipelinerun-mytask1", + TaskRun: withCancelledForTimeout(makeFailed(trs[0])), + }} + var cancelledTask = PipelineRunState{{ PipelineTask: &pts[3], // 1 retry needed TaskRunName: "pipelinerun-mytask1", @@ -1590,6 +1640,30 @@ func TestGetPipelineConditionStatus(t *testing.T) { }, }} + var timedOutRun = PipelineRunState{{ + PipelineTask: &pts[12], + CustomTask: true, + RunName: "pipelinerun-mytask14", + Run: &v1alpha1.Run{ + Spec: v1alpha1.RunSpec{ + StatusMessage: v1alpha1.RunCancelledByPipelineTimeoutMsg, + }, + Status: v1alpha1.RunStatus{ + Status: duckv1.Status{Conditions: []apis.Condition{{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + Reason: v1alpha1.RunReasonCancelled, + }}}, + }, + }, + }} + + var notRunningRun = PipelineRunState{{ + PipelineTask: &pts[12], + CustomTask: true, + RunName: "pipelinerun-mytask14", + }} + // 6 Tasks, 4 that run in parallel in the beginning // Of the 4, 1 passed, 1 cancelled, 2 failed // 1 runAfter the passed one, currently running @@ -1625,11 +1699,15 @@ func TestGetPipelineConditionStatus(t *testing.T) { TaskRun: makeFailed(trs[0]), }} + tenMinutesAgo := now.Add(-10 * time.Minute) + fiveMinuteDuration := 5 * time.Minute + tcs := []struct { name string state PipelineRunState finallyState PipelineRunState specStatus v1beta1.PipelineRunSpecStatus + timeoutsState PipelineRunTimeoutsState expectedStatus corev1.ConditionStatus expectedReason string expectedSucceeded int @@ -1712,6 +1790,12 @@ func TestGetPipelineConditionStatus(t *testing.T) { expectedReason: v1beta1.PipelineRunReasonCancelled.String(), expectedStatus: corev1.ConditionFalse, expectedCancelled: 1, + }, { + name: "task that was cancelled for timeout", + state: taskCancelledFailedTimedOut, + expectedReason: v1beta1.PipelineRunReasonFailed.String(), + expectedStatus: corev1.ConditionFalse, + expectedFailed: 1, }, { name: "task with multiple failures", state: taskMultipleFailuresSkipRunning, @@ -1752,6 +1836,22 @@ func TestGetPipelineConditionStatus(t *testing.T) { expectedStatus: corev1.ConditionFalse, expectedReason: v1beta1.PipelineRunReasonCancelled.String(), expectedCancelled: 1, + }, { + name: "cancelled for timeout run should result in failed pipeline", + state: timedOutRun, + expectedStatus: corev1.ConditionFalse, + expectedReason: v1beta1.PipelineRunReasonFailed.String(), + expectedFailed: 1, + }, { + name: "skipped for timeout run should result in failed pipeline", + state: notRunningRun, + timeoutsState: PipelineRunTimeoutsState{ + StartTime: &tenMinutesAgo, + TasksTimeout: &fiveMinuteDuration, + }, + expectedStatus: corev1.ConditionFalse, + expectedReason: v1beta1.PipelineRunReasonFailed.String(), + expectedSkipped: 1, }} for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { @@ -1771,11 +1871,18 @@ func TestGetPipelineConditionStatus(t *testing.T) { if err != nil { t.Fatalf("Unexpected error while building DAG for finally state %v: %v", tc.finallyState, err) } + + timeoutsState := tc.timeoutsState + if timeoutsState.Clock == nil { + timeoutsState.Clock = testClock + } + facts := PipelineRunFacts{ State: tc.state, SpecStatus: tc.specStatus, TasksGraph: d, 
FinalTasksGraph: dfinally, + TimeoutsState: timeoutsState, } c := facts.GetPipelineConditionStatus(context.Background(), pr, zap.NewNop().Sugar(), testClock) wantCondition := &apis.Condition{ @@ -1918,6 +2025,9 @@ func TestGetPipelineConditionStatus_WithFinalTasks(t *testing.T) { State: tc.state, TasksGraph: d, FinalTasksGraph: df, + TimeoutsState: PipelineRunTimeoutsState{ + Clock: testClock, + }, } c := facts.GetPipelineConditionStatus(context.Background(), pr, zap.NewNop().Sugar(), testClock) wantCondition := &apis.Condition{ @@ -1955,6 +2065,9 @@ func TestGetPipelineConditionStatus_PipelineTimeoutDeprecated(t *testing.T) { State: oneFinishedState, TasksGraph: d, FinalTasksGraph: &dag.Graph{}, + TimeoutsState: PipelineRunTimeoutsState{ + Clock: testClock, + }, } c := facts.GetPipelineConditionStatus(context.Background(), pr, zap.NewNop().Sugar(), testClock) if c.Status != corev1.ConditionFalse && c.Reason != v1beta1.PipelineRunReasonTimedOut.String() { @@ -1985,6 +2098,9 @@ func TestGetPipelineConditionStatus_PipelineTimeouts(t *testing.T) { State: oneFinishedState, TasksGraph: d, FinalTasksGraph: &dag.Graph{}, + TimeoutsState: PipelineRunTimeoutsState{ + Clock: testClock, + }, } c := facts.GetPipelineConditionStatus(context.Background(), pr, zap.NewNop().Sugar(), testClock) if c.Status != corev1.ConditionFalse && c.Reason != v1beta1.PipelineRunReasonTimedOut.String() { @@ -2240,6 +2356,9 @@ func TestPipelineRunFacts_GetPipelineTaskStatus(t *testing.T) { State: tc.state, TasksGraph: d, FinalTasksGraph: &dag.Graph{}, + TimeoutsState: PipelineRunTimeoutsState{ + Clock: testClock, + }, } s := facts.GetPipelineTaskStatus() if d := cmp.Diff(tc.expectedStatus, s); d != "" { @@ -2300,6 +2419,9 @@ func TestPipelineRunFacts_GetSkippedTasks(t *testing.T) { State: tc.state, TasksGraph: d, FinalTasksGraph: df, + TimeoutsState: PipelineRunTimeoutsState{ + Clock: testClock, + }, } actualSkippedTasks := facts.GetSkippedTasks() if d := cmp.Diff(tc.expectedSkippedTasks, actualSkippedTasks); d != "" { @@ -2352,6 +2474,9 @@ func TestPipelineRunFacts_IsRunning(t *testing.T) { State: tc.state, TasksGraph: d, FinalTasksGraph: &dag.Graph{}, + TimeoutsState: PipelineRunTimeoutsState{ + Clock: testClock, + }, } if tc.expected != facts.IsRunning() { t.Errorf("IsRunning expected to be %v", tc.expected) diff --git a/pkg/reconciler/pipelinerun/timeout.go b/pkg/reconciler/pipelinerun/timeout.go new file mode 100644 index 00000000000..880aad707c0 --- /dev/null +++ b/pkg/reconciler/pipelinerun/timeout.go @@ -0,0 +1,139 @@ +/* +Copyright 2022 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package pipelinerun
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"log"
+	"strings"
+	"time"
+
+	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
+	"gomodules.xyz/jsonpatch/v2"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+	clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
+	"go.uber.org/zap"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"knative.dev/pkg/apis"
+)
+
+var timeoutTaskRunPatchBytes, timeoutRunPatchBytes []byte
+
+func init() {
+	var err error
+	timeoutTaskRunPatchBytes, err = json.Marshal([]jsonpatch.JsonPatchOperation{
+		{
+			Operation: "add",
+			Path:      "/spec/status",
+			Value:     v1beta1.TaskRunSpecStatusCancelled,
+		},
+		{
+			Operation: "add",
+			Path:      "/spec/statusMessage",
+			Value:     v1beta1.TaskRunCancelledByPipelineTimeoutMsg,
+		}})
+	if err != nil {
+		log.Fatalf("failed to marshal TaskRun timeout patch bytes: %v", err)
+	}
+	timeoutRunPatchBytes, err = json.Marshal([]jsonpatch.JsonPatchOperation{
+		{
+			Operation: "add",
+			Path:      "/spec/status",
+			Value:     v1alpha1.RunSpecStatusCancelled,
+		},
+		{
+			Operation: "add",
+			Path:      "/spec/statusMessage",
+			Value:     v1alpha1.RunCancelledByPipelineTimeoutMsg,
+		}})
+	if err != nil {
+		log.Fatalf("failed to marshal Run timeout patch bytes: %v", err)
+	}
+}
+
+// timeoutPipelineRun marks the PipelineRun as timed out and any resolved TaskRun(s) too.
+func timeoutPipelineRun(ctx context.Context, logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, clientSet clientset.Interface) error {
+	errs := timeoutPipelineTasks(ctx, logger, pr, clientSet)
+
+	// If we successfully timed out all the TaskRuns and Runs, we can consider the PipelineRun timed out.
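+	// Otherwise, record the aggregated errors on the PipelineRun's condition (ReasonCouldntTimeOut) and return an error to the caller.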
+	if len(errs) == 0 {
+		reason := v1beta1.PipelineRunReasonTimedOut.String()
+
+		pr.Status.SetCondition(&apis.Condition{
+			Type:    apis.ConditionSucceeded,
+			Status:  corev1.ConditionFalse,
+			Reason:  reason,
+			Message: fmt.Sprintf("PipelineRun %q failed to finish within %q", pr.Name, pr.PipelineTimeout(ctx).String()),
+		})
+		// update pr completed time
+		pr.Status.CompletionTime = &metav1.Time{Time: time.Now()}
+	} else {
+		e := strings.Join(errs, "\n")
+		// Indicate that we failed to time out the PipelineRun
+		pr.Status.SetCondition(&apis.Condition{
+			Type:    apis.ConditionSucceeded,
+			Status:  corev1.ConditionUnknown,
+			Reason:  ReasonCouldntTimeOut,
+			Message: fmt.Sprintf("PipelineRun %q was timed out but had errors trying to time out TaskRuns and/or Runs: %s", pr.Name, e),
+		})
+		return fmt.Errorf("error(s) from timing out TaskRun(s) from PipelineRun %s: %s", pr.Name, e)
+	}
+	return nil
+}
+
+func timeoutRun(ctx context.Context, runName string, namespace string, clientSet clientset.Interface) error {
+	_, err := clientSet.TektonV1alpha1().Runs(namespace).Patch(ctx, runName, types.JSONPatchType, timeoutRunPatchBytes, metav1.PatchOptions{}, "")
+	return err
+}
+
+// timeoutPipelineTasks patches `TaskRun`s and `Run`s with canceled status and an appropriate message
+func timeoutPipelineTasks(ctx context.Context, logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, clientSet clientset.Interface) []string {
+	return timeoutPipelineTasksForTaskNames(ctx, logger, pr, clientSet, sets.NewString())
+}
+
+// timeoutPipelineTasksForTaskNames patches `TaskRun`s and `Run`s for the given task names, or all if no task names are given, with canceled status and appropriate message
+func timeoutPipelineTasksForTaskNames(ctx context.Context, logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, clientSet clientset.Interface, taskNames sets.String) []string {
+	errs := []string{}
+
+	trNames, runNames, err := getChildObjectsFromPRStatusForTaskNames(ctx, pr.Status, taskNames)
+	if err != nil {
+		errs = append(errs, err.Error())
+	}
+
+	for _, taskRunName := range trNames {
+		logger.Infof("cancelling TaskRun %s for timeout", taskRunName)
+
+		if _, err := clientSet.TektonV1beta1().TaskRuns(pr.Namespace).Patch(ctx, taskRunName, types.JSONPatchType, timeoutTaskRunPatchBytes, metav1.PatchOptions{}, ""); err != nil {
+			errs = append(errs, fmt.Errorf("Failed to patch TaskRun `%s` with cancellation: %s", taskRunName, err).Error())
+			continue
+		}
+	}
+
+	for _, runName := range runNames {
+		logger.Infof("cancelling Run %s for timeout", runName)
+
+		if err := timeoutRun(ctx, runName, pr.Namespace, clientSet); err != nil {
+			errs = append(errs, fmt.Errorf("Failed to patch Run `%s` with cancellation: %s", runName, err).Error())
+			continue
+		}
+	}
+
+	return errs
+}
diff --git a/pkg/reconciler/pipelinerun/timeout_test.go b/pkg/reconciler/pipelinerun/timeout_test.go
new file mode 100644
index 00000000000..580ab28757d
--- /dev/null
+++ b/pkg/reconciler/pipelinerun/timeout_test.go
@@ -0,0 +1,252 @@
+/*
+Copyright 2022 The Tekton Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pipelinerun + +import ( + "context" + "testing" + + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + _ "github.com/tektoncd/pipeline/pkg/pipelinerunmetrics/fake" // Make sure the pipelinerunmetrics are setup + ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" + "github.com/tektoncd/pipeline/test" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "knative.dev/pkg/apis" + logtesting "knative.dev/pkg/logging/testing" +) + +func TestTimeoutPipelineRun(t *testing.T) { + testCases := []struct { + name string + embeddedStatus string + pipelineRun *v1beta1.PipelineRun + taskRuns []*v1beta1.TaskRun + runs []*v1alpha1.Run + wantErr bool + }{{ + name: "no-resolved-taskrun", + embeddedStatus: config.DefaultEmbeddedStatus, + pipelineRun: &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pipeline-run-timedout"}, + Spec: v1beta1.PipelineRunSpec{}, + }, + }, { + name: "one-taskrun", + embeddedStatus: config.DefaultEmbeddedStatus, + pipelineRun: &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pipeline-run-timedout"}, + Spec: v1beta1.PipelineRunSpec{}, + Status: v1beta1.PipelineRunStatus{PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{ + "t1": {PipelineTaskName: "task-1"}, + }, + }}, + }, + taskRuns: []*v1beta1.TaskRun{ + {ObjectMeta: metav1.ObjectMeta{Name: "t1"}}, + }, + }, { + name: "multiple-taskruns", + embeddedStatus: config.DefaultEmbeddedStatus, + pipelineRun: &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pipeline-run-timedout"}, + Spec: v1beta1.PipelineRunSpec{}, + Status: v1beta1.PipelineRunStatus{PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{ + "t1": {PipelineTaskName: "task-1"}, + "t2": {PipelineTaskName: "task-2"}, + }, + }}, + }, + taskRuns: []*v1beta1.TaskRun{ + {ObjectMeta: metav1.ObjectMeta{Name: "t1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "t2"}}, + }, + }, { + name: "multiple-runs", + embeddedStatus: config.DefaultEmbeddedStatus, + pipelineRun: &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pipeline-run-timedout"}, + Spec: v1beta1.PipelineRunSpec{}, + Status: v1beta1.PipelineRunStatus{PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + Runs: map[string]*v1beta1.PipelineRunRunStatus{ + "t1": {PipelineTaskName: "task-1"}, + "t2": {PipelineTaskName: "task-2"}, + }, + }}, + }, + runs: []*v1alpha1.Run{ + {ObjectMeta: metav1.ObjectMeta{Name: "t1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "t2"}}, + }, + }, { + name: "child-references-with-both", + embeddedStatus: config.BothEmbeddedStatus, + pipelineRun: &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pipeline-run-timedout"}, + Spec: v1beta1.PipelineRunSpec{}, + Status: v1beta1.PipelineRunStatus{PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + ChildReferences: []v1beta1.ChildStatusReference{ + { + TypeMeta: runtime.TypeMeta{Kind: "TaskRun"}, + Name: "t1", + PipelineTaskName: "task-1", + }, + { + TypeMeta: runtime.TypeMeta{Kind: "TaskRun"}, + Name: "t2", + PipelineTaskName: "task-2", + }, + { + TypeMeta: runtime.TypeMeta{Kind: "Run"}, + Name: "r1", + PipelineTaskName: "run-1", + }, + { + TypeMeta: runtime.TypeMeta{Kind: 
"Run"}, + Name: "r2", + PipelineTaskName: "run-2", + }, + }, + }}, + }, + taskRuns: []*v1beta1.TaskRun{ + {ObjectMeta: metav1.ObjectMeta{Name: "t1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "t2"}}, + }, + runs: []*v1alpha1.Run{ + {ObjectMeta: metav1.ObjectMeta{Name: "r1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "r2"}}, + }, + }, { + name: "child-references-with-minimal", + embeddedStatus: config.MinimalEmbeddedStatus, + pipelineRun: &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pipeline-run-timedout"}, + Spec: v1beta1.PipelineRunSpec{}, + Status: v1beta1.PipelineRunStatus{PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + ChildReferences: []v1beta1.ChildStatusReference{ + { + TypeMeta: runtime.TypeMeta{Kind: "TaskRun"}, + Name: "t1", + PipelineTaskName: "task-1", + }, + { + TypeMeta: runtime.TypeMeta{Kind: "TaskRun"}, + Name: "t2", + PipelineTaskName: "task-2", + }, + { + TypeMeta: runtime.TypeMeta{Kind: "Run"}, + Name: "r1", + PipelineTaskName: "run-1", + }, + { + TypeMeta: runtime.TypeMeta{Kind: "Run"}, + Name: "r2", + PipelineTaskName: "run-2", + }, + }, + }}, + }, + taskRuns: []*v1beta1.TaskRun{ + {ObjectMeta: metav1.ObjectMeta{Name: "t1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "t2"}}, + }, + runs: []*v1alpha1.Run{ + {ObjectMeta: metav1.ObjectMeta{Name: "r1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "r2"}}, + }, + }, { + name: "unknown-kind-on-child-references", + embeddedStatus: config.MinimalEmbeddedStatus, + pipelineRun: &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pipeline-run-timedout"}, + Spec: v1beta1.PipelineRunSpec{}, + Status: v1beta1.PipelineRunStatus{PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + ChildReferences: []v1beta1.ChildStatusReference{{ + TypeMeta: runtime.TypeMeta{Kind: "InvalidKind"}, + Name: "t1", + PipelineTaskName: "task-1", + }}, + }}, + }, + wantErr: true, + }} + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + + d := test.Data{ + PipelineRuns: []*v1beta1.PipelineRun{tc.pipelineRun}, + TaskRuns: tc.taskRuns, + Runs: tc.runs, + } + ctx, _ := ttesting.SetupFakeContext(t) + cfg := config.NewStore(logtesting.TestLogger(t)) + cfg.OnConfigChanged(withCustomTasks(withEmbeddedStatus(newFeatureFlagsConfigMap(), tc.embeddedStatus))) + ctx = cfg.ToContext(ctx) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, _ := test.SeedTestData(t, ctx, d) + + err := timeoutPipelineRun(ctx, logtesting.TestLogger(t), tc.pipelineRun, c.Pipeline) + if tc.wantErr { + if err == nil { + t.Error("expected an error, but did not get one") + } + } else { + if err != nil { + t.Fatal(err) + } + // This PipelineRun should still be complete and false, and the status should reflect that + cond := tc.pipelineRun.Status.GetCondition(apis.ConditionSucceeded) + if cond.IsTrue() { + t.Errorf("Expected PipelineRun status to be complete and false, but was %v", cond) + } + if tc.taskRuns != nil { + for _, expectedTR := range tc.taskRuns { + tr, err := c.Pipeline.TektonV1beta1().TaskRuns("").Get(ctx, expectedTR.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("couldn't get expected TaskRun %s, got error %s", expectedTR.Name, err) + } + if tr.Spec.Status != v1beta1.TaskRunSpecStatusCancelled { + t.Errorf("expected task %q to be marked as timed out, was %q", tr.Name, tr.Spec.Status) + } + if tr.Spec.StatusMessage != v1beta1.TaskRunCancelledByPipelineTimeoutMsg { + t.Errorf("expected task %s to have the timeout-specific status message, was %s", tr.Name, tr.Spec.StatusMessage) + } + } + } + if 
tc.runs != nil { + for _, expectedRun := range tc.runs { + r, err := c.Pipeline.TektonV1alpha1().Runs("").Get(ctx, expectedRun.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("couldn't get expected Run %s, got error %s", expectedRun.Name, err) + } + if r.Spec.Status != v1alpha1.RunSpecStatusCancelled { + t.Errorf("expected task %q to be marked as cancelled, was %q", r.Name, r.Spec.Status) + } + if r.Spec.StatusMessage != v1alpha1.RunCancelledByPipelineTimeoutMsg { + t.Errorf("expected run %s to have the timeout-specific status message, was %s", r.Name, r.Spec.StatusMessage) + } + } + } + } + }) + } +} diff --git a/pkg/reconciler/taskrun/taskrun_test.go b/pkg/reconciler/taskrun/taskrun_test.go index 2cda87ea9b5..754179f85f4 100644 --- a/pkg/reconciler/taskrun/taskrun_test.go +++ b/pkg/reconciler/taskrun/taskrun_test.go @@ -2148,6 +2148,79 @@ status: } } +func TestReconcileOnTimedOutTaskRun(t *testing.T) { + taskRun := parse.MustParseTaskRun(t, ` +metadata: + name: test-taskrun-run-timedout + namespace: foo +spec: + status: TaskRunCancelled + statusMessage: TaskRun cancelled as pipeline has been cancelled. + taskRef: + name: test-task +status: + conditions: + - status: Unknown + type: Succeeded + podName: test-taskrun-run-timedout-pod +`) + pod, err := makePod(taskRun, simpleTask) + if err != nil { + t.Fatalf("MakePod: %v", err) + } + d := test.Data{ + TaskRuns: []*v1beta1.TaskRun{taskRun}, + Tasks: []*v1beta1.Task{simpleTask}, + Pods: []*corev1.Pod{pod}, + } + + testAssets, cancel := getTaskRunController(t, d) + defer cancel() + c := testAssets.Controller + clients := testAssets.Clients + + if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRun)); err != nil { + t.Fatalf("Unexpected error when reconciling completed TaskRun : %v", err) + } + newTr, err := clients.Pipeline.TektonV1beta1().TaskRuns(taskRun.Namespace).Get(testAssets.Ctx, taskRun.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Expected completed TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err) + } + + expectedStatus := &apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + Reason: v1beta1.TaskRunReasonCancelled.String(), + Message: `TaskRun "test-taskrun-run-timedout" was cancelled. TaskRun cancelled as pipeline has been cancelled.`, + } + if d := cmp.Diff(expectedStatus, newTr.Status.GetCondition(apis.ConditionSucceeded), ignoreLastTransitionTime); d != "" { + t.Fatalf("Did not get expected condition %s", diff.PrintWantGot(d)) + } + + wantEvents := []string{ + "Normal Started", + "Warning Failed TaskRun \"test-taskrun-run-timedout\" was cancelled. 
TaskRun cancelled as pipeline has been cancelled.", + } + err = eventstest.CheckEventsOrdered(t, testAssets.Recorder.Events, "test-reconcile-on-timedout-taskrun", wantEvents) + if !(err == nil) { + t.Errorf(err.Error()) + } + + // reconcile the completed TaskRun again without the pod as that was deleted + d = test.Data{ + TaskRuns: []*v1beta1.TaskRun{newTr}, + Tasks: []*v1beta1.Task{simpleTask}, + } + + testAssets, cancel = getTaskRunController(t, d) + defer cancel() + c = testAssets.Controller + + if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(newTr)); err != nil { + t.Fatalf("Unexpected error when reconciling completed TaskRun : %v", err) + } +} + func TestReconcilePodFailuresStepImagePullFailed(t *testing.T) { taskRun := parse.MustParseTaskRun(t, ` metadata: diff --git a/test/timeout_test.go b/test/timeout_test.go index 47e857f8cda..19bc6733942 100644 --- a/test/timeout_test.go +++ b/test/timeout_test.go @@ -22,6 +22,7 @@ package test import ( "context" "fmt" + "strings" "sync" "testing" "time" @@ -112,7 +113,7 @@ spec: wg.Add(1) go func(name string) { defer wg.Done() - err := WaitForTaskRunState(ctx, c, name, FailedWithReason("TaskRunTimeout", name), "TaskRunTimeout") + err := WaitForTaskRunState(ctx, c, name, FailedWithReason(v1beta1.TaskRunReasonCancelled.String(), name), v1beta1.TaskRunReasonCancelled.String()) if err != nil { t.Errorf("Error waiting for TaskRun %s to timeout: %s", name, err) } @@ -413,7 +414,7 @@ spec: } if tr.Spec.TaskRef.Name == task2.Name && cond.Status == corev1.ConditionFalse { - if cond.Reason == "TaskRunTimeout" { + if cond.Reason == v1beta1.TaskRunReasonTimedOut.String() { return true, nil } return true, fmt.Errorf("taskRun %q completed with the wrong reason: %s", task2.Name, cond.Reason) @@ -422,7 +423,7 @@ spec: } } return false, nil - }, "TaskRunTimeout") + }, v1beta1.TaskRunReasonCancelled.String()) if err != nil { t.Errorf("Error waiting for TaskRun %s to timeout: %s", name, err) } @@ -439,7 +440,7 @@ func TestPipelineRunTasksTimeout(t *testing.T) { // cancel the context after we have waited a suitable buffer beyond the given deadline. ctx, cancel := context.WithTimeout(context.Background(), timeout+2*time.Minute) defer cancel() - c, namespace := setup(ctx, t, requireAnyGate(map[string]string{"enable-api-fields": "alpha"})) + c, namespace := setup(ctx, t) knativetest.CleanupOnInterrupt(func() { tearDown(context.Background(), t, c, namespace) }, t.Logf) defer tearDown(context.Background(), t, c, namespace) @@ -537,7 +538,10 @@ spec: } if tr.Spec.TaskRef.Name == task.Name && cond.Status == corev1.ConditionFalse { - if cond.Reason == "TaskRunTimeout" { + if !strings.Contains(cond.Message, string(v1beta1.TaskRunCancelledByPipelineTimeoutMsg)) { + return true, fmt.Errorf("taskRun %s completed with the wrong message: %s", task.Name, cond.Message) + } + if cond.Reason == v1beta1.TaskRunReasonCancelled.String() { return true, nil } return true, fmt.Errorf("taskRun %q completed with the wrong reason: %s", task.Name, cond.Reason) @@ -546,7 +550,7 @@ spec: } } return false, nil - }, "TaskRunTimeout") + }, v1beta1.TaskRunReasonCancelled.String()) if err != nil { t.Errorf("Error waiting for TaskRun %s to timeout: %s", name, err)