From 95bf2d5a15f5285c454cf15cffb6c7d3afb34670 Mon Sep 17 00:00:00 2001
From: Rohit Agrawal
Date: Thu, 22 Sep 2022 08:53:07 -0700
Subject: [PATCH] fix(analysis): Make AR End When Only Dry-Run Metrics Are Defined (#2230)

Signed-off-by: Rohit Agrawal
---
 analysis/analysis.go      |   5 ++
 analysis/analysis_test.go | 117 ++++++++++++++++++++++++++++++++++++--
 2 files changed, 118 insertions(+), 4 deletions(-)

diff --git a/analysis/analysis.go b/analysis/analysis.go
index 5bae4a8892..d4d2c5a430 100644
--- a/analysis/analysis.go
+++ b/analysis/analysis.go
@@ -508,6 +508,11 @@ func (c *Controller) assessRunStatus(run *v1alpha1.AnalysisRun, metrics []v1alph
 			runSummary.Successful++
 		}
 	} else {
+		// We don't really care about the failures from dry-runs and hence, if there is no current status
+		// found then we just set it to `AnalysisPhaseSuccessful`
+		if worstStatus == "" {
+			worstStatus = v1alpha1.AnalysisPhaseSuccessful
+		}
 		// Update metric result message
 		if message != "" {
 			failureMessage := fmt.Sprintf("Metric assessed %s due to %s", metricStatus, message)
diff --git a/analysis/analysis_test.go b/analysis/analysis_test.go
index 30e3b7adb9..a88bb1085c 100644
--- a/analysis/analysis_test.go
+++ b/analysis/analysis_test.go
@@ -1500,7 +1500,7 @@ func TestAssessRunStatusErrorMessageAnalysisPhaseFail(t *testing.T) {
 
 func TestAssessRunStatusErrorMessageAnalysisPhaseFailInDryRunMode(t *testing.T) {
 	status, message, dryRunSummary := StartAssessRunStatusErrorMessageAnalysisPhaseFail(t, true)
-	assert.Equal(t, v1alpha1.AnalysisPhaseRunning, status)
+	assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, status)
 	assert.Equal(t, "", message)
 	expectedDryRunSummary := v1alpha1.RunSummary{
 		Count:        2,
@@ -1545,7 +1545,7 @@ func TestAssessRunStatusErrorMessageFromProvider(t *testing.T) {
 func TestAssessRunStatusErrorMessageFromProviderInDryRunMode(t *testing.T) {
 	providerMessage := "Provider Error"
 	status, message, dryRunSummary := StartAssessRunStatusErrorMessageFromProvider(t, providerMessage, true)
-	assert.Equal(t, v1alpha1.AnalysisPhaseRunning, status)
+	assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, status)
 	assert.Equal(t, "", message)
 	expectedDryRunSummary := v1alpha1.RunSummary{
 		Count:        2,
@@ -1587,7 +1587,7 @@ func TestAssessRunStatusMultipleFailures(t *testing.T) {
 
 func TestAssessRunStatusMultipleFailuresInDryRunMode(t *testing.T) {
 	status, message, dryRunSummary := StartAssessRunStatusMultipleFailures(t, true)
-	assert.Equal(t, v1alpha1.AnalysisPhaseRunning, status)
+	assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, status)
 	assert.Equal(t, "", message)
 	expectedDryRunSummary := v1alpha1.RunSummary{
 		Count:        2,
@@ -1613,6 +1613,115 @@ func StartAssessRunStatusWorstMessageInReconcileAnalysisRun(t *testing.T, isDryR
 	return c.reconcileAnalysisRun(run)
 }
 
+// TestAssessRunStatusWithOnlyDryRunMetrics verifies that if only dry-run metrics are getting evaluated then, the final
+// status of the analysis run is always successful.
+func TestAssessRunStatusWithOnlyDryRunMetrics(t *testing.T) {
+	f := newFixture(t)
+	defer f.Close()
+	c, _, _ := f.newController(noResyncPeriodFunc)
+
+	run := v1alpha1.AnalysisRun{
+		Spec: v1alpha1.AnalysisRunSpec{
+			Metrics: []v1alpha1.Metric{
+				{
+					Name: "success-metric",
+					Provider: v1alpha1.MetricProvider{
+						Job: &v1alpha1.JobMetric{},
+					},
+				},
+			},
+			DryRun: []v1alpha1.DryRun{{
+				MetricName: "success-metric",
+			}},
+		},
+		Status: v1alpha1.AnalysisRunStatus{
+			MetricResults: []v1alpha1.MetricResult{
+				{
+					Name:         "success-metric",
+					Count:        1,
+					Successful:   1,
+					DryRun:       true,
+					Phase:        v1alpha1.AnalysisPhaseSuccessful,
+					Measurements: []v1alpha1.Measurement{{
+						Phase:      v1alpha1.AnalysisPhaseSuccessful,
+						StartedAt:  timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),
+						FinishedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),
+					}},
+				},
+			},
+		},
+	}
+
+	f.provider.On("Run", mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseFailed), nil)
+	f.provider.On("Resume", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseSuccessful), nil)
+
+	newRun := c.reconcileAnalysisRun(&run)
+	assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, newRun.Status.Phase)
+}
+
+// TestAssessRunStatusWithMixedMetrics verifies that the status of dry-run metrics doesn't impact the final state of the
+// analysis run.
+func TestAssessRunStatusWithMixedMetrics(t *testing.T) {
+	f := newFixture(t)
+	defer f.Close()
+	c, _, _ := f.newController(noResyncPeriodFunc)
+
+	run := v1alpha1.AnalysisRun{
+		Spec: v1alpha1.AnalysisRunSpec{
+			Metrics: []v1alpha1.Metric{
+				{
+					Name: "run-forever",
+					Provider: v1alpha1.MetricProvider{
+						Job: &v1alpha1.JobMetric{},
+					},
+				},
+				{
+					Name: "success-metric",
+					Provider: v1alpha1.MetricProvider{
+						Job: &v1alpha1.JobMetric{},
+					},
+				},
+			},
+			DryRun: []v1alpha1.DryRun{{
+				MetricName: "success-metric",
+			}},
+		},
+		Status: v1alpha1.AnalysisRunStatus{
+			MetricResults: []v1alpha1.MetricResult{
+				{
+					Name:         "run-forever",
+					Inconclusive: 1,
+					DryRun:       false,
+					Phase:        v1alpha1.AnalysisPhaseRunning,
+					Measurements: []v1alpha1.Measurement{{
+						Phase:     v1alpha1.AnalysisPhaseRunning,
+						StartedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),
+					}},
+				},
+				{
+					Name:         "success-metric",
+					Count:        1,
+					Failed:       1,
+					DryRun:       true,
+					Phase:        v1alpha1.AnalysisPhaseSuccessful,
+					Measurements: []v1alpha1.Measurement{{
+						Phase:      v1alpha1.AnalysisPhaseFailed,
+						StartedAt:  timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),
+						FinishedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),
+					}},
+				},
+			},
+		},
+	}
+
+	f.provider.On("Run", mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseFailed), nil)
+	f.provider.On("Resume", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(newMeasurement(v1alpha1.AnalysisPhaseSuccessful), nil)
+
+	newRun := c.reconcileAnalysisRun(&run)
+	assert.Equal(t, v1alpha1.AnalysisPhaseInconclusive, newRun.Status.Phase)
+	assert.Equal(t, "Metric \"run-forever\" assessed Inconclusive due to inconclusive (1) > inconclusiveLimit (0)", newRun.Status.Message)
+}
+
 // TestAssessRunStatusWorstMessageInReconcileAnalysisRun verifies that the worstMessage returned by assessRunStatus is set as the
 // status of the AnalysisRun returned by reconcileAnalysisRun
 func TestAssessRunStatusWorstMessageInReconcileAnalysisRun(t *testing.T) {
@@ -1623,7 +1732,7 @@ func TestAssessRunStatusWorstMessageInReconcileAnalysisRun(t *testing.T) {
 
 func TestAssessRunStatusWorstMessageInReconcileAnalysisRunInDryRunMode(t *testing.T) {
 	newRun := StartAssessRunStatusWorstMessageInReconcileAnalysisRun(t, true)
-	assert.Equal(t, v1alpha1.AnalysisPhaseRunning, newRun.Status.Phase)
+	assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, newRun.Status.Phase)
 	assert.Equal(t, "", newRun.Status.Message)
 	expectedDryRunSummary := v1alpha1.RunSummary{
 		Count:        2,
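
For readers skimming the patch, the behavioral change is easiest to see in isolation. The following is a minimal, self-contained Go sketch of the rule the hunk in analysis/analysis.go introduces; the Phase type, metricResult struct, and assessWorstStatus helper are simplified stand-ins invented for illustration, not the controller's real v1alpha1 types or its assessRunStatus implementation. The idea: dry-run metrics are tallied only into the dry-run summary and never feed the run's worst status, so an AnalysisRun with only dry-run metrics previously had no worst status to report and stayed Running; the fix defaults that empty worst status to Successful so the run can end.

// Minimal sketch only: these types and names are illustrative, not argo-rollouts APIs.
package main

import "fmt"

type Phase string

const (
	PhaseSuccessful   Phase = "Successful"
	PhaseInconclusive Phase = "Inconclusive"
	PhaseFailed       Phase = "Failed"
)

// metricResult is a stripped-down stand-in for v1alpha1.MetricResult.
type metricResult struct {
	Name   string
	DryRun bool
	Phase  Phase
}

// assessWorstStatus mirrors the shape of the fix: dry-run results never contribute to the
// worst status, and an empty worst status (meaning only dry-run metrics ran) now defaults
// to Successful so the analysis run can terminate instead of staying Running.
func assessWorstStatus(results []metricResult) Phase {
	severity := map[Phase]int{PhaseSuccessful: 0, PhaseInconclusive: 1, PhaseFailed: 2}
	var worst Phase
	for _, r := range results {
		if r.DryRun {
			continue // dry-run outcomes are reported in the dry-run summary only
		}
		if worst == "" || severity[r.Phase] > severity[worst] {
			worst = r.Phase
		}
	}
	if worst == "" {
		// The behavior this patch adds: no measured (non-dry-run) metrics => Successful.
		worst = PhaseSuccessful
	}
	return worst
}

func main() {
	onlyDryRun := []metricResult{{Name: "success-metric", DryRun: true, Phase: PhaseFailed}}
	fmt.Println(assessWorstStatus(onlyDryRun)) // Successful, even though the dry-run metric failed

	mixed := []metricResult{
		{Name: "run-forever", DryRun: false, Phase: PhaseInconclusive},
		{Name: "success-metric", DryRun: true, Phase: PhaseFailed},
	}
	fmt.Println(assessWorstStatus(mixed)) // Inconclusive: only the measured metric counts
}

Running the sketch prints Successful for the dry-run-only case and Inconclusive for the mixed case, mirroring the expectations in the TestAssessRunStatusWithOnlyDryRunMetrics and TestAssessRunStatusWithMixedMetrics tests added by this patch.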