From 57b38282556e0e6ad00b431845ac0168c7f5b6e4 Mon Sep 17 00:00:00 2001
From: Nelson Rodrigues
Date: Fri, 12 Feb 2021 14:26:19 +0000
Subject: [PATCH 1/9] docs: Add Jungle to USERS.md (#5096)

Signed-off-by: Nelson Rodrigues
---
 USERS.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/USERS.md b/USERS.md
index 4c06446d263f..6d8bc5caec4f 100644
--- a/USERS.md
+++ b/USERS.md
@@ -64,6 +64,7 @@ Currently, the following organizations are **officially** using Argo Workflows:
 1. [Intuit](https://www.intuit.com/)
 1. [InVision](https://www.invisionapp.com/)
 1. [İşbank](https://www.isbank.com.tr/en)
+1. [Jungle](https://www.jungle.ai/)
 1. [Karius](https://www.kariusdx.com/)
 1. [Kasa](https://www.kasa.co.kr/)
 1. [KintoHub](https://www.kintohub.com/)

From cda5dc2e318e40094159f28a4602247ff533f6c8 Mon Sep 17 00:00:00 2001
From: Yuan Tang
Date: Fri, 12 Feb 2021 10:09:44 -0500
Subject: [PATCH 2/9] docs: Add document for environment variables (#5080)

Signed-off-by: terrytangyuan
---
 docs/environment-variables.md | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)
 create mode 100644 docs/environment-variables.md

diff --git a/docs/environment-variables.md b/docs/environment-variables.md
new file mode 100644
index 000000000000..b9410671fd3f
--- /dev/null
+++ b/docs/environment-variables.md
@@ -0,0 +1,35 @@
+# Environment Variables
+
+This document outlines the environment variables that can be used to customize behaviour at different levels.
+These environment variables are typically added to test out experimental features and should not be needed by most users.
+Note that these environment variables may be removed at any time.
+
+## Controller
+
+| Name | Type | Description |
+|----------|------|------------|
+| `ALL_POD_CHANGES_SIGNIFICANT` | `bool` | Whether to consider all pod changes as significant during pod reconciliation. |
+| `ALWAYS_OFFLOAD_NODE_STATUS` | `bool` | Whether to always offload the node status. |
+| `ARCHIVED_WORKFLOW_GC_PERIOD` | `time.Duration` | The periodicity for GC of archived workflows. |
+| `ARGO_TRACE` | `bool` | Whether to enable tracing statements in Argo components. |
+| `DEFAULT_REQUEUE_TIME` | `time.Duration` | The requeue time for the rate limiter of the workflow queue. |
+| `LEADER_ELECTION_IDENTITY` | `string` | The ID used for workflow controllers to elect a leader. |
+| `MAX_OPERATION_TIME` | `time.Duration` | The maximum time a workflow operation is allowed to run before the workflow is requeued onto the work queue. |
+| `OFFLOAD_NODE_STATUS_TTL` | `time.Duration` | The TTL to delete the offloaded node status. Currently only used for testing. |
+| `RECENTLY_STARTED_POD_DURATION` | `time.Duration` | The duration within which a pod is considered to be recently started. |
+| `RETRY_BACKOFF_DURATION` | `time.Duration` | The retry backoff duration when retrying API calls. |
+| `RETRY_BACKOFF_FACTOR` | `float` | The retry backoff factor when retrying API calls. |
+| `RETRY_BACKOFF_STEPS` | `int` | The retry backoff steps when retrying API calls. |
+| `TRANSIENT_ERROR_PATTERN` | `string` | A regular expression matching additional patterns of transient errors. |
+| `WF_DEL_PROPAGATION_POLICY` | `string` | The deletion propagation policy for workflows. |
+| `WORKFLOW_GC_PERIOD` | `time.Duration` | The periodicity for GC of workflows. |
+
+## Executor
+
+| Name | Type | Description |
+|----------|------|------------|
+| `ARGO_CONTAINER_RUNTIME_EXECUTOR` | `string` | The name of the container runtime executor. |
+| `ARGO_KUBELET_PORT` | `int` | The port to the Kubelet API. |
+| `ARGO_KUBELET_INSECURE` | `bool` | Whether to disable TLS verification. |
+| `PNS_PRIVILEGED` | `bool` | Whether to always set privileged on when the PNS executor is used. |
+| `REMOVE_LOCAL_ART_PATH` | `bool` | Whether to remove local artifacts. |

From 75d09b0f2b48dd87d6562436e220c58dca9e06fa Mon Sep 17 00:00:00 2001
From: Saravanan Balasubramanian <33908564+sarabala1979@users.noreply.github.com>
Date: Fri, 12 Feb 2021 09:50:31 -0800
Subject: [PATCH 3/9] fix: Synchronization lock handling in Step/DAG Template level (#5081)

Signed-off-by: Saravanan Balasubramanian
---
 docs/fields.md                              |  10 +
 examples/workflow-template/templates.yaml   |   5 +
 workflow/controller/dag.go                  |   2 +-
 workflow/controller/operator.go             |  13 +-
 .../controller/operator_concurrency_test.go | 213 ++++++++++++++++++
 workflow/controller/steps.go                |   7 +-
 workflow/sync/sync_manager.go               |   8 +-
 7 files changed, 242 insertions(+), 16 deletions(-)

diff --git a/docs/fields.md b/docs/fields.md
index 9ddbf4ee3027..8e4bcb81a56b 100644
--- a/docs/fields.md
+++ b/docs/fields.md
@@ -1640,6 +1640,8 @@ Synchronization holds synchronization lock configuration
 - [`synchronization-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/synchronization-tmpl-level.yaml)

 - [`synchronization-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/synchronization-wf-level.yaml)
+
+- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/workflow-template/templates.yaml)

 ### Fields
@@ -2173,6 +2175,8 @@ SynchronizationStatus stores the status of semaphore and mutex.
 - [`synchronization-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/synchronization-tmpl-level.yaml)

 - [`synchronization-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/synchronization-wf-level.yaml)
+
+- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/workflow-template/templates.yaml)

 ### Fields
@@ -2507,6 +2511,8 @@ SemaphoreRef is a reference of Semaphore
 - [`synchronization-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/synchronization-tmpl-level.yaml)

 - [`synchronization-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/synchronization-wf-level.yaml)
+
+- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/workflow-template/templates.yaml)

 ### Fields
@@ -3192,6 +3198,8 @@ _No description available_
 - [`synchronization-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/synchronization-tmpl-level.yaml)

 - [`synchronization-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/synchronization-wf-level.yaml)
+
+- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/workflow-template/templates.yaml)

 ### Fields
@@ -4652,6 +4660,8 @@ Selects a key from a ConfigMap.
- [`synchronization-tmpl-level.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/synchronization-tmpl-level.yaml) - [`synchronization-wf-level.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/synchronization-wf-level.yaml) + +- [`templates.yaml`](https://github.com/argoproj/argo-workflows/blob/master/examples/workflow-template/templates.yaml) ### Fields diff --git a/examples/workflow-template/templates.yaml b/examples/workflow-template/templates.yaml index f7aabb40a22d..560d3f367277 100644 --- a/examples/workflow-template/templates.yaml +++ b/examples/workflow-template/templates.yaml @@ -44,6 +44,11 @@ spec: command: [cowsay] args: ["{{inputs.parameters.message}}"] - name: inner-steps + synchronization: + semaphore: + configMapKeyRef: + name: my-config + key: template steps: - - name: inner-hello1 templateRef: diff --git a/workflow/controller/dag.go b/workflow/controller/dag.go index d7ae49a41e4d..f1be657177bc 100644 --- a/workflow/controller/dag.go +++ b/workflow/controller/dag.go @@ -340,7 +340,7 @@ func (woc *wfOperationCtx) executeDAGTask(ctx context.Context, dagCtx *dagContex } // Release acquired lock completed task. - if tmpl != nil && tmpl.Synchronization != nil { + if tmpl != nil { woc.controller.syncManager.Release(woc.wf, node.ID, tmpl.Synchronization) } diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 132fe3b35d8b..36a1862f6471 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -1610,9 +1610,8 @@ func (woc *wfOperationCtx) executeTemplate(ctx context.Context, nodeName string, if node != nil { if node.Fulfilled() { - if processedTmpl.Synchronization != nil { - woc.controller.syncManager.Release(woc.wf, node.ID, processedTmpl.Synchronization) - } + woc.controller.syncManager.Release(woc.wf, node.ID, processedTmpl.Synchronization) + woc.log.Debugf("Node %s already completed", nodeName) if processedTmpl.Metrics != nil { // Check if this node completed between executions. If it did, emit metrics. If a node completes within @@ -1770,9 +1769,7 @@ func (woc *wfOperationCtx) executeTemplate(ctx context.Context, nodeName string, if err != nil { node = woc.markNodeError(nodeName, err) - if processedTmpl.Synchronization != nil { - woc.controller.syncManager.Release(woc.wf, node.ID, processedTmpl.Synchronization) - } + woc.controller.syncManager.Release(woc.wf, node.ID, processedTmpl.Synchronization) // If retry policy is not set, or if it is not set to Always or OnError, we won't attempt to retry an errored container // and we return instead. @@ -1785,6 +1782,10 @@ func (woc *wfOperationCtx) executeTemplate(ctx context.Context, nodeName string, } } + if node.Fulfilled() { + woc.controller.syncManager.Release(woc.wf, node.ID, processedTmpl.Synchronization) + } + if processedTmpl.Metrics != nil { // Check if the node was just created, if it was emit realtime metrics. // If the node did not previously exist, we can infer that it was created during the current operation, emit real time metrics. 
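The operator.go changes above amount to a pair of invariants: every fulfilled node now releases its template-level lock, and Manager.Release (patched in the sync_manager.go hunk further down) becomes a no-op when the template declared no synchronization. What follows is a minimal, runnable Go sketch of that guard-clause pattern, using simplified stand-in types rather than the real wfv1 and sync.Manager types:

    package main

    import "fmt"

    // Synchronization and Manager are illustrative stand-ins for
    // wfv1.Synchronization and sync.Manager; only the shape matters here.
    type Synchronization struct{ Semaphore string }

    type Manager struct{}

    // Release tolerates a nil synchronization ref, so call sites can release
    // unconditionally once a node is fulfilled instead of nil-checking first.
    func (m *Manager) Release(nodeID string, syncRef *Synchronization) {
    	if syncRef == nil {
    		return // the template declared no lock; nothing to release
    	}
    	fmt.Printf("released semaphore %q held by node %s\n", syncRef.Semaphore, nodeID)
    }

    func main() {
    	m := &Manager{}
    	m.Release("steps-jklcl-969694128", nil)                                 // safe no-op
    	m.Release("steps-jklcl-969694128", &Synchronization{Semaphore: "step"}) // real release
    }

Moving the nil check into the callee keeps the call sites in operator.go, dag.go, and steps.go symmetrical, which is what lets this patch delete the scattered `tmpl.Synchronization != nil` guards.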
diff --git a/workflow/controller/operator_concurrency_test.go b/workflow/controller/operator_concurrency_test.go index 64e2771a735a..105f627dc7b8 100644 --- a/workflow/controller/operator_concurrency_test.go +++ b/workflow/controller/operator_concurrency_test.go @@ -26,6 +26,7 @@ metadata: data: workflow: "2" template: "1" + step: "1" ` const wfWithSemaphore = ` apiVersion: argoproj.io/v1alpha1 @@ -486,3 +487,215 @@ func TestSynchronizationWithRetry(t *testing.T) { }) } + +const StepWithSync = ` +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + name: steps-jklcl + namespace: default +spec: + entrypoint: hello-hello-hello + templates: + - arguments: {} + name: hello-hello-hello + steps: + - - arguments: + parameters: + - name: message + value: hello1 + name: hello1 + template: whalesay + synchronization: + semaphore: + configMapKeyRef: + key: step + name: my-config + - arguments: {} + container: + args: + - '{{inputs.parameters.message}}' + command: + - cowsay + image: docker/whalesay + inputs: + parameters: + - name: message + name: whalesay +` + +const StepWithSyncStatus = ` +piVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + name: steps-jklcl + namespace: default +spec: + entrypoint: hello-hello-hello + templates: + - inputs: {} + name: hello-hello-hello + steps: + - - arguments: + parameters: + - name: message + value: hello1 + name: hello1 + template: whalesay + synchronization: + semaphore: + configMapKeyRef: + key: step + name: my-config + - container: + args: + - '{{inputs.parameters.message}}' + command: + - cowsay + image: docker/whalesay + resources: {} + inputs: + parameters: + - name: message + name: whalesay +status: + artifactRepositoryRef: + configMap: artifact-repositories + key: default-v1 + namespace: argo + conditions: + - status: "False" + type: PodRunning + - status: "True" + type: Completed + finishedAt: "2021-02-11T19:46:55Z" + nodes: + steps-jklcl: + children: + - steps-jklcl-3895081407 + displayName: steps-jklcl + finishedAt: "2021-02-11T19:46:55Z" + id: steps-jklcl + name: steps-jklcl + outboundNodes: + - steps-jklcl-969694128 + phase: Running + progress: 1/1 + resourcesDuration: + cpu: 7 + memory: 4 + startedAt: "2021-02-11T19:46:33Z" + templateName: hello-hello-hello + templateScope: local/steps-jklcl + type: Steps + steps-jklcl-969694128: + boundaryID: steps-jklcl + displayName: hello1 + finishedAt: "2021-02-11T19:46:44Z" + id: steps-jklcl-969694128 + inputs: + parameters: + - name: message + value: hello1 + name: steps-jklcl[0].hello1 + outputs: + artifacts: + - archiveLogs: true + name: main-logs + s3: + accessKeySecret: + key: accesskey + name: my-minio-cred + bucket: my-bucket + endpoint: minio:9000 + insecure: true + key: steps-jklcl/steps-jklcl-969694128/main.log + secretKeySecret: + key: secretkey + name: my-minio-cred + exitCode: "0" + phase: Succeeded + progress: 1/1 + resourcesDuration: + cpu: 7 + memory: 4 + startedAt: "2021-02-11T19:46:33Z" + templateName: whalesay + templateScope: local/steps-jklcl + type: Pod + steps-jklcl-3895081407: + boundaryID: steps-jklcl + children: + - steps-jklcl-969694128 + displayName: '[0]' + finishedAt: "2021-02-11T19:46:55Z" + id: steps-jklcl-3895081407 + name: steps-jklcl[0] + phase: Succeeded + progress: 1/1 + resourcesDuration: + cpu: 7 + memory: 4 + startedAt: "2021-02-11T19:46:33Z" + templateScope: local/steps-jklcl + type: StepGroup + phase: Succeeded + progress: 1/1 + resourcesDuration: + cpu: 7 + memory: 4 + startedAt: "2021-02-11T19:46:33Z" + +` + +func TestSynchronizationWithStep(t 
*testing.T) { + assert := assert.New(t) + cancel, controller := newController() + defer cancel() + ctx := context.Background() + controller.syncManager = sync.NewLockManager(GetSyncLimitFunc(ctx, controller.kubeclientset), func(key string) { + }, workflowExistenceFunc) + var cm v1.ConfigMap + err := yaml.Unmarshal([]byte(configMap), &cm) + assert.NoError(err) + _, err = controller.kubeclientset.CoreV1().ConfigMaps("default").Create(ctx, &cm, metav1.CreateOptions{}) + assert.NoError(err) + + t.Run("StepWithSychronization", func(t *testing.T) { + //First workflow Acquire the lock + wf := unmarshalWF(StepWithSync) + wf, err := controller.wfclientset.ArgoprojV1alpha1().Workflows("default").Create(ctx, wf, metav1.CreateOptions{}) + assert.NoError(err) + woc := newWorkflowOperationCtx(wf, controller) + woc.operate(ctx) + assert.NotNil(woc.wf.Status.Synchronization) + assert.NotNil(woc.wf.Status.Synchronization.Semaphore) + assert.Len(woc.wf.Status.Synchronization.Semaphore.Holding, 1) + + // Second workflow try to acquire the lock and wait for lock + wf1 := unmarshalWF(StepWithSync) + wf1.Name = "step2" + wf1, err = controller.wfclientset.ArgoprojV1alpha1().Workflows("default").Create(ctx, wf1, metav1.CreateOptions{}) + assert.NoError(err) + woc1 := newWorkflowOperationCtx(wf1, controller) + woc1.operate(ctx) + assert.NotNil(woc1.wf.Status.Synchronization) + assert.NotNil(woc1.wf.Status.Synchronization.Semaphore) + assert.Nil(woc1.wf.Status.Synchronization.Semaphore.Holding) + assert.Len(woc1.wf.Status.Synchronization.Semaphore.Waiting, 1) + + //Finished all StepGroup in step + wf = unmarshalWF(StepWithSyncStatus) + woc = newWorkflowOperationCtx(wf, controller) + woc.operate(ctx) + assert.Nil(woc.wf.Status.Synchronization) + + // Second workflow acquire the lock + woc1 = newWorkflowOperationCtx(woc1.wf, controller) + woc1.operate(ctx) + assert.NotNil(woc1.wf.Status.Synchronization) + assert.NotNil(woc1.wf.Status.Synchronization.Semaphore) + assert.NotNil(woc1.wf.Status.Synchronization.Semaphore.Holding) + assert.Len(woc1.wf.Status.Synchronization.Semaphore.Holding, 1) + }) +} diff --git a/workflow/controller/steps.go b/workflow/controller/steps.go index 004ccfa01476..44e5ffaad442 100644 --- a/workflow/controller/steps.go +++ b/workflow/controller/steps.go @@ -94,11 +94,7 @@ func (woc *wfOperationCtx) executeSteps(ctx context.Context, nodeName string, tm sgNode := woc.executeStepGroup(ctx, stepGroup.Steps, sgNodeName, &stepsCtx) - if sgNode.Fulfilled() { - if tmpl.Synchronization != nil { - woc.controller.syncManager.Release(woc.wf, node.ID, tmpl.Synchronization) - } - } else { + if !sgNode.Fulfilled() { woc.log.Infof("Workflow step group node %s not yet completed", sgNode.ID) return node, nil } @@ -147,6 +143,7 @@ func (woc *wfOperationCtx) executeSteps(ctx context.Context, nodeName string, tm } } } + woc.updateOutboundNodes(nodeName, tmpl) // If this template has outputs from any of its steps, copy them to this node here outputs, err := getTemplateOutputsFromScope(tmpl, stepsCtx.scope) diff --git a/workflow/sync/sync_manager.go b/workflow/sync/sync_manager.go index c205f45f6009..c07b902aae73 100644 --- a/workflow/sync/sync_manager.go +++ b/workflow/sync/sync_manager.go @@ -44,7 +44,7 @@ func (cm *Manager) getWorkflowKey(key string) (string, error) { } func (cm *Manager) CheckWorkflowExistence() { - log.Infof("Check the workflow existence") + log.Debug("Check the workflow existence") for _, lock := range cm.syncLockMap { keys := lock.getCurrentHolders() keys = append(keys, 
lock.getCurrentPending()...)
@@ -173,13 +173,13 @@ func (cm *Manager) TryAcquire(wf *wfv1.Workflow, nodeName string, syncLockRef *w
 }

 func (cm *Manager) Release(wf *wfv1.Workflow, nodeName string, syncRef *wfv1.Synchronization) {
-	cm.lock.Lock()
-	defer cm.lock.Unlock()
-
 	if syncRef == nil {
 		return
 	}

+	cm.lock.Lock()
+	defer cm.lock.Unlock()
+
 	holderKey := getHolderKey(wf, nodeName)
 	lockName, err := GetLockName(syncRef, wf.Namespace)
 	if err != nil {

From 68979f6e3dab8225765e166d346502e7e66b0c77 Mon Sep 17 00:00:00 2001
From: Simon Behar
Date: Fri, 12 Feb 2021 10:35:19 -0800
Subject: [PATCH 4/9] fix: Do not create pods under shutdown strategy (#5055)

Signed-off-by: Simon Behar
---
 test/e2e/functional/stop-terminate-2.yaml | 24 +++++++++++++++++
 test/e2e/functional/stop-terminate.yaml   |  6 ++---
 test/e2e/signals_test.go                  | 32 +++++++++++++++++++---
 workflow/controller/operator_test.go      | 33 +++++++++++++++++++++++
 workflow/controller/workflowpod.go        |  6 +++++
 5 files changed, 94 insertions(+), 7 deletions(-)
 create mode 100644 test/e2e/functional/stop-terminate-2.yaml

diff --git a/test/e2e/functional/stop-terminate-2.yaml b/test/e2e/functional/stop-terminate-2.yaml
new file mode 100644
index 000000000000..c6792000e326
--- /dev/null
+++ b/test/e2e/functional/stop-terminate-2.yaml
@@ -0,0 +1,24 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+  generateName: stop-terminate-
+  labels:
+    argo-e2e: true
+spec:
+  entrypoint: main
+  templates:
+    - name: main
+      steps:
+        - - name: A
+            template: sleep
+        - - name: B
+            template: pass
+
+    - name: sleep
+      container:
+        image: argoproj/argosay:v1
+        args: [ sleep, "999"]
+
+    - name: pass
+      container:
+        image: argoproj/argosay:v1

diff --git a/test/e2e/functional/stop-terminate.yaml b/test/e2e/functional/stop-terminate.yaml
index 8d3da2e5d5b4..e0ab37e0e5a4 100644
--- a/test/e2e/functional/stop-terminate.yaml
+++ b/test/e2e/functional/stop-terminate.yaml
@@ -17,9 +17,9 @@ spec:

   - name: echo
     container:
-      image: argoproj/argosay:v2
-      args: [ sleep, "999" ]
+      image: argoproj/argosay:v1
+      args: [ sleep, "999"]

   - name: exit
     container:
-      image: argoproj/argosay:v2
\ No newline at end of file
+      image: argoproj/argosay:v1

diff --git a/test/e2e/signals_test.go b/test/e2e/signals_test.go
index 4898a2d2a768..e8add2f5b54e 100644
--- a/test/e2e/signals_test.go
+++ b/test/e2e/signals_test.go
@@ -34,7 +34,7 @@ func (s *SignalsSuite) TestStopBehavior() {
 			assert.NoError(t, err)
 			assert.Regexp(t, "workflow stop-terminate-.* stopped", output)
 		}).
-		WaitForWorkflow().
+		WaitForWorkflow(1 * time.Minute).
 		Then().
 		ExpectWorkflow(func(t *testing.T, m *metav1.ObjectMeta, status *wfv1.WorkflowStatus) {
 			assert.Equal(t, wfv1.WorkflowFailed, status.Phase)
@@ -63,7 +63,7 @@ func (s *SignalsSuite) TestTerminateBehavior() {
 			assert.NoError(t, err)
 			assert.Regexp(t, "workflow stop-terminate-.* terminated", output)
 		}).
-		WaitForWorkflow().
+		WaitForWorkflow(1 * time.Minute).
 		Then().
 		ExpectWorkflow(func(t *testing.T, m *metav1.ObjectMeta, status *wfv1.WorkflowStatus) {
 			assert.Equal(t, wfv1.WorkflowFailed, status.Phase)
@@ -78,6 +78,30 @@ func (s *SignalsSuite) TestTerminateBehavior() {
 	})
 }

+// Tests that new pods are never created once a stop shutdown strategy has been added
+func (s *SignalsSuite) TestDoNotCreatePodsUnderStopBehavior() {
+	s.Given().
+		Workflow("@functional/stop-terminate-2.yaml").
+		When().
+		SubmitWorkflow().
+		WaitForWorkflow(fixtures.ToStart, "to start").
+		RunCli([]string{"stop", "@latest"}, func(t *testing.T, output string, err error) {
+			assert.NoError(t, err)
+			assert.Regexp(t, "workflow stop-terminate-.* stopped", output)
+		}).
+		WaitForWorkflow(1 * time.Minute).
+		Then().
+		ExpectWorkflow(func(t *testing.T, m *metav1.ObjectMeta, status *wfv1.WorkflowStatus) {
+			assert.Equal(t, wfv1.WorkflowFailed, status.Phase)
+			nodeStatus := status.Nodes.FindByDisplayName("A")
+			if assert.NotNil(t, nodeStatus) {
+				assert.Equal(t, wfv1.NodeFailed, nodeStatus.Phase)
+			}
+			nodeStatus = status.Nodes.FindByDisplayName("B")
+			assert.Nil(t, nodeStatus)
+		})
+}
+
 func (s *SignalsSuite) TestPropagateMaxDuration() {
 	s.T().Skip("too hard to get working")
 	s.Given().
@@ -106,7 +130,7 @@ spec:
 `).
 		When().
 		SubmitWorkflow().
-		WaitForWorkflow(45 * time.Second).
+		WaitForWorkflow(1 * time.Minute).
 		Then().
 		ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) {
 			assert.Equal(t, wfv1.WorkflowFailed, status.Phase)
@@ -123,7 +147,7 @@ func (s *SignalsSuite) TestSidecars() {
 		Workflow("@testdata/sidecar-workflow.yaml").
 		When().
 		SubmitWorkflow().
-		WaitForWorkflow().
+		WaitForWorkflow(1 * time.Minute).
 		Then().
 		ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) {
 			assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase)

diff --git a/workflow/controller/operator_test.go b/workflow/controller/operator_test.go
index aaf2107f48d5..cd03158280bd 100644
--- a/workflow/controller/operator_test.go
+++ b/workflow/controller/operator_test.go
@@ -5770,6 +5770,7 @@ func TestParamAggregation(t *testing.T) {
 		}
 	}
 }
+
 func TestRetryOnDiffHost(t *testing.T) {
 	cancel, controller := newController()
 	defer cancel()
@@ -5836,3 +5837,35 @@ func TestRetryOnDiffHost(t *testing.T) {
 	}
 	assert.Equal(t, sourceNodeSelectorRequirement, targetNodeSelectorRequirement)
 }
+
+var noPodsWhenShutdown = `
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+  name: hello-world
+spec:
+  entrypoint: whalesay
+  shutdown: "Stop"
+  templates:
+  - name: whalesay
+    container:
+      image: docker/whalesay:latest
+      command: [cowsay]
+      args: ["hello world"]
+`
+
+func TestNoPodsWhenShutdown(t *testing.T) {
+	wf := unmarshalWF(noPodsWhenShutdown)
+	cancel, controller := newController(wf)
+	defer cancel()
+
+	ctx := context.Background()
+	woc := newWorkflowOperationCtx(wf, controller)
+	woc.operate(ctx)
+
+	node := woc.wf.Status.Nodes.FindByDisplayName("hello-world")
+	if assert.NotNil(t, node) {
+		assert.Equal(t, wfv1.NodeSkipped, node.Phase)
+		assert.Contains(t, node.Message, "workflow shutdown with strategy: Stop")
+	}
+}

diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go
index 18abcd9a827a..728e98d86c14 100644
--- a/workflow/controller/workflowpod.go
+++ b/workflow/controller/workflowpod.go
@@ -147,6 +147,12 @@ func (woc *wfOperationCtx) createWorkflowPod(ctx context.Context, nodeName strin
 		}
 	}

+	if !woc.execWf.Spec.Shutdown.ShouldExecute(opts.onExitPod) {
+		// Do not create pods if we are shutting down
+		woc.markNodePhase(nodeName, wfv1.NodeSkipped, fmt.Sprintf("workflow shutdown with strategy: %s", woc.execWf.Spec.Shutdown))
+		return nil, nil
+	}
+
 	tmpl = tmpl.DeepCopy()
 	wfSpec := woc.execWf.Spec.DeepCopy()

From 2ff4db115daa4e801da10938ecdb9e27d5810b35 Mon Sep 17 00:00:00 2001
From: Alex Collins
Date: Fri, 12 Feb 2021 12:03:27 -0800
Subject: [PATCH 5/9] feat(executor): Minimize the number of Kubernetes API requests made by executors (#4954)

Signed-off-by: Alex Collins
---
 .github/workflows/ci-build.yaml               |   2 +-
 Makefile                                      |   6 +-
 cmd/argoexec/commands/root.go                 |   6 +-
 pkg/apis/workflow/v1alpha1/workflow_types.go  |   9 +
 .../workflow/v1alpha1/workflow_types_test.go  |   9 +
 test/e2e/signals_test.go                      |  13 +-
 test/e2e/testdata/sidecar-workflow.yaml       |  10 +-
 test/stress/tool/main.go                      |   3 +-
 workflow/common/common.go                     |   2 +
 workflow/controller/operator.go               | 120 ++----
 workflow/controller/operator_test.go          |   2 +-
 workflow/controller/workflowpod.go            |  13 +-
 workflow/executor/common/common.go            | 110 +++---
 workflow/executor/common/common_test.go       |  35 +-
 workflow/executor/common/wait/wait.go         |  58 ---
 workflow/executor/docker/docker.go            | 143 +++++++-
 workflow/executor/executor.go                 | 218 ++---------
 workflow/executor/executor_test.go            |  18 +-
 workflow/executor/k8sapi/client.go            |  96 +++--
 workflow/executor/k8sapi/k8sapi.go            |  52 +--
 workflow/executor/k8sapi/k8sapi_test.go       |  19 +
 workflow/executor/kubelet/client.go           | 124 +++----
 workflow/executor/kubelet/kubelet.go          |  38 +-
 .../mocks/ContainerRuntimeExecutor.go         |  70 ++--
 workflow/executor/pns/pns.go                  | 346 ++++++++----------
 workflow/executor/pns/pns_test.go             |  12 -
 workflow/executor/resource_test.go            |   1 -
 27 files changed, 716 insertions(+), 819 deletions(-)
 delete mode 100644 workflow/executor/common/wait/wait.go
 create mode 100644 workflow/executor/k8sapi/k8sapi_test.go

diff --git a/.github/workflows/ci-build.yaml b/.github/workflows/ci-build.yaml
index 88f1333a3e9e..8f5071428dd0 100644
--- a/.github/workflows/ci-build.yaml
+++ b/.github/workflows/ci-build.yaml
@@ -77,7 +77,7 @@ jobs:
       - name: Pre-pull images
         env:
           GOPATH: /home/runner/go
-        run: make pull-build-images test-images &
+        run: make test-images &
       - name: Create Kubeconfig
         run: |
           mkdir -p ~/.kube

diff --git a/Makefile b/Makefile
index 1eea71888aed..57ce2019f6e6 100644
--- a/Makefile
+++ b/Makefile
@@ -383,7 +383,7 @@ install: $(MANIFESTS) $(E2E_MANIFESTS) dist/kustomize
 	kubectl get ns $(KUBE_NAMESPACE) || kubectl create ns $(KUBE_NAMESPACE)
 	kubectl config set-context --current --namespace=$(KUBE_NAMESPACE)
 	@echo "installing PROFILE=$(PROFILE) VERSION=$(VERSION), E2E_EXECUTOR=$(E2E_EXECUTOR)"
-	dist/kustomize build --load_restrictor=none test/e2e/manifests/$(PROFILE) | sed 's/image: argoproj/image: $(IMAGE_NAMESPACE)/' | sed 's/:latest/:$(VERSION)/' | sed 's/pns/$(E2E_EXECUTOR)/' | kubectl -n $(KUBE_NAMESPACE) apply -f -
+	dist/kustomize build --load_restrictor=none test/e2e/manifests/$(PROFILE) | sed 's/argoproj\//$(IMAGE_NAMESPACE)\//' | sed 's/:latest/:$(VERSION)/' | sed 's/pns/$(E2E_EXECUTOR)/' | kubectl -n $(KUBE_NAMESPACE) apply -f -
 	kubectl -n $(KUBE_NAMESPACE) apply -f test/stress/massive-workflow.yaml
 	kubectl -n $(KUBE_NAMESPACE) rollout restart deploy workflow-controller
 	kubectl -n $(KUBE_NAMESPACE) rollout restart deploy argo-server
@@ -394,10 +394,6 @@ ifeq ($(RUN_MODE),kubernetes)
 	kubectl -n $(KUBE_NAMESPACE) scale deploy/argo-server --replicas 1
 endif

-.PHONY: pull-build-images
-pull-build-images:
-	./hack/pull-build-images.sh
-
 .PHONY: argosay
 argosay: test/e2e/images/argosay/v2/argosay
 	cd test/e2e/images/argosay/v2 && docker build .
-t argoproj/argosay:v2 diff --git a/cmd/argoexec/commands/root.go b/cmd/argoexec/commands/root.go index d3e98715f4ef..0dab6fa2329f 100644 --- a/cmd/argoexec/commands/root.go +++ b/cmd/argoexec/commands/root.go @@ -101,11 +101,11 @@ func initExecutor() *executor.WorkflowExecutor { case common.ContainerRuntimeExecutorK8sAPI: cre, err = k8sapi.NewK8sAPIExecutor(clientset, config, podName, namespace) case common.ContainerRuntimeExecutorKubelet: - cre, err = kubelet.NewKubeletExecutor() + cre, err = kubelet.NewKubeletExecutor(namespace, podName) case common.ContainerRuntimeExecutorPNS: - cre, err = pns.NewPNSExecutor(clientset, podName, namespace, tmpl.Outputs.HasOutputs()) + cre, err = pns.NewPNSExecutor(clientset, podName, namespace) default: - cre, err = docker.NewDockerExecutor() + cre, err = docker.NewDockerExecutor(namespace, podName) } checkErr(err) diff --git a/pkg/apis/workflow/v1alpha1/workflow_types.go b/pkg/apis/workflow/v1alpha1/workflow_types.go index 6543cbd04f64..ea063248e8ca 100644 --- a/pkg/apis/workflow/v1alpha1/workflow_types.go +++ b/pkg/apis/workflow/v1alpha1/workflow_types.go @@ -621,6 +621,15 @@ func (tmpl *Template) HasPodSpecPatch() bool { return tmpl.PodSpecPatch != "" } +func (tmpl *Template) GetSidecarNames() []string { + var containerNames []string + for _, s := range tmpl.Sidecars { + containerNames = append(containerNames, s.Name) + } + return containerNames + +} + type Artifacts []Artifact func (a Artifacts) GetArtifactByName(name string) *Artifact { diff --git a/pkg/apis/workflow/v1alpha1/workflow_types_test.go b/pkg/apis/workflow/v1alpha1/workflow_types_test.go index 31b70cd76c41..a5fd70d5dbe1 100644 --- a/pkg/apis/workflow/v1alpha1/workflow_types_test.go +++ b/pkg/apis/workflow/v1alpha1/workflow_types_test.go @@ -610,3 +610,12 @@ func TestWorkflow_GetSemaphoreKeys(t *testing.T) { assert.Contains(keys, "test/template") assert.Contains(keys, "test/template1") } + +func TestTemplate_GetSidecarNames(t *testing.T) { + m := &Template{ + Sidecars: []UserContainer{ + {Container: corev1.Container{Name: "sidecar-0"}}, + }, + } + assert.ElementsMatch(t, []string{"sidecar-0"}, m.GetSidecarNames()) +} diff --git a/test/e2e/signals_test.go b/test/e2e/signals_test.go index e8add2f5b54e..2d5cecd62013 100644 --- a/test/e2e/signals_test.go +++ b/test/e2e/signals_test.go @@ -14,6 +14,9 @@ import ( "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" ) +// Tests the use of signals to kill containers. +// argoproj/argosay:v2 does not contain sh, so you must use argoproj/argosay:v1. +// Killing often requires SIGKILL, which is issued 30s after SIGTERM. So tests need longer (>30s) timeout. type SignalsSuite struct { fixtures.E2ESuite } @@ -37,10 +40,10 @@ func (s *SignalsSuite) TestStopBehavior() { WaitForWorkflow(1 * time.Minute). Then(). ExpectWorkflow(func(t *testing.T, m *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, wfv1.WorkflowFailed, status.Phase) + assert.Contains(t, []wfv1.WorkflowPhase{wfv1.WorkflowFailed, wfv1.WorkflowError}, status.Phase) nodeStatus := status.Nodes.FindByDisplayName("A") if assert.NotNil(t, nodeStatus) { - assert.Equal(t, wfv1.NodeFailed, nodeStatus.Phase) + assert.Contains(t, []wfv1.NodePhase{wfv1.NodeFailed, wfv1.NodeError}, nodeStatus.Phase) } nodeStatus = status.Nodes.FindByDisplayName("A.onExit") if assert.NotNil(t, nodeStatus) { @@ -66,10 +69,10 @@ func (s *SignalsSuite) TestTerminateBehavior() { WaitForWorkflow(1 * time.Minute). Then(). 
ExpectWorkflow(func(t *testing.T, m *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, wfv1.WorkflowFailed, status.Phase) + assert.Contains(t, []wfv1.WorkflowPhase{wfv1.WorkflowFailed, wfv1.WorkflowError}, status.Phase) nodeStatus := status.Nodes.FindByDisplayName("A") if assert.NotNil(t, nodeStatus) { - assert.Equal(t, wfv1.NodeFailed, nodeStatus.Phase) + assert.Contains(t, []wfv1.NodePhase{wfv1.NodeFailed, wfv1.NodeError}, nodeStatus.Phase) } nodeStatus = status.Nodes.FindByDisplayName("A.onExit") assert.Nil(t, nodeStatus) @@ -133,7 +136,7 @@ spec: WaitForWorkflow(1 * time.Minute). Then(). ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { - assert.Equal(t, wfv1.WorkflowFailed, status.Phase) + assert.Contains(t, []wfv1.WorkflowPhase{wfv1.WorkflowFailed, wfv1.WorkflowError}, status.Phase) assert.Len(t, status.Nodes, 3) node := status.Nodes.FindByDisplayName("retry-backoff-2(1)") if assert.NotNil(t, node) { diff --git a/test/e2e/testdata/sidecar-workflow.yaml b/test/e2e/testdata/sidecar-workflow.yaml index 8712f82fd870..a0fad6f44f33 100644 --- a/test/e2e/testdata/sidecar-workflow.yaml +++ b/test/e2e/testdata/sidecar-workflow.yaml @@ -9,8 +9,12 @@ spec: templates: - name: main container: - image: argoproj/argosay:v2 + image: argoproj/argosay:v1 + args: [ sleep, "5s" ] sidecars: - name: sidecar-0 - image: argoproj/argosay:v2 - argso: [ sleep, "999" ] + image: argoproj/argosay:v1 + args: [ sleep, "999s" ] + - name: sidecar-1 + image: argoproj/argosay:v1 + args: [ sleep, "999s" ] diff --git a/test/stress/tool/main.go b/test/stress/tool/main.go index 84afbf1d5e9a..648e1e476d15 100644 --- a/test/stress/tool/main.go +++ b/test/stress/tool/main.go @@ -4,6 +4,7 @@ import ( "context" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + _ "k8s.io/client-go/plugin/pkg/client/auth" "k8s.io/client-go/tools/clientcmd" "sigs.k8s.io/yaml" @@ -45,7 +46,7 @@ spec: } ctx := context.Background() - for i := 0; i < 100; i++ { + for i := 0; i < 500; i++ { _, err := w.Create(ctx, wf, metav1.CreateOptions{}) if err != nil { panic(err) diff --git a/workflow/common/common.go b/workflow/common/common.go index 75e222df6fb8..0de34e562d3e 100644 --- a/workflow/common/common.go +++ b/workflow/common/common.go @@ -105,6 +105,8 @@ const ( // EnvVarPodName contains the name of the pod (currently unused) EnvVarPodName = "ARGO_POD_NAME" + // EnvVarContainerName container the container's name for the current pod + EnvVarContainerName = "ARGO_CONTAINER_NAME" // EnvVarContainerRuntimeExecutor contains the name of the container runtime executor to use, empty is equal to "docker" EnvVarContainerRuntimeExecutor = "ARGO_CONTAINER_RUNTIME_EXECUTOR" // EnvVarDownwardAPINodeIP is the envvar used to get the `status.hostIP` diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 36a1862f6471..d98fda64299d 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -1272,33 +1272,23 @@ func inferFailedReason(pod *apiv1.Pod) (wfv1.NodePhase, string) { // We only get one message to set for the overall node status. 
// If multiple containers failed, in order of preference: // init, main (annotated), main (exit code), wait, sidecars - for _, ctr := range pod.Status.InitContainerStatuses { - // Virtual Kubelet environment will not set the terminate on waiting container - // https://github.com/argoproj/argo-workflows/issues/3879 - // https://github.com/virtual-kubelet/virtual-kubelet/blob/7f2a02291530d2df14905702e6d51500dd57640a/node/sync.go#L195-L208 - if ctr.State.Waiting != nil { - return wfv1.NodeError, fmt.Sprintf("Pod failed before %s container starts", ctr.Name) + order := func(n string) int { + order, ok := map[string]int{ + common.InitContainerName: 0, + common.MainContainerName: 1, + common.WaitContainerName: 2, + }[n] + if ok { + return order } - if ctr.State.Terminated == nil { - // We should never get here - log.Warnf("Pod %s phase was Failed but %s did not have terminated state", pod.ObjectMeta.Name, ctr.Name) - continue - } - if ctr.State.Terminated.ExitCode == 0 { - continue - } - errMsg := "failed to load artifacts" - for _, msg := range []string{annotatedMsg, ctr.State.Terminated.Message} { - if msg != "" { - errMsg += ": " + msg - break - } - } - // NOTE: we consider artifact load issues as Error instead of Failed - return wfv1.NodeError, errMsg + return 3 } - failMessages := make(map[string]string) - for _, ctr := range pod.Status.ContainerStatuses { + + ctrs := append(pod.Status.InitContainerStatuses, pod.Status.ContainerStatuses...) + sort.Slice(ctrs, func(i, j int) bool { return order(ctrs[i].Name) < order(ctrs[j].Name) }) + + for _, ctr := range ctrs { + // Virtual Kubelet environment will not set the terminate on waiting container // https://github.com/argoproj/argo-workflows/issues/3879 // https://github.com/virtual-kubelet/virtual-kubelet/blob/7f2a02291530d2df14905702e6d51500dd57640a/node/sync.go#L195-L208 @@ -1306,76 +1296,40 @@ func inferFailedReason(pod *apiv1.Pod) (wfv1.NodePhase, string) { if ctr.State.Waiting != nil { return wfv1.NodeError, fmt.Sprintf("Pod failed before %s container starts", ctr.Name) } - if ctr.State.Terminated == nil { + t := ctr.State.Terminated + if t == nil { // We should never get here - log.Warnf("Pod %s phase was Failed but %s did not have terminated state", pod.ObjectMeta.Name, ctr.Name) - continue - } - if ctr.State.Terminated.ExitCode == 0 { - continue - } - if ctr.State.Terminated.Message == "" && ctr.State.Terminated.Reason == "OOMKilled" { - failMessages[ctr.Name] = ctr.State.Terminated.Reason - continue - } - if ctr.Name == common.WaitContainerName { - errDetails := "" - for _, msg := range []string{annotatedMsg, ctr.State.Terminated.Message} { - if msg != "" { - errDetails = msg - break - } - } - if errDetails == "" { - // executor is expected to annotate a message to the pod upon any errors. - // If we failed to see the annotated message, it is likely the pod ran with - // insufficient privileges. Give a hint to that effect. 
- errDetails = fmt.Sprintf("verify serviceaccount %s:%s has necessary privileges", pod.ObjectMeta.Namespace, pod.Spec.ServiceAccountName) - } - errMsg := fmt.Sprintf("failed to save outputs: %s", errDetails) - failMessages[ctr.Name] = errMsg + log.Warnf("Pod %s phase was Failed but %s did not have terminated state", pod.Name, ctr.Name) continue } - if ctr.State.Terminated.Message != "" { - errMsg := ctr.State.Terminated.Message - if ctr.Name != common.MainContainerName { - errMsg = fmt.Sprintf("sidecar '%s' %s", ctr.Name, errMsg) - } - failMessages[ctr.Name] = errMsg + if t.ExitCode == 0 { continue } - errMsg := fmt.Sprintf("failed with exit code %d", ctr.State.Terminated.ExitCode) - if ctr.Name != common.MainContainerName { - if ctr.State.Terminated.ExitCode == 137 || ctr.State.Terminated.ExitCode == 143 { + + msg := fmt.Sprintf("exit code %d: %s; %s; %s", t.ExitCode, t.Reason, t.Message, annotatedMsg) + + switch ctr.Name { + case common.InitContainerName: + return wfv1.NodeError, msg + case common.MainContainerName: + return wfv1.NodeFailed, msg + case common.WaitContainerName: + // executor is expected to annotate a message to the pod upon any errors. + // If we failed to see the annotated message, it is likely the pod ran with + // insufficient privileges. Give a hint to that effect. + return wfv1.NodeError, fmt.Sprintf("%s; verify serviceaccount %s:%s has necessary privileges", msg, pod.Namespace, pod.Spec.ServiceAccountName) + default: + if t.ExitCode == 137 || t.ExitCode == 143 { // if the sidecar was SIGKILL'd (exit code 137) assume it was because argoexec // forcibly killed the container, which we ignore the error for. // Java code 143 is a normal exit 128 + 15 https://github.com/elastic/elasticsearch/issues/31847 - log.Infof("Ignoring %d exit code of sidecar '%s'", ctr.State.Terminated.ExitCode, ctr.Name) - continue + log.Infof("Ignoring %d exit code of container '%s'", t.ExitCode, ctr.Name) + } else { + return wfv1.NodeFailed, msg } - errMsg = fmt.Sprintf("sidecar '%s' %s", ctr.Name, errMsg) } - failMessages[ctr.Name] = errMsg - } - if failMsg, ok := failMessages[common.MainContainerName]; ok { - _, ok = failMessages[common.WaitContainerName] - isResourceTemplate := !ok - if isResourceTemplate && annotatedMsg != "" { - // For resource templates, we prefer the annotated message - // over the vanilla exit code 1 error - return wfv1.NodeFailed, annotatedMsg - } - return wfv1.NodeFailed, failMsg - } - if failMsg, ok := failMessages[common.WaitContainerName]; ok { - return wfv1.NodeError, failMsg } - // If we get here, both the main and wait container succeeded. Iterate the fail messages to - // identify the sidecar which failed and return the message. - for _, failMsg := range failMessages { - return wfv1.NodeFailed, failMsg - } // If we get here, we have detected that the main/wait containers succeed but the sidecar(s) // were SIGKILL'd. The executor may have had to forcefully terminate the sidecar (kill -9), // resulting in a 137 exit code (which we had ignored earlier). 
If failMessages is empty, it diff --git a/workflow/controller/operator_test.go b/workflow/controller/operator_test.go index cd03158280bd..7de8d44ba073 100644 --- a/workflow/controller/operator_test.go +++ b/workflow/controller/operator_test.go @@ -5439,7 +5439,7 @@ func TestPodFailureWithContainerOOM(t *testing.T) { assert.NotNil(t, pod) nodeStatus, msg := inferFailedReason(&pod) assert.Equal(t, tt.phase, nodeStatus) - assert.Equal(t, msg, "OOMKilled") + assert.Contains(t, msg, "OOMKilled") } } diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index 728e98d86c14..aa6c6834c2cc 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -291,6 +291,11 @@ func (woc *wfOperationCtx) createWorkflowPod(ctx context.Context, nodeName strin } addOutputArtifactsVolumes(pod, tmpl) + for i, c := range pod.Spec.Containers { + c.Env = append(c.Env, apiv1.EnvVar{Name: common.EnvVarContainerName, Value: c.Name}) // used to identify the container name of the process + pod.Spec.Containers[i] = c + } + // Set the container template JSON in pod annotations, which executor examines for things like // artifact location/path. tmplBytes, err := json.Marshal(tmpl) @@ -428,13 +433,13 @@ func substitutePodParams(pod *apiv1.Pod, globalParams common.Parameters, tmpl *w func (woc *wfOperationCtx) newInitContainer(tmpl *wfv1.Template) apiv1.Container { ctr := woc.newExecContainer(common.InitContainerName, tmpl) - ctr.Command = []string{"argoexec", "init"} + ctr.Command = []string{"argoexec", "init", "--loglevel", getExecutorLogLevel()} return *ctr } func (woc *wfOperationCtx) newWaitContainer(tmpl *wfv1.Template) (*apiv1.Container, error) { ctr := woc.newExecContainer(common.WaitContainerName, tmpl) - ctr.Command = []string{"argoexec", "wait"} + ctr.Command = []string{"argoexec", "wait", "--loglevel", getExecutorLogLevel()} switch woc.controller.GetContainerRuntimeExecutor() { case common.ContainerRuntimeExecutorPNS: ctr.SecurityContext = &apiv1.SecurityContext{ @@ -459,6 +464,10 @@ func (woc *wfOperationCtx) newWaitContainer(tmpl *wfv1.Template) (*apiv1.Contain return ctr, nil } +func getExecutorLogLevel() string { + return log.GetLevel().String() +} + // hasPrivilegedContainers tests if the main container or sidecars is privileged func hasPrivilegedContainers(tmpl *wfv1.Template) bool { if containerIsPrivileged(tmpl.Container) { diff --git a/workflow/executor/common/common.go b/workflow/executor/common/common.go index 230a53bda61d..712b32c97274 100644 --- a/workflow/executor/common/common.go +++ b/workflow/executor/common/common.go @@ -12,6 +12,8 @@ import ( log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" + + "github.com/argoproj/argo-workflows/v3/util/slice" ) const ( @@ -19,24 +21,25 @@ const ( ) // GetContainerID returns container ID of a ContainerStatus resource -func GetContainerID(container *v1.ContainerStatus) string { - i := strings.Index(container.ContainerID, containerShimPrefix) +func GetContainerID(container string) string { + i := strings.Index(container, containerShimPrefix) if i == -1 { - return "" + return container } - return container.ContainerID[i+len(containerShimPrefix):] + return container[i+len(containerShimPrefix):] } // KubernetesClientInterface is the interface to implement getContainerStatus method type KubernetesClientInterface interface { - GetContainerStatus(ctx context.Context, containerID string) (*v1.Pod, *v1.ContainerStatus, error) + GetContainerStatus(ctx context.Context, containerName string) (*v1.Pod, 
*v1.ContainerStatus, error) + GetContainerStatuses(ctx context.Context) (*v1.Pod, []v1.ContainerStatus, error) KillContainer(pod *v1.Pod, container *v1.ContainerStatus, sig syscall.Signal) error - CreateArchive(ctx context.Context, containerID, sourcePath string) (*bytes.Buffer, error) + CreateArchive(ctx context.Context, containerName, sourcePath string) (*bytes.Buffer, error) } -// WaitForTermination of the given containerID, set the timeout to 0 to discard it -func WaitForTermination(ctx context.Context, c KubernetesClientInterface, containerID string, timeout time.Duration) error { - ticker := time.NewTicker(time.Second * 1) +// WaitForTermination of the given containerName, set the timeout to 0 to discard it +func WaitForTermination(ctx context.Context, c KubernetesClientInterface, containerNames []string, timeout time.Duration) error { + ticker := time.NewTicker(time.Second * 5) defer ticker.Stop() timer := time.NewTimer(timeout) if timeout == 0 { @@ -46,78 +49,97 @@ func WaitForTermination(ctx context.Context, c KubernetesClientInterface, contai } else { defer timer.Stop() } - - log.Infof("Starting to wait completion of containerID %s ...", containerID) + log.Infof("Starting to wait completion of containers %s...", strings.Join(containerNames, ",")) for { select { case <-ticker.C: - _, containerStatus, err := c.GetContainerStatus(ctx, containerID) + done, err := isTerminated(ctx, c, containerNames) if err != nil { return err + } else if done { + return nil } - if containerStatus.State.Terminated == nil { - continue - } - log.Infof("ContainerID %q is terminated: %v", containerID, containerStatus.String()) - return nil case <-timer.C: return fmt.Errorf("timeout after %s", timeout.String()) } } } +func isTerminated(ctx context.Context, c KubernetesClientInterface, containerNames []string) (bool, error) { + _, containerStatus, err := c.GetContainerStatuses(ctx) + if err != nil { + return false, err + } + for _, s := range containerStatus { + log.Debugf("%q %v", s.Name, s.State.Terminated) + if s.State.Terminated == nil && slice.ContainsString(containerNames, s.Name) { + return false, nil + } + } + return true, nil +} + // TerminatePodWithContainerID invoke the given SIG against the PID1 of the container. 
// No-op if the container is on the hostPID -func TerminatePodWithContainerID(ctx context.Context, c KubernetesClientInterface, containerID string, sig syscall.Signal) error { - pod, container, err := c.GetContainerStatus(ctx, containerID) +func TerminatePodWithContainerNames(ctx context.Context, c KubernetesClientInterface, containerNames []string, sig syscall.Signal) error { + pod, containerStatuses, err := c.GetContainerStatuses(ctx) if err != nil { return err } - if container.State.Terminated != nil { - log.Infof("Container %s is already terminated: %v", container.ContainerID, container.State.Terminated.String()) - return nil - } - if pod.Spec.ShareProcessNamespace != nil && *pod.Spec.ShareProcessNamespace { - return fmt.Errorf("cannot terminate a process-namespace-shared Pod %s", pod.Name) - } - if pod.Spec.HostPID { - return fmt.Errorf("cannot terminate a hostPID Pod %s", pod.Name) - } - if pod.Spec.RestartPolicy != "Never" { - return fmt.Errorf("cannot terminate pod with a %q restart policy", pod.Spec.RestartPolicy) + for _, s := range containerStatuses { + if !slice.ContainsString(containerNames, s.Name) { + continue + } + if s.State.Terminated != nil { + log.Infof("Container %s is already terminated: %v", s.Name, s.State.Terminated.String()) + continue + } + if pod.Spec.ShareProcessNamespace != nil && *pod.Spec.ShareProcessNamespace { + return fmt.Errorf("cannot terminate a process-namespace-shared Pod %s", pod.Name) + } + if pod.Spec.HostPID { + return fmt.Errorf("cannot terminate a hostPID Pod %s", pod.Name) + } + if pod.Spec.RestartPolicy != "Never" { + return fmt.Errorf("cannot terminate pod with a %q restart policy", pod.Spec.RestartPolicy) + } + err := c.KillContainer(pod, &s, sig) + if err != nil { + return err + } } - return c.KillContainer(pod, container, sig) + return nil } // KillGracefully kills a container gracefully. 
-func KillGracefully(ctx context.Context, c KubernetesClientInterface, containerID string, terminationGracePeriodDuration time.Duration) error { - log.Infof("SIGTERM containerID %q: %s", containerID, syscall.SIGTERM.String()) - err := TerminatePodWithContainerID(ctx, c, containerID, syscall.SIGTERM) +func KillGracefully(ctx context.Context, c KubernetesClientInterface, containerNames []string, terminationGracePeriodDuration time.Duration) error { + log.Infof("SIGTERM containers %s: %s", strings.Join(containerNames, ","), syscall.SIGTERM.String()) + err := TerminatePodWithContainerNames(ctx, c, containerNames, syscall.SIGTERM) if err != nil { return err } - err = WaitForTermination(ctx, c, containerID, terminationGracePeriodDuration) + err = WaitForTermination(ctx, c, containerNames, terminationGracePeriodDuration) if err == nil { - log.Infof("ContainerID %q successfully killed", containerID) + log.Infof("Containers %s successfully killed", strings.Join(containerNames, ",")) return nil } - log.Infof("SIGKILL containerID %q: %s", containerID, syscall.SIGKILL.String()) - err = TerminatePodWithContainerID(ctx, c, containerID, syscall.SIGKILL) + log.Infof("SIGKILL containers %s: %s", strings.Join(containerNames, ","), syscall.SIGKILL.String()) + err = TerminatePodWithContainerNames(ctx, c, containerNames, syscall.SIGKILL) if err != nil { return err } - err = WaitForTermination(ctx, c, containerID, terminationGracePeriodDuration) + err = WaitForTermination(ctx, c, containerNames, terminationGracePeriodDuration) if err != nil { return err } - log.Infof("ContainerID %q successfully killed", containerID) + log.Infof("Containers %s successfully killed", strings.Join(containerNames, ",")) return nil } // CopyArchive downloads files and directories as a tarball and saves it to a specified path. 
-func CopyArchive(ctx context.Context, c KubernetesClientInterface, containerID, sourcePath, destPath string) error { - log.Infof("Archiving %s:%s to %s", containerID, sourcePath, destPath) - b, err := c.CreateArchive(ctx, containerID, sourcePath) +func CopyArchive(ctx context.Context, c KubernetesClientInterface, containerName, sourcePath, destPath string) error { + log.Infof("Archiving %s:%s to %s", containerName, sourcePath, destPath) + b, err := c.CreateArchive(ctx, containerName, sourcePath) if err != nil { return err } diff --git a/workflow/executor/common/common_test.go b/workflow/executor/common/common_test.go index 330e32cf707d..8571a01b6e25 100644 --- a/workflow/executor/common/common_test.go +++ b/workflow/executor/common/common_test.go @@ -20,7 +20,11 @@ type MockKC struct { killContainerError error } -func (m *MockKC) GetContainerStatus(ctx context.Context, containerID string) (*v1.Pod, *v1.ContainerStatus, error) { +func (m *MockKC) GetContainerStatuses(ctx context.Context) (*v1.Pod, []v1.ContainerStatus, error) { + return m.getContainerStatusPod, []v1.ContainerStatus{*m.getContainerStatusContainerStatus}, m.getContainerStatusErr +} + +func (m *MockKC) GetContainerStatus(ctx context.Context, containerName string) (*v1.Pod, *v1.ContainerStatus, error) { return m.getContainerStatusPod, m.getContainerStatusContainerStatus, m.getContainerStatusErr } @@ -28,12 +32,12 @@ func (m *MockKC) KillContainer(pod *v1.Pod, container *v1.ContainerStatus, sig s return m.killContainerError } -func (*MockKC) CreateArchive(ctx context.Context, containerID, sourcePath string) (*bytes.Buffer, error) { +func (*MockKC) CreateArchive(ctx context.Context, containerName, sourcePath string) (*bytes.Buffer, error) { return nil, nil } // TestScriptTemplateWithVolume ensure we can a script pod with input artifacts -func TestTerminatePodWithContainerID(t *testing.T) { +func TestTerminatePodWithContainerName(t *testing.T) { // Already terminated. mock := &MockKC{ getContainerStatusContainerStatus: &v1.ContainerStatus{ @@ -43,7 +47,7 @@ func TestTerminatePodWithContainerID(t *testing.T) { }, } ctx := context.Background() - err := TerminatePodWithContainerID(ctx, mock, "container-id", syscall.SIGTERM) + err := TerminatePodWithContainerNames(ctx, mock, []string{"container-name"}, syscall.SIGTERM) assert.NoError(t, err) // w/ ShareProcessNamespace. @@ -57,12 +61,13 @@ func TestTerminatePodWithContainerID(t *testing.T) { }, }, getContainerStatusContainerStatus: &v1.ContainerStatus{ + Name: "container-name", State: v1.ContainerState{ Terminated: nil, }, }, } - err = TerminatePodWithContainerID(ctx, mock, "container-id", syscall.SIGTERM) + err = TerminatePodWithContainerNames(ctx, mock, []string{"container-name"}, syscall.SIGTERM) assert.EqualError(t, err, "cannot terminate a process-namespace-shared Pod foo") // w/ HostPID. @@ -76,12 +81,13 @@ func TestTerminatePodWithContainerID(t *testing.T) { }, }, getContainerStatusContainerStatus: &v1.ContainerStatus{ + Name: "container-name", State: v1.ContainerState{ Terminated: nil, }, }, } - err = TerminatePodWithContainerID(ctx, mock, "container-id", syscall.SIGTERM) + err = TerminatePodWithContainerNames(ctx, mock, []string{"container-name"}, syscall.SIGTERM) assert.EqualError(t, err, "cannot terminate a hostPID Pod foo") // w/ RestartPolicy. 
@@ -95,12 +101,13 @@ func TestTerminatePodWithContainerID(t *testing.T) { }, }, getContainerStatusContainerStatus: &v1.ContainerStatus{ + Name: "container-name", State: v1.ContainerState{ Terminated: nil, }, }, } - err = TerminatePodWithContainerID(ctx, mock, "container-id", syscall.SIGTERM) + err = TerminatePodWithContainerNames(ctx, mock, []string{"container-name"}, syscall.SIGTERM) assert.EqualError(t, err, "cannot terminate pod with a \"Always\" restart policy") // Successfully call KillContainer of the client interface. @@ -114,12 +121,13 @@ func TestTerminatePodWithContainerID(t *testing.T) { }, }, getContainerStatusContainerStatus: &v1.ContainerStatus{ + Name: "container-name", State: v1.ContainerState{ Terminated: nil, }, }, } - err = TerminatePodWithContainerID(ctx, mock, "container-id", syscall.SIGTERM) + err = TerminatePodWithContainerNames(ctx, mock, []string{"container-name"}, syscall.SIGTERM) assert.NoError(t, err) } @@ -128,24 +136,26 @@ func TestWaitForTermination(t *testing.T) { // Successfully SIGTERM Container mock := &MockKC{ getContainerStatusContainerStatus: &v1.ContainerStatus{ + Name: "container-name", State: v1.ContainerState{ Terminated: &v1.ContainerStateTerminated{}, }, }, } ctx := context.Background() - err := WaitForTermination(ctx, mock, "container-id", time.Duration(2)*time.Second) + err := WaitForTermination(ctx, mock, []string{"container-name"}, time.Duration(10)*time.Second) assert.NoError(t, err) // Fail SIGTERM Container mock = &MockKC{ getContainerStatusContainerStatus: &v1.ContainerStatus{ + Name: "container-name", State: v1.ContainerState{ Terminated: nil, }, }, } - err = WaitForTermination(ctx, mock, "container-id", time.Duration(1)*time.Second) + err = WaitForTermination(ctx, mock, []string{"container-name"}, time.Duration(1)*time.Second) assert.EqualError(t, err, "timeout after 1s") } @@ -162,12 +172,13 @@ func TestKillGracefully(t *testing.T) { }, }, getContainerStatusContainerStatus: &v1.ContainerStatus{ + Name: "container-name", State: v1.ContainerState{ Terminated: nil, }, }, } ctx := context.Background() - err := KillGracefully(ctx, mock, "container-id", 1) - assert.EqualError(t, err, "timeout after 1ns") + err := KillGracefully(ctx, mock, []string{"container-name"}, time.Second) + assert.EqualError(t, err, "timeout after 1s") } diff --git a/workflow/executor/common/wait/wait.go b/workflow/executor/common/wait/wait.go deleted file mode 100644 index 4bf2351460a1..000000000000 --- a/workflow/executor/common/wait/wait.go +++ /dev/null @@ -1,58 +0,0 @@ -package wait - -import ( - "context" - "fmt" - - log "github.com/sirupsen/logrus" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/kubernetes/typed/core/v1" - - "github.com/argoproj/argo-workflows/v3/workflow/executor/common" -) - -func UntilTerminated(ctx context.Context, kubernetesInterface kubernetes.Interface, namespace, podName, containerID string) error { - log.Infof("Waiting for container %s to be terminated", containerID) - podInterface := kubernetesInterface.CoreV1().Pods(namespace) - listOptions := metav1.ListOptions{FieldSelector: "metadata.name=" + podName} - for { - done, err := untilTerminatedAux(ctx, podInterface, containerID, listOptions) - if done { - return err - } - } -} - -func untilTerminatedAux(ctx context.Context, podInterface v1.PodInterface, containerID string, listOptions metav1.ListOptions) (bool, error) { - for { - timedOut, done, err := 
doWatch(ctx, podInterface, containerID, listOptions) - if !timedOut { - return done, err - } - log.Infof("Pod watch timed out, restarting watch on %s", containerID) - } -} - -func doWatch(ctx context.Context, podInterface v1.PodInterface, containerID string, listOptions metav1.ListOptions) (bool, bool, error) { - w, err := podInterface.Watch(ctx, listOptions) - if err != nil { - return false, true, fmt.Errorf("could not watch pod: %w", err) - } - defer w.Stop() - for event := range w.ResultChan() { - pod, ok := event.Object.(*corev1.Pod) - if !ok { - return false, false, apierrors.FromObject(event.Object) - } - for _, s := range pod.Status.ContainerStatuses { - if common.GetContainerID(&s) == containerID && s.State.Terminated != nil { - return false, true, nil - } - } - listOptions.ResourceVersion = pod.ResourceVersion - } - return true, false, nil -} diff --git a/workflow/executor/docker/docker.go b/workflow/executor/docker/docker.go index 9c060a034f4e..90c404b1498b 100644 --- a/workflow/executor/docker/docker.go +++ b/workflow/executor/docker/docker.go @@ -23,19 +23,29 @@ import ( "github.com/argoproj/argo-workflows/v3/workflow/common" ) -type DockerExecutor struct{} +var errContainerNotExist = fmt.Errorf("container does not exist") // sentinel error -func NewDockerExecutor() (*DockerExecutor, error) { +type DockerExecutor struct { + namespace string + podName string + containers map[string]string // containerName -> containerID +} + +func NewDockerExecutor(namespace, podName string) (*DockerExecutor, error) { log.Infof("Creating a docker executor") - return &DockerExecutor{}, nil + return &DockerExecutor{namespace, podName, make(map[string]string)}, nil } -func (d *DockerExecutor) GetFileContents(containerID string, sourcePath string) (string, error) { +func (d *DockerExecutor) GetFileContents(containerName string, sourcePath string) (string, error) { // Uses docker cp command to return contents of the file // NOTE: docker cp CONTAINER:SRC_PATH DEST_PATH|- streams the contents of the resource // as a tar archive to STDOUT if using - as DEST_PATH. Thus, we need to extract the // content from the tar archive and output into stdout. In this way, we do not need to // create and copy the content into a file from the wait container. 
+ containerID, err := d.getContainerID(containerName) + if err != nil { + return "", err + } dockerCpCmd := fmt.Sprintf("docker cp -a %s:%s - | tar -ax -O", containerID, sourcePath) out, err := common.RunShellCommand(dockerCpCmd) if err != nil { @@ -44,11 +54,14 @@ func (d *DockerExecutor) GetFileContents(containerID string, sourcePath string) return string(out), nil } -func (d *DockerExecutor) CopyFile(containerID string, sourcePath string, destPath string, compressionLevel int) error { - log.Infof("Archiving %s:%s to %s", containerID, sourcePath, destPath) - +func (d *DockerExecutor) CopyFile(containerName string, sourcePath string, destPath string, compressionLevel int) error { + log.Infof("Archiving %s:%s to %s", containerName, sourcePath, destPath) + containerID, err := d.getContainerID(containerName) + if err != nil { + return err + } dockerCpCmd := getDockerCpCmd(containerID, sourcePath, compressionLevel, destPath) - _, err := common.RunShellCommand(dockerCpCmd) + _, err = common.RunShellCommand(dockerCpCmd) if err != nil { return err } @@ -83,7 +96,11 @@ func (c *cmdCloser) Close() error { return nil } -func (d *DockerExecutor) GetOutputStream(ctx context.Context, containerID string, combinedOutput bool) (io.ReadCloser, error) { +func (d *DockerExecutor) GetOutputStream(ctx context.Context, containerName string, combinedOutput bool) (io.ReadCloser, error) { + containerID, err := d.getContainerID(containerName) + if err != nil { + return nil, err + } cmd := exec.Command("docker", "logs", containerID) log.Info(cmd.Args) @@ -130,7 +147,11 @@ func (d *DockerExecutor) GetOutputStream(ctx context.Context, containerID string return &cmdCloser{Reader: reader, cmd: cmd}, nil } -func (d *DockerExecutor) GetExitCode(ctx context.Context, containerID string) (string, error) { +func (d *DockerExecutor) GetExitCode(ctx context.Context, containerName string) (string, error) { + containerID, err := d.getContainerID(containerName) + if err != nil { + return "", err + } cmd := exec.Command("docker", "inspect", containerID, "--format='{{.State.ExitCode}}'") reader, err := cmd.StdoutPipe() if err != nil { @@ -161,22 +182,91 @@ func (d *DockerExecutor) GetExitCode(ctx context.Context, containerID string) (s return exitCode, nil } -func (d *DockerExecutor) WaitInit() error { - return nil +func (d *DockerExecutor) Wait(ctx context.Context, containerNames, sidecarNames []string) error { + err := d.syncContainerIDs(ctx, append(containerNames, sidecarNames...)) + if err != nil { + return err + } + containerIDs, err := d.getContainerIDs(containerNames) + if err != nil { + return err + } + _, err = common.RunCommand("docker", append([]string{"wait"}, containerIDs...)...) 
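For context on the `docker wait` invocation above, a self-contained sketch of waiting on several resolved container IDs (the IDs are placeholders, not values from this patch):

package main

import (
	"fmt"
	"os/exec"
)

// waitForContainers blocks until every container ID passed to `docker wait`
// has stopped; docker prints one exit code per line as each container exits.
func waitForContainers(containerIDs []string) (string, error) {
	args := append([]string{"wait"}, containerIDs...)
	out, err := exec.Command("docker", args...).Output()
	return string(out), err
}

func main() {
	codes, err := waitForContainers([]string{"id-main", "id-sidecar"}) // placeholder IDs
	fmt.Println(codes, err)
}
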
+ return err } -// Wait for the container to complete -func (d *DockerExecutor) Wait(ctx context.Context, containerID string) error { - _, err := common.RunCommand("docker", "wait", containerID) - return err +func (d *DockerExecutor) syncContainerIDs(ctx context.Context, containerNames []string) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + output, err := common.RunCommand( + "docker", + "ps", + "--all", // container could have already exited, but there could also have been two containers for the same pod (old container not yet cleaned-up) + "--no-trunc", // display long container IDs + "--format={{.Label \"io.kubernetes.container.name\"}}={{.ID}}", + // https://github.com/kubernetes/kubernetes/blob/ca6bdba014f0a98efe0e0dd4e15f57d1c121d6c9/pkg/kubelet/dockertools/labels.go#L37 + "--filter=label=io.kubernetes.pod.namespace="+d.namespace, + "--filter=label=io.kubernetes.pod.name="+d.podName, + ) + if err != nil { + return err + } + for _, l := range strings.Split(string(output), "\n") { + parts := strings.Split(strings.TrimSpace(l), "=") + if len(parts) != 2 { + continue + } + containerName := parts[0] + containerID := parts[1] + if d.containers[containerName] == "" && containerID != "" { + d.containers[containerName] = containerID + log.Infof("mapped container name %q to container ID %q", containerName, containerID) + } + } + if d.haveContainers(containerNames) { + return nil + } + } + time.Sleep(1 * time.Second) // this is a hard-loop because containers can run very short periods of time + } +} + +func (d *DockerExecutor) haveContainers(containerNames []string) bool { + for _, n := range containerNames { + if d.containers[n] == "" { + return false + } + } + return true } -// killContainers kills a list of containerIDs first with a SIGTERM then with a SIGKILL after a grace period -func (d *DockerExecutor) Kill(ctx context.Context, containerIDs []string, terminationGracePeriodDuration time.Duration) error { +func (d *DockerExecutor) getContainerID(containerName string) (string, error) { + if containerID, ok := d.containers[containerName]; ok { + return containerID, nil + } + return "", errContainerNotExist +} + +// killContainers kills a list of containerNames first with a SIGTERM then with a SIGKILL after a grace period +func (d *DockerExecutor) Kill(ctx context.Context, containerNames []string, terminationGracePeriodDuration time.Duration) error { + + containerIDs, err := d.getContainerIDs(containerNames) + if err != nil { + return err + } + + if len(containerIDs) == 0 { // they may have already terminated + log.Info("zero container IDs, assuming all containers have exited successfully") + return nil + } + killArgs := append([]string{"kill", "--signal", "TERM"}, containerIDs...) // docker kill will return with an error if a container has terminated already, which is not an error in this case. // We therefore ignore any error. docker wait that follows will re-raise any other error with the container. - _, err := common.RunCommand("docker", killArgs...) + _, err = common.RunCommand("docker", killArgs...) 
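The surrounding TERM-then-KILL flow can be sketched in isolation; this simplified version sleeps for the whole grace period, whereas the code here waits on the containers with a timeout in between (container IDs are placeholders):

package main

import (
	"os/exec"
	"time"
)

// killGracefully sends SIGTERM to the given containers, waits out the grace
// period, then sends SIGKILL to any that may still be running.
func killGracefully(containerIDs []string, grace time.Duration) {
	// Errors are ignored because `docker kill` fails for containers that
	// have already exited, which is not a problem in this flow.
	term := append([]string{"kill", "--signal", "TERM"}, containerIDs...)
	_ = exec.Command("docker", term...).Run()

	// Simplified: the real code uses `docker wait` with a timeout rather
	// than sleeping unconditionally for the full grace period.
	time.Sleep(grace)

	kill := append([]string{"kill", "--signal", "KILL"}, containerIDs...)
	_ = exec.Command("docker", kill...).Run()
}

func main() {
	killGracefully([]string{"id-main"}, 10*time.Second) // placeholder ID
}
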
if err != nil { log.Warningf("Ignored error from 'docker kill --signal TERM': %s", err) } @@ -214,6 +304,21 @@ func (d *DockerExecutor) Kill(ctx context.Context, containerIDs []string, termin return nil } +func (d *DockerExecutor) getContainerIDs(containerNames []string) ([]string, error) { + var containerIDs []string + for _, n := range containerNames { + containerID, err := d.getContainerID(n) + if err == errContainerNotExist { + continue + } + if err != nil { + return nil, err + } + containerIDs = append(containerIDs, containerID) + } + return containerIDs, nil +} + // getDockerCpCmd uses os-specific code to run `docker cp` and gzip/7zip to copy gzipped data from another // container. func getDockerCpCmd(containerID, sourcePath string, compressionLevel int, destPath string) string { diff --git a/workflow/executor/executor.go b/workflow/executor/executor.go index 3aa4d7a78186..64402bbdbab2 100644 --- a/workflow/executor/executor.go +++ b/workflow/executor/executor.go @@ -23,9 +23,7 @@ import ( log "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "github.com/argoproj/argo-workflows/v3/errors" @@ -69,8 +67,6 @@ type WorkflowExecutor struct { ExecutionControl *common.ExecutionControl RuntimeExecutor ContainerRuntimeExecutor - // memoized container ID to prevent multiple lookups - mainContainerID string // memoized configmaps memoizedConfigMaps map[string]string // memoized secrets @@ -85,28 +81,25 @@ type WorkflowExecutor struct { // ContainerRuntimeExecutor is the interface for interacting with a container runtime (e.g. docker) type ContainerRuntimeExecutor interface { // GetFileContents returns the file contents of a file in a container as a string - GetFileContents(containerID string, sourcePath string) (string, error) + GetFileContents(containerName string, sourcePath string) (string, error) // CopyFile copies a source file in a container to a local path - CopyFile(containerID string, sourcePath string, destPath string, compressionLevel int) error + CopyFile(containerName, sourcePath, destPath string, compressionLevel int) error // GetOutputStream returns the entirety of the container output as a io.Reader // Used to capture script results as an output parameter, and to archive container logs - GetOutputStream(ctx context.Context, containerID string, combinedOutput bool) (io.ReadCloser, error) + GetOutputStream(ctx context.Context, containerName string, combinedOutput bool) (io.ReadCloser, error) // GetExitCode returns the exit code of the container // Used to capture script exit code as an output parameter - GetExitCode(ctx context.Context, containerID string) (string, error) + GetExitCode(ctx context.Context, containerName string) (string, error) - // WaitInit is called before Wait() to signal the executor about an impending Wait call. - // For most executors this is a noop, and is only used by the the PNS executor - WaitInit() error + // Wait waits for the container to complete. + // The implementation should not wait for the sidecars. These are included in case you need to capture data on them. 
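A trivial no-op implementation makes the new, name-based shape of this interface concrete; this is a sketch against the signatures declared here, with names that do not come from this patch:

package main

import (
	"context"
	"io"
	"strings"
	"time"
)

// noopExecutor satisfies the name-based ContainerRuntimeExecutor interface
// with do-nothing implementations, a possible starting point for tests.
type noopExecutor struct{}

func (noopExecutor) GetFileContents(containerName, sourcePath string) (string, error) {
	return "", nil
}

func (noopExecutor) CopyFile(containerName, sourcePath, destPath string, compressionLevel int) error {
	return nil
}

func (noopExecutor) GetOutputStream(ctx context.Context, containerName string, combinedOutput bool) (io.ReadCloser, error) {
	return io.NopCloser(strings.NewReader("")), nil
}

func (noopExecutor) GetExitCode(ctx context.Context, containerName string) (string, error) {
	return "0", nil
}

func (noopExecutor) Wait(ctx context.Context, containerNames, sidecarNames []string) error {
	return nil
}

func (noopExecutor) Kill(ctx context.Context, containerNames []string, terminationGracePeriodDuration time.Duration) error {
	return nil
}

func main() {}
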
+ Wait(ctx context.Context, containerNames, sidecarNames []string) error - // Wait waits for the container to complete - Wait(ctx context.Context, containerID string) error - - // Kill a list of containerIDs first with a SIGTERM then with a SIGKILL after a grace period - Kill(ctx context.Context, containerIDs []string, terminationGracePeriodDuration time.Duration) error + // Kill a list of containers first with a SIGTERM then with a SIGKILL after a grace period + Kill(ctx context.Context, containerNames []string, terminationGracePeriodDuration time.Duration) error } // NewExecutor instantiates a new workflow executor @@ -268,18 +261,13 @@ func (we *WorkflowExecutor) SaveArtifacts(ctx context.Context) error { return nil } log.Infof("Saving output artifacts") - mainCtrID, err := we.GetMainContainerID(ctx) - if err != nil { - return err - } - - err = os.MkdirAll(tempOutArtDir, os.ModePerm) + err := os.MkdirAll(tempOutArtDir, os.ModePerm) if err != nil { return errors.InternalWrapError(err) } for i, art := range we.Template.Outputs.Artifacts { - err := we.saveArtifact(ctx, mainCtrID, &art) + err := we.saveArtifact(ctx, common.MainContainerName, &art) if err != nil { return err } @@ -288,12 +276,12 @@ func (we *WorkflowExecutor) SaveArtifacts(ctx context.Context) error { return nil } -func (we *WorkflowExecutor) saveArtifact(ctx context.Context, mainCtrID string, art *wfv1.Artifact) error { +func (we *WorkflowExecutor) saveArtifact(ctx context.Context, containerName string, art *wfv1.Artifact) error { // Determine the file path of where to find the artifact if art.Path == "" { return errors.InternalErrorf("Artifact %s did not specify a path", art.Name) } - fileName, localArtPath, err := we.stageArchiveFile(mainCtrID, art) + fileName, localArtPath, err := we.stageArchiveFile(containerName, art) if err != nil { if art.Optional && errors.IsCode(errors.CodeNotFound, err) { log.Warnf("Ignoring optional artifact '%s' which does not exist in path '%s': %v", art.Name, art.Path, err) @@ -354,7 +342,7 @@ func (we *WorkflowExecutor) maybeDeleteLocalArtPath(localArtPath string) { // The filename is incorporated into the final path when uploading it to the artifact repo. // The local path is the final staging location of the file (or directory) which we will pass // to the SaveArtifacts call and may be a directory or file. 
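The compression-level plumbing used by this staging step can be illustrated with the standard library alone: a tar stream wrapped in a gzip writer constructed at an explicit level (paths and level below are illustrative, not from this patch):

package main

import (
	"archive/tar"
	"compress/gzip"
	"io"
	"os"
)

// writeTarGz archives a single file into destPath as .tgz using the given
// gzip compression level (gzip.NoCompression through gzip.BestCompression).
func writeTarGz(srcPath, destPath string, level int) error {
	src, err := os.Open(srcPath)
	if err != nil {
		return err
	}
	defer src.Close()
	info, err := src.Stat()
	if err != nil {
		return err
	}
	dest, err := os.Create(destPath)
	if err != nil {
		return err
	}
	defer dest.Close()
	gz, err := gzip.NewWriterLevel(dest, level)
	if err != nil {
		return err
	}
	defer gz.Close()
	tw := tar.NewWriter(gz)
	defer tw.Close()
	hdr, err := tar.FileInfoHeader(info, "")
	if err != nil {
		return err
	}
	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}
	_, err = io.Copy(tw, src)
	return err
}

func main() {
	_ = writeTarGz("/tmp/in.txt", "/tmp/out.tgz", gzip.BestSpeed) // placeholder paths
}
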
-func (we *WorkflowExecutor) stageArchiveFile(mainCtrID string, art *wfv1.Artifact) (string, string, error) { +func (we *WorkflowExecutor) stageArchiveFile(containerName string, art *wfv1.Artifact) (string, string, error) { log.Infof("Staging artifact: %s", art.Name) strategy := art.Archive if strategy == nil { @@ -405,7 +393,7 @@ func (we *WorkflowExecutor) stageArchiveFile(mainCtrID string, art *wfv1.Artifac localArtPath := filepath.Join(tempOutArtDir, fileName) log.Infof("Copying %s from container base image layer to %s", art.Path, localArtPath) - err := we.RuntimeExecutor.CopyFile(mainCtrID, art.Path, localArtPath, compressionLevel) + err := we.RuntimeExecutor.CopyFile(containerName, art.Path, localArtPath, compressionLevel) if err != nil { return "", "", err } @@ -479,11 +467,6 @@ func (we *WorkflowExecutor) SaveParameters(ctx context.Context) error { return nil } log.Infof("Saving output parameters") - mainCtrID, err := we.GetMainContainerID(ctx) - if err != nil { - return err - } - for i, param := range we.Template.Outputs.Parameters { log.Infof("Saving path output parameter: %s", param.Name) // Determine the file path of where to find the parameter @@ -500,7 +483,7 @@ func (we *WorkflowExecutor) SaveParameters(ctx context.Context) error { continue } log.Infof("Copying %s from base image layer", param.ValueFrom.Path) - fileContents, err := we.RuntimeExecutor.GetFileContents(mainCtrID, param.ValueFrom.Path) + fileContents, err := we.RuntimeExecutor.GetFileContents(common.MainContainerName, param.ValueFrom.Path) if err != nil { // We have a default value to use instead of returning an error if param.ValueFrom.Default != nil { @@ -541,18 +524,14 @@ func (we *WorkflowExecutor) SaveLogs(ctx context.Context) (*wfv1.Artifact, error return nil, nil } log.Infof("Saving logs") - mainCtrID, err := we.GetMainContainerID(ctx) - if err != nil { - return nil, err - } tempLogsDir := "/tmp/argo/outputs/logs" - err = os.MkdirAll(tempLogsDir, os.ModePerm) + err := os.MkdirAll(tempLogsDir, os.ModePerm) if err != nil { return nil, errors.InternalWrapError(err) } fileName := "main.log" mainLog := path.Join(tempLogsDir, fileName) - err = we.saveLogToFile(ctx, mainCtrID, mainLog) + err = we.saveLogToFile(ctx, common.MainContainerName, mainLog) if err != nil { return nil, err } @@ -574,13 +553,13 @@ func (we *WorkflowExecutor) GetSecret(ctx context.Context, accessKeyName string, } // saveLogToFile saves the entire log output of a container to a local file -func (we *WorkflowExecutor) saveLogToFile(ctx context.Context, mainCtrID, path string) error { +func (we *WorkflowExecutor) saveLogToFile(ctx context.Context, containerName, path string) error { outFile, err := os.Create(path) if err != nil { return errors.InternalWrapError(err) } defer func() { _ = outFile.Close() }() - reader, err := we.RuntimeExecutor.GetOutputStream(ctx, mainCtrID, true) + reader, err := we.RuntimeExecutor.GetOutputStream(ctx, containerName, true) if err != nil { return err } @@ -689,36 +668,6 @@ func (we *WorkflowExecutor) GetTerminationGracePeriodDuration(ctx context.Contex return terminationGracePeriodDuration, nil } -// GetMainContainerStatus returns the container status of the main container, nil if the main container does not exist -func (we *WorkflowExecutor) GetMainContainerStatus(ctx context.Context) (*apiv1.ContainerStatus, error) { - pod, err := we.getPod(ctx) - if err != nil { - return nil, err - } - for _, ctrStatus := range pod.Status.ContainerStatuses { - if ctrStatus.Name == common.MainContainerName { - return 
&ctrStatus, nil - } - } - return nil, nil -} - -// GetMainContainerID returns the container id of the main container -func (we *WorkflowExecutor) GetMainContainerID(ctx context.Context) (string, error) { - if we.mainContainerID != "" { - return we.mainContainerID, nil - } - ctrStatus, err := we.GetMainContainerStatus(ctx) - if err != nil { - return "", err - } - if ctrStatus == nil { - return "", nil - } - we.mainContainerID = containerID(ctrStatus.ContainerID) - return we.mainContainerID, nil -} - // CaptureScriptResult will add the stdout of a script template as output result func (we *WorkflowExecutor) CaptureScriptResult(ctx context.Context) error { @@ -731,11 +680,7 @@ func (we *WorkflowExecutor) CaptureScriptResult(ctx context.Context) error { return nil } log.Infof("Capturing script output") - mainContainerID, err := we.GetMainContainerID(ctx) - if err != nil { - return err - } - reader, err := we.RuntimeExecutor.GetOutputStream(ctx, mainContainerID, false) + reader, err := we.RuntimeExecutor.GetOutputStream(ctx, common.MainContainerName, false) if err != nil { return err } @@ -769,11 +714,7 @@ func (we *WorkflowExecutor) CaptureScriptExitCode(ctx context.Context) error { return nil } log.Infof("Capturing script exit code") - mainContainerID, err := we.GetMainContainerID(ctx) - if err != nil { - return err - } - exitCode, err := we.RuntimeExecutor.GetExitCode(ctx, mainContainerID) + exitCode, err := we.RuntimeExecutor.GetExitCode(ctx, common.MainContainerName) if err != nil { return err } @@ -976,95 +917,24 @@ func chmod(artPath string, mode int32, recurse bool) error { return nil } -// containerID is a convenience function to strip the 'docker://', 'containerd://' from k8s ContainerID string -func containerID(ctrID string) string { - schemeIndex := strings.Index(ctrID, "://") - if schemeIndex == -1 { - return ctrID - } - return ctrID[schemeIndex+3:] -} - // Wait is the sidecar container logic which waits for the main container to complete. // Also monitors for updates in the pod annotations which may change (e.g. terminate) // Upon completion, kills any sidecars after it finishes. func (we *WorkflowExecutor) Wait(ctx context.Context) error { - err := we.RuntimeExecutor.WaitInit() - if err != nil { - return err - } - log.Infof("Waiting on main container") - mainContainerID, err := we.waitMainContainerStart(ctx) - if err != nil { - return err - } - log.Infof("main container started with container ID: %s", mainContainerID) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - + containerNames := []string{common.MainContainerName} annotationUpdatesCh := we.monitorAnnotations(ctx) - go we.monitorDeadline(ctx, annotationUpdatesCh) - - err = waitutil.Backoff(ExecutorRetry, func() (bool, error) { - err := we.RuntimeExecutor.Wait(ctx, mainContainerID) + go we.monitorDeadline(ctx, containerNames, annotationUpdatesCh) + err := waitutil.Backoff(ExecutorRetry, func() (bool, error) { + err := we.RuntimeExecutor.Wait(ctx, containerNames, we.Template.GetSidecarNames()) return err == nil, err }) if err != nil { - return err + return fmt.Errorf("failed to wait for main container to complete: %w", err) } log.Infof("Main container completed") return nil } -// waitMainContainerStart waits for the main container to start and returns its container ID. 
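The backoff-wrapped wait that replaces this watch-based loop follows a standard capped exponential retry; a minimal sketch using k8s.io/apimachinery's wait package, with illustrative values rather than the patch's ExecutorRetry settings:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	backoff := wait.Backoff{Duration: time.Second, Factor: 2, Steps: 5}
	attempt := 0
	// ExponentialBackoff retries the condition with growing delays until it
	// returns true, returns an error, or the step budget is exhausted.
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempt++
		fmt.Println("attempt", attempt)
		return attempt >= 3, nil // pretend the operation succeeds on the third try
	})
	fmt.Println(err)
}
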
-func (we *WorkflowExecutor) waitMainContainerStart(ctx context.Context) (string, error) { - for { - podsIf := we.ClientSet.CoreV1().Pods(we.Namespace) - fieldSelector := fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", we.PodName)) - opts := metav1.ListOptions{ - FieldSelector: fieldSelector.String(), - } - - var watchIf watch.Interface - - err := waitutil.Backoff(ExecutorRetry, func() (bool, error) { - var err error - watchIf, err = podsIf.Watch(ctx, opts) - return !errorsutil.IsTransientErr(err), err - }) - if err != nil { - return "", errors.InternalWrapErrorf(err, "Failed to establish pod watch: %v", err) - } - for watchEv := range watchIf.ResultChan() { - if watchEv.Type == watch.Error { - return "", errors.InternalErrorf("Pod watch error waiting for main to start: %v", watchEv.Object) - } - pod, ok := watchEv.Object.(*apiv1.Pod) - if !ok { - log.Warnf("Pod watch returned non pod object: %v", watchEv.Object) - continue - } - for _, ctrStatus := range pod.Status.ContainerStatuses { - if ctrStatus.Name == common.MainContainerName { - log.Debug(ctrStatus) - if ctrStatus.State.Waiting != nil { - // main container is still in waiting status - } else if ctrStatus.State.Waiting == nil && ctrStatus.State.Running == nil && ctrStatus.State.Terminated == nil { - // status still not ready, wait - } else if ctrStatus.ContainerID != "" { - we.mainContainerID = containerID(ctrStatus.ContainerID) - return containerID(ctrStatus.ContainerID), nil - } else { - // main container in running or terminated state but missing container ID - return "", errors.InternalError("Main container ID cannot be found") - } - } - } - } - log.Warnf("Pod watch closed unexpectedly") - } -} - func watchFileChanges(ctx context.Context, pollInterval time.Duration, filePath string) <-chan struct{} { res := make(chan struct{}) go func() { @@ -1104,7 +974,10 @@ func (we *WorkflowExecutor) monitorAnnotations(ctx context.Context) <-chan struc sigs := make(chan os.Signal, 1) signal.Notify(sigs, os_specific.GetOsSignal()) - we.setExecutionControl(ctx) + err := we.LoadExecutionControl() // this is much cheaper than doing `get pod` + if err != nil { + log.Errorf("Failed to reload execution control from annotations: %v", err) + } // Create a channel which will notify a listener on new updates to the annotations annotationUpdateCh := make(chan struct{}) @@ -1164,7 +1037,7 @@ func (we *WorkflowExecutor) setExecutionControl(ctx context.Context) { // monitorDeadline checks to see if we exceeded the deadline for the step and // terminates the main container if we did -func (we *WorkflowExecutor) monitorDeadline(ctx context.Context, annotationsUpdate <-chan struct{}) { +func (we *WorkflowExecutor) monitorDeadline(ctx context.Context, containerNames []string, annotationsUpdate <-chan struct{}) { log.Infof("Starting deadline monitor") for { select { @@ -1189,9 +1062,8 @@ func (we *WorkflowExecutor) monitorDeadline(ctx context.Context, annotationsUpda log.Info(message) _ = we.AddAnnotation(ctx, common.AnnotationKeyNodeMessage, message) log.Infof("Killing main container") - mainContainerID, _ := we.GetMainContainerID(ctx) terminationGracePeriodDuration, _ := we.GetTerminationGracePeriodDuration(ctx) - err := we.RuntimeExecutor.Kill(ctx, []string{mainContainerID}, terminationGracePeriodDuration) + err := we.RuntimeExecutor.Kill(ctx, containerNames, terminationGracePeriodDuration) if err != nil { log.Warnf("Failed to kill main container: %v", err) } @@ -1205,28 +1077,10 @@ func (we *WorkflowExecutor) monitorDeadline(ctx context.Context, 
annotationsUpda // KillSidecars kills any sidecars to the main container func (we *WorkflowExecutor) KillSidecars(ctx context.Context) error { - log.Infof("Killing sidecars") - pod, err := we.getPod(ctx) - if err != nil { - return err - } - sidecarIDs := make([]string, 0) - for _, ctrStatus := range pod.Status.ContainerStatuses { - if ctrStatus.Name == common.MainContainerName || ctrStatus.Name == common.WaitContainerName { - continue - } - if ctrStatus.State.Terminated != nil { - continue - } - containerID := containerID(ctrStatus.ContainerID) - log.Infof("Killing sidecar %s (%s)", ctrStatus.Name, containerID) - sidecarIDs = append(sidecarIDs, containerID) - } - if len(sidecarIDs) == 0 { - return nil - } + sidecarNames := we.Template.GetSidecarNames() + log.Infof("Killing sidecars %s", strings.Join(sidecarNames, ",")) terminationGracePeriodDuration, _ := we.GetTerminationGracePeriodDuration(ctx) - return we.RuntimeExecutor.Kill(ctx, sidecarIDs, terminationGracePeriodDuration) + return we.RuntimeExecutor.Kill(ctx, sidecarNames, terminationGracePeriodDuration) } // LoadExecutionControl reads the execution control definition from the the Kubernetes downward api annotations volume file diff --git a/workflow/executor/executor_test.go b/workflow/executor/executor_test.go index 438adaa1062f..0ce37ba1ee93 100644 --- a/workflow/executor/executor_test.go +++ b/workflow/executor/executor_test.go @@ -16,10 +16,10 @@ import ( ) const ( - fakePodName = "fake-test-pod-1234567890" - fakeNamespace = "default" - fakeAnnotations = "/tmp/podannotationspath" - fakeContainerID = "abc123" + fakePodName = "fake-test-pod-1234567890" + fakeNamespace = "default" + fakeAnnotations = "/tmp/podannotationspath" + fakeContainerName = "main" ) func TestSaveParameters(t *testing.T) { @@ -45,9 +45,8 @@ func TestSaveParameters(t *testing.T) { PodAnnotationsPath: fakeAnnotations, ExecutionControl: nil, RuntimeExecutor: &mockRuntimeExecutor, - mainContainerID: fakeContainerID, } - mockRuntimeExecutor.On("GetFileContents", fakeContainerID, "/path").Return("has a newline\n", nil) + mockRuntimeExecutor.On("GetFileContents", fakeContainerName, "/path").Return("has a newline\n", nil) ctx := context.Background() err := we.SaveParameters(ctx) @@ -135,9 +134,8 @@ func TestDefaultParameters(t *testing.T) { PodAnnotationsPath: fakeAnnotations, ExecutionControl: nil, RuntimeExecutor: &mockRuntimeExecutor, - mainContainerID: fakeContainerID, } - mockRuntimeExecutor.On("GetFileContents", fakeContainerID, "/path").Return("", fmt.Errorf("file not found")) + mockRuntimeExecutor.On("GetFileContents", fakeContainerName, "/path").Return("", fmt.Errorf("file not found")) ctx := context.Background() err := we.SaveParameters(ctx) @@ -169,9 +167,8 @@ func TestDefaultParametersEmptyString(t *testing.T) { PodAnnotationsPath: fakeAnnotations, ExecutionControl: nil, RuntimeExecutor: &mockRuntimeExecutor, - mainContainerID: fakeContainerID, } - mockRuntimeExecutor.On("GetFileContents", fakeContainerID, "/path").Return("", fmt.Errorf("file not found")) + mockRuntimeExecutor.On("GetFileContents", fakeContainerName, "/path").Return("", fmt.Errorf("file not found")) ctx := context.Background() err := we.SaveParameters(ctx) @@ -329,7 +326,6 @@ func TestSaveArtifacts(t *testing.T) { PodAnnotationsPath: fakeAnnotations, ExecutionControl: nil, RuntimeExecutor: &mockRuntimeExecutor, - mainContainerID: fakeContainerID, } ctx := context.Background() diff --git a/workflow/executor/k8sapi/client.go b/workflow/executor/k8sapi/client.go index 
420057e147ed..6f9dfe81b02d 100644 --- a/workflow/executor/k8sapi/client.go +++ b/workflow/executor/k8sapi/client.go @@ -9,17 +9,21 @@ import ( "time" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "github.com/argoproj/argo-workflows/v3/errors" + errorsutil "github.com/argoproj/argo-workflows/v3/util/errors" + waitutil "github.com/argoproj/argo-workflows/v3/util/wait" "github.com/argoproj/argo-workflows/v3/workflow/common" execcommon "github.com/argoproj/argo-workflows/v3/workflow/executor/common" ) type k8sAPIClient struct { - clientset *kubernetes.Clientset + clientset kubernetes.Interface config *restclient.Config podName string namespace string @@ -27,7 +31,7 @@ type k8sAPIClient struct { var _ execcommon.KubernetesClientInterface = &k8sAPIClient{} -func newK8sAPIClient(clientset *kubernetes.Clientset, config *restclient.Config, podName, namespace string) (*k8sAPIClient, error) { +func newK8sAPIClient(clientset kubernetes.Interface, config *restclient.Config, podName, namespace string) (*k8sAPIClient, error) { return &k8sAPIClient{ clientset: clientset, config: config, @@ -36,13 +40,9 @@ func newK8sAPIClient(clientset *kubernetes.Clientset, config *restclient.Config, }, nil } -func (c *k8sAPIClient) CreateArchive(ctx context.Context, containerID, sourcePath string) (*bytes.Buffer, error) { - _, containerStatus, err := c.GetContainerStatus(ctx, containerID) - if err != nil { - return nil, err - } +func (c *k8sAPIClient) CreateArchive(ctx context.Context, containerName, sourcePath string) (*bytes.Buffer, error) { command := []string{"tar", "cf", "-", sourcePath} - exec, err := common.ExecPodContainer(c.config, c.namespace, c.podName, containerStatus.Name, true, false, command...) + exec, err := common.ExecPodContainer(c.config, c.namespace, c.podName, containerName, true, false, command...) if err != nil { return nil, err } @@ -53,31 +53,47 @@ func (c *k8sAPIClient) CreateArchive(ctx context.Context, containerID, sourcePat return stdOut, nil } -func (c *k8sAPIClient) getLogsAsStream(ctx context.Context, containerID string) (io.ReadCloser, error) { - _, containerStatus, err := c.GetContainerStatus(ctx, containerID) - if err != nil { - return nil, err - } +func (c *k8sAPIClient) getLogsAsStream(ctx context.Context, containerName string) (io.ReadCloser, error) { return c.clientset.CoreV1().Pods(c.namespace). 
- GetLogs(c.podName, &corev1.PodLogOptions{Container: containerStatus.Name, SinceTime: &metav1.Time{}}).Stream(ctx) + GetLogs(c.podName, &corev1.PodLogOptions{Container: containerName, SinceTime: &metav1.Time{}}).Stream(ctx) +} + +var backoffOver30s = wait.Backoff{ + Duration: 1 * time.Second, + Steps: 7, + Factor: 2, } func (c *k8sAPIClient) getPod(ctx context.Context) (*corev1.Pod, error) { - return c.clientset.CoreV1().Pods(c.namespace).Get(ctx, c.podName, metav1.GetOptions{}) + var pod *corev1.Pod + err := waitutil.Backoff(backoffOver30s, func() (bool, error) { + var err error + pod, err = c.clientset.CoreV1().Pods(c.namespace).Get(ctx, c.podName, metav1.GetOptions{}) + return !errorsutil.IsTransientErr(err), err + }) + return pod, err } -func (c *k8sAPIClient) GetContainerStatus(ctx context.Context, containerID string) (*corev1.Pod, *corev1.ContainerStatus, error) { - pod, err := c.getPod(ctx) +func (c *k8sAPIClient) GetContainerStatus(ctx context.Context, containerName string) (*corev1.Pod, *corev1.ContainerStatus, error) { + pod, containerStatuses, err := c.GetContainerStatuses(ctx) if err != nil { return nil, nil, err } - for _, containerStatus := range pod.Status.ContainerStatuses { - if execcommon.GetContainerID(&containerStatus) != containerID { + for _, s := range containerStatuses { + if s.Name != containerName { continue } - return pod, &containerStatus, nil + return pod, &s, nil } - return nil, nil, errors.New(errors.CodeNotFound, fmt.Sprintf("containerID %q is not found in the pod %s", containerID, c.podName)) + return nil, nil, errors.New(errors.CodeNotFound, fmt.Sprintf("container %q is not found in the pod %s", containerName, c.podName)) +} + +func (c *k8sAPIClient) GetContainerStatuses(ctx context.Context) (*corev1.Pod, []corev1.ContainerStatus, error) { + pod, err := c.getPod(ctx) + if err != nil { + return nil, nil, err + } + return pod, pod.Status.ContainerStatuses, nil } func (c *k8sAPIClient) KillContainer(pod *corev1.Pod, container *corev1.ContainerStatus, sig syscall.Signal) error { @@ -90,6 +106,40 @@ func (c *k8sAPIClient) KillContainer(pod *corev1.Pod, container *corev1.Containe return err } -func (c *k8sAPIClient) killGracefully(ctx context.Context, containerID string, terminationGracePeriodDuration time.Duration) error { - return execcommon.KillGracefully(ctx, c, containerID, terminationGracePeriodDuration) +func (c *k8sAPIClient) killGracefully(ctx context.Context, containerNames []string, terminationGracePeriodDuration time.Duration) error { + return execcommon.KillGracefully(ctx, c, containerNames, terminationGracePeriodDuration) +} + +func (c *k8sAPIClient) until(ctx context.Context, f func(pod *corev1.Pod) bool) error { + podInterface := c.clientset.CoreV1().Pods(c.namespace) + for { + done, err := func() (bool, error) { + w, err := podInterface.Watch(ctx, metav1.ListOptions{FieldSelector: "metadata.name=" + c.podName}) + if err != nil { + return false, err + } + defer w.Stop() + for { + select { + case <-ctx.Done(): + return true, ctx.Err() + case event, open := <-w.ResultChan(): + if !open { + return false, fmt.Errorf("channel not open") + } + pod, ok := event.Object.(*corev1.Pod) + if !ok { + return false, apierrors.FromObject(event.Object) + } + done := f(pod) + if done { + return true, nil + } + } + } + }() + if done { + return err + } + } } diff --git a/workflow/executor/k8sapi/k8sapi.go b/workflow/executor/k8sapi/k8sapi.go index 28936f120b38..67ae5ed932d1 100644 --- a/workflow/executor/k8sapi/k8sapi.go +++ b/workflow/executor/k8sapi/k8sapi.go @@ 
-7,18 +7,19 @@ import ( "time" log "github.com/sirupsen/logrus" + corev1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "github.com/argoproj/argo-workflows/v3/errors" - "github.com/argoproj/argo-workflows/v3/workflow/executor/common/wait" + "github.com/argoproj/argo-workflows/v3/util/slice" ) type K8sAPIExecutor struct { client *k8sAPIClient } -func NewK8sAPIExecutor(clientset *kubernetes.Clientset, config *restclient.Config, podName, namespace string) (*K8sAPIExecutor, error) { +func NewK8sAPIExecutor(clientset kubernetes.Interface, config *restclient.Config, podName, namespace string) (*K8sAPIExecutor, error) { log.Infof("Creating a K8sAPI executor") client, err := newK8sAPIClient(clientset, config, podName, namespace) if err != nil { @@ -29,25 +30,25 @@ func NewK8sAPIExecutor(clientset *kubernetes.Clientset, config *restclient.Confi }, nil } -func (k *K8sAPIExecutor) GetFileContents(containerID string, sourcePath string) (string, error) { +func (k *K8sAPIExecutor) GetFileContents(containerName string, sourcePath string) (string, error) { return "", errors.Errorf(errors.CodeNotImplemented, "GetFileContents() is not implemented in the k8sapi executor.") } -func (k *K8sAPIExecutor) CopyFile(containerID string, sourcePath string, destPath string, compressionLevel int) error { +func (k *K8sAPIExecutor) CopyFile(containerName string, sourcePath string, destPath string, compressionLevel int) error { return errors.Errorf(errors.CodeNotImplemented, "CopyFile() is not implemented in the k8sapi executor.") } -func (k *K8sAPIExecutor) GetOutputStream(ctx context.Context, containerID string, combinedOutput bool) (io.ReadCloser, error) { - log.Infof("Getting output of %s", containerID) +func (k *K8sAPIExecutor) GetOutputStream(ctx context.Context, containerName string, combinedOutput bool) (io.ReadCloser, error) { + log.Infof("Getting output of %s", containerName) if !combinedOutput { log.Warn("non combined output unsupported") } - return k.client.getLogsAsStream(ctx, containerID) + return k.client.getLogsAsStream(ctx, containerName) } -func (k *K8sAPIExecutor) GetExitCode(ctx context.Context, containerID string) (string, error) { - log.Infof("Getting exit code of %s", containerID) - _, status, err := k.client.GetContainerStatus(ctx, containerID) +func (k *K8sAPIExecutor) GetExitCode(ctx context.Context, containerName string) (string, error) { + log.Infof("Getting exit code of %s", containerName) + _, status, err := k.client.GetContainerStatus(ctx, containerName) if err != nil { return "", errors.InternalWrapError(err, "Could not get container status") } @@ -57,23 +58,24 @@ func (k *K8sAPIExecutor) GetExitCode(ctx context.Context, containerID string) (s return "", nil } -func (k *K8sAPIExecutor) WaitInit() error { - return nil +// Wait for the container to complete +func (k *K8sAPIExecutor) Wait(ctx context.Context, containerNames, sidecarNames []string) error { + return k.Until(ctx, func(pod *corev1.Pod) bool { + for _, s := range pod.Status.ContainerStatuses { + if s.State.Terminated == nil && slice.ContainsString(containerNames, s.Name) { + return false + } + } + return true + }) } -// Wait for the container to complete -func (k *K8sAPIExecutor) Wait(ctx context.Context, containerID string) error { - return wait.UntilTerminated(ctx, k.client.clientset, k.client.namespace, k.client.podName, containerID) +func (k *K8sAPIExecutor) Until(ctx context.Context, f func(pod *corev1.Pod) bool) error { + return k.client.until(ctx, f) } -// Kill kills a list of 
containerIDs first with a SIGTERM then with a SIGKILL after a grace period -func (k *K8sAPIExecutor) Kill(ctx context.Context, containerIDs []string, terminationGracePeriodDuration time.Duration) error { - log.Infof("Killing containers %s", containerIDs) - for _, containerID := range containerIDs { - err := k.client.killGracefully(ctx, containerID, terminationGracePeriodDuration) - if err != nil { - return err - } - } - return nil +// Kill kills a list of containers first with a SIGTERM then with a SIGKILL after a grace period +func (k *K8sAPIExecutor) Kill(ctx context.Context, containerNames []string, terminationGracePeriodDuration time.Duration) error { + log.Infof("Killing containers %v", containerNames) + return k.client.killGracefully(ctx, containerNames, terminationGracePeriodDuration) } diff --git a/workflow/executor/k8sapi/k8sapi_test.go b/workflow/executor/k8sapi/k8sapi_test.go new file mode 100644 index 000000000000..33f669499723 --- /dev/null +++ b/workflow/executor/k8sapi/k8sapi_test.go @@ -0,0 +1,19 @@ +package k8sapi + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func Test_backoffOver30s(t *testing.T) { + x := backoffOver30s + assert.Equal(t, 1*time.Second, x.Step()) + assert.Equal(t, 2*time.Second, x.Step()) + assert.Equal(t, 4*time.Second, x.Step()) + assert.Equal(t, 8*time.Second, x.Step()) + assert.Equal(t, 16*time.Second, x.Step()) + assert.Equal(t, 32*time.Second, x.Step()) + assert.Equal(t, 64*time.Second, x.Step()) +} diff --git a/workflow/executor/kubelet/client.go b/workflow/executor/kubelet/client.go index 2e47e4c26725..af2a6904baac 100644 --- a/workflow/executor/kubelet/client.go +++ b/workflow/executor/kubelet/client.go @@ -38,11 +38,13 @@ type kubeletClient struct { // - 127.0.0.1:10250 // - my-host.com:10250 kubeletEndpoint string + namespace string + podName string } var _ execcommon.KubernetesClientInterface = &kubeletClient{} -func newKubeletClient() (*kubeletClient, error) { +func newKubeletClient(namespace, podName string) (*kubeletClient, error) { kubeletHost := os.Getenv(common.EnvVarDownwardAPINodeIP) if kubeletHost == "" { return nil, fmt.Errorf("empty envvar %s", common.EnvVarDownwardAPINodeIP) @@ -87,6 +89,8 @@ func newKubeletClient() (*kubeletClient, error) { HandshakeTimeout: time.Second * 5, }, kubeletEndpoint: fmt.Sprintf("%s:%d", kubeletHost, kubeletPort), + namespace: namespace, + podName: podName, }, nil } @@ -99,7 +103,7 @@ func checkHTTPErr(resp *http.Response) error { return nil } -func (k *kubeletClient) getPodList() (*corev1.PodList, error) { +func (k *kubeletClient) getPod() (*corev1.Pod, error) { u, err := url.ParseRequestURI(fmt.Sprintf("https://%s/pods", k.kubeletEndpoint)) if err != nil { return nil, errors.InternalWrapError(err) @@ -112,6 +116,7 @@ func (k *kubeletClient) getPodList() (*corev1.PodList, error) { if err != nil { return nil, errors.InternalWrapError(err) } + log.Infof("List pod %d (kubelet)", resp.StatusCode) // log that we are listing pods from Kubelet err = checkHTTPErr(resp) if err != nil { return nil, err @@ -123,27 +128,23 @@ func (k *kubeletClient) getPodList() (*corev1.PodList, error) { _ = resp.Body.Close() return nil, errors.InternalWrapError(err) } - return podList, resp.Body.Close() + if err := resp.Body.Close(); err != nil { + return nil, err + } + for _, item := range podList.Items { + if item.Namespace == k.namespace && item.Name == k.podName { + return &item, nil + } + } + return nil, fmt.Errorf("pod %q is not found in the pod list", k.podName) } -func (k *kubeletClient) 
GetLogStream(containerID string) (io.ReadCloser, error) { - podList, err := k.getPodList() +func (k *kubeletClient) GetLogStream(containerName string) (io.ReadCloser, error) { + resp, err := k.doRequestLogs(k.namespace, k.podName, containerName) if err != nil { return nil, err } - for _, pod := range podList.Items { - for _, container := range pod.Status.ContainerStatuses { - if execcommon.GetContainerID(&container) != containerID { - continue - } - resp, err := k.doRequestLogs(pod.Namespace, pod.Name, container.Name) - if err != nil { - return nil, err - } - return resp.Body, nil - } - } - return nil, errors.New(errors.CodeNotFound, fmt.Sprintf("containerID %q is not found in the pod list", containerID)) + return resp.Body, nil } func (k *kubeletClient) doRequestLogs(namespace, podName, containerName string) (*http.Response, error) { @@ -166,20 +167,25 @@ func (k *kubeletClient) doRequestLogs(namespace, podName, containerName string) return resp, nil } -func (k *kubeletClient) GetContainerStatus(ctx context.Context, containerID string) (*corev1.Pod, *corev1.ContainerStatus, error) { - podList, err := k.getPodList() +func (k *kubeletClient) GetContainerStatus(ctx context.Context, containerName string) (*corev1.Pod, *corev1.ContainerStatus, error) { + pod, containerStatus, err := k.GetContainerStatuses(ctx) if err != nil { - return nil, nil, errors.InternalWrapError(err) + return nil, nil, err } - for _, pod := range podList.Items { - for _, container := range pod.Status.ContainerStatuses { - if execcommon.GetContainerID(&container) != containerID { - continue - } - return &pod, &container, nil + for _, s := range containerStatus { + if containerName == s.Name { + return pod, &s, nil } } - return nil, nil, errors.New(errors.CodeNotFound, fmt.Sprintf("containerID %q is not found in the pod list", containerID)) + return nil, nil, fmt.Errorf("container %q is not found in the pod", containerName) +} + +func (k *kubeletClient) GetContainerStatuses(ctx context.Context) (*corev1.Pod, []corev1.ContainerStatus, error) { + pod, err := k.getPod() + if err != nil { + return nil, nil, errors.InternalWrapError(err) + } + return pod, pod.Status.ContainerStatuses, nil } func (k *kubeletClient) exec(u *url.URL) (*url.URL, error) { @@ -242,47 +248,39 @@ func (k *kubeletClient) readFileContents(u *url.URL) (*bytes.Buffer, error) { } } -// createArchive exec in the given containerID and create a tarball of the given sourcePath. Works with directory -func (k *kubeletClient) CreateArchive(ctx context.Context, containerID, sourcePath string) (*bytes.Buffer, error) { - return k.getCommandOutput(containerID, fmt.Sprintf("command=tar&command=-cf&command=-&command=%s&output=1", sourcePath)) +// createArchive exec in the given containerName and create a tarball of the given sourcePath. Works with directory +func (k *kubeletClient) CreateArchive(ctx context.Context, containerName, sourcePath string) (*bytes.Buffer, error) { + return k.getCommandOutput(containerName, fmt.Sprintf("command=tar&command=-cf&command=-&command=%s&output=1", sourcePath)) } -// GetFileContents exec in the given containerID and cat the given sourcePath. -func (k *kubeletClient) GetFileContents(containerID, sourcePath string) (*bytes.Buffer, error) { - return k.getCommandOutput(containerID, fmt.Sprintf("command=cat&command=%s&output=1", sourcePath)) +// GetFileContents exec in the given containerName and cat the given sourcePath. 
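The repeated `command=` query parameters seen in these helpers follow the kubelet exec API convention of one parameter per argv element; a sketch of building such a URL with net/url (the endpoint and argv are placeholders):

package main

import (
	"fmt"
	"net/url"
)

// execURL builds a kubelet exec URL where every element of argv becomes its
// own repeated "command" query parameter, as the kubelet API expects.
func execURL(endpoint, namespace, pod, container string, argv []string) string {
	q := url.Values{}
	for _, arg := range argv {
		q.Add("command", arg)
	}
	q.Set("output", "1")
	return fmt.Sprintf("wss://%s/exec/%s/%s/%s?%s", endpoint, namespace, pod, container, q.Encode())
}

func main() {
	fmt.Println(execURL("10.0.0.1:10250", "default", "my-pod", "main", []string{"cat", "/tmp/out.txt"}))
}
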
+func (k *kubeletClient) GetFileContents(containerName, sourcePath string) (*bytes.Buffer, error) { + return k.getCommandOutput(containerName, fmt.Sprintf("command=cat&command=%s&output=1", sourcePath)) } -func (k *kubeletClient) getCommandOutput(containerID, command string) (*bytes.Buffer, error) { - podList, err := k.getPodList() +func (k *kubeletClient) getCommandOutput(containerName, command string) (*bytes.Buffer, error) { + pod, container, err := k.GetContainerStatus(context.Background(), containerName) if err != nil { return nil, errors.InternalWrapError(err) } - for _, pod := range podList.Items { - for _, container := range pod.Status.ContainerStatuses { - if execcommon.GetContainerID(&container) != containerID { - continue - } - if container.State.Terminated != nil { - err = fmt.Errorf("container %s is terminated: %v", container.ContainerID, container.State.Terminated.String()) - return nil, err - } - u, err := url.ParseRequestURI(fmt.Sprintf("wss://%s/exec/%s/%s/%s?%s", k.kubeletEndpoint, pod.Namespace, pod.Name, container.Name, command)) - if err != nil { - return nil, err - } - u, err = k.exec(u) - if err != nil { - return nil, err - } - return k.readFileContents(u) - } + if container.State.Terminated != nil { + return nil, fmt.Errorf("container %q is terminated: %v", containerName, container.State.Terminated.String()) + } + u, err := url.ParseRequestURI(fmt.Sprintf("wss://%s/exec/%s/%s/%s?%s", k.kubeletEndpoint, pod.Namespace, pod.Name, containerName, command)) + if err != nil { + return nil, err } - return nil, errors.New(errors.CodeNotFound, fmt.Sprintf("containerID %q is not found in the pod list", containerID)) + u, err = k.exec(u) + if err != nil { + return nil, err + } + return k.readFileContents(u) + } -// WaitForTermination of the given containerID, set the timeout to 0 to discard it -func (k *kubeletClient) WaitForTermination(ctx context.Context, containerID string, timeout time.Duration) error { - return execcommon.WaitForTermination(ctx, k, containerID, timeout) +// WaitForTermination of the given container, set the timeout to 0 to discard it +func (k *kubeletClient) WaitForTermination(ctx context.Context, containerNames []string, timeout time.Duration) error { + return execcommon.WaitForTermination(ctx, k, containerNames, timeout) } func (k *kubeletClient) KillContainer(pod *corev1.Pod, container *corev1.ContainerStatus, sig syscall.Signal) error { @@ -294,10 +292,10 @@ func (k *kubeletClient) KillContainer(pod *corev1.Pod, container *corev1.Contain return err } -func (k *kubeletClient) KillGracefully(ctx context.Context, containerID string, terminationGracePeriodDuration time.Duration) error { - return execcommon.KillGracefully(ctx, k, containerID, terminationGracePeriodDuration) +func (k *kubeletClient) KillGracefully(ctx context.Context, containerNames []string, terminationGracePeriodDuration time.Duration) error { + return execcommon.KillGracefully(ctx, k, containerNames, terminationGracePeriodDuration) } -func (k *kubeletClient) CopyArchive(ctx context.Context, containerID, sourcePath, destPath string) error { - return execcommon.CopyArchive(ctx, k, containerID, sourcePath, destPath) +func (k *kubeletClient) CopyArchive(ctx context.Context, containerName, sourcePath, destPath string) error { + return execcommon.CopyArchive(ctx, k, containerName, sourcePath, destPath) } diff --git a/workflow/executor/kubelet/kubelet.go b/workflow/executor/kubelet/kubelet.go index 6bcc4285aace..dd9e00e5bb12 100644 --- a/workflow/executor/kubelet/kubelet.go +++ 
b/workflow/executor/kubelet/kubelet.go @@ -15,9 +15,9 @@ type KubeletExecutor struct { cli *kubeletClient } -func NewKubeletExecutor() (*KubeletExecutor, error) { +func NewKubeletExecutor(namespace, podName string) (*KubeletExecutor, error) { log.Infof("Creating a kubelet executor") - cli, err := newKubeletClient() + cli, err := newKubeletClient(namespace, podName) if err != nil { return nil, errors.InternalWrapError(err) } @@ -26,24 +26,24 @@ func NewKubeletExecutor() (*KubeletExecutor, error) { }, nil } -func (k *KubeletExecutor) GetFileContents(containerID string, sourcePath string) (string, error) { +func (k *KubeletExecutor) GetFileContents(containerName string, sourcePath string) (string, error) { return "", errors.Errorf(errors.CodeNotImplemented, "GetFileContents() is not implemented in the kubelet executor.") } -func (k *KubeletExecutor) CopyFile(containerID string, sourcePath string, destPath string, compressionLevel int) error { +func (k *KubeletExecutor) CopyFile(containerName string, sourcePath string, destPath string, compressionLevel int) error { return errors.Errorf(errors.CodeNotImplemented, "CopyFile() is not implemented in the kubelet executor.") } -func (k *KubeletExecutor) GetOutputStream(ctx context.Context, containerID string, combinedOutput bool) (io.ReadCloser, error) { +func (k *KubeletExecutor) GetOutputStream(ctx context.Context, containerName string, combinedOutput bool) (io.ReadCloser, error) { if !combinedOutput { log.Warn("non combined output unsupported") } - return k.cli.GetLogStream(containerID) + return k.cli.GetLogStream(containerName) } -func (k *KubeletExecutor) GetExitCode(ctx context.Context, containerID string) (string, error) { - log.Infof("Getting exit code of %s", containerID) - _, status, err := k.cli.GetContainerStatus(ctx, containerID) +func (k *KubeletExecutor) GetExitCode(ctx context.Context, containerName string) (string, error) { + log.Infof("Getting exit code of %q", containerName) + _, status, err := k.cli.GetContainerStatus(ctx, containerName) if err != nil { return "", errors.InternalWrapError(err, "Could not get container status") } @@ -53,22 +53,12 @@ func (k *KubeletExecutor) GetExitCode(ctx context.Context, containerID string) ( return "", nil } -func (k *KubeletExecutor) WaitInit() error { - return nil -} - // Wait for the container to complete -func (k *KubeletExecutor) Wait(ctx context.Context, containerID string) error { - return k.cli.WaitForTermination(ctx, containerID, 0) +func (k *KubeletExecutor) Wait(ctx context.Context, containerNames, sidecars []string) error { + return k.cli.WaitForTermination(ctx, containerNames, 0) } -// Kill kills a list of containerIDs first with a SIGTERM then with a SIGKILL after a grace period -func (k *KubeletExecutor) Kill(ctx context.Context, containerIDs []string, terminationGracePeriodDuration time.Duration) error { - for _, containerID := range containerIDs { - err := k.cli.KillGracefully(ctx, containerID, terminationGracePeriodDuration) - if err != nil { - return err - } - } - return nil +// Kill kills a list of containers first with a SIGTERM then with a SIGKILL after a grace period +func (k *KubeletExecutor) Kill(ctx context.Context, containerNames []string, terminationGracePeriodDuration time.Duration) error { + return k.cli.KillGracefully(ctx, containerNames, terminationGracePeriodDuration) } diff --git a/workflow/executor/mocks/ContainerRuntimeExecutor.go b/workflow/executor/mocks/ContainerRuntimeExecutor.go index 4e80265654a9..1479ada50d07 100644 --- 
a/workflow/executor/mocks/ContainerRuntimeExecutor.go +++ b/workflow/executor/mocks/ContainerRuntimeExecutor.go @@ -17,13 +17,13 @@ type ContainerRuntimeExecutor struct { mock.Mock } -// CopyFile provides a mock function with given fields: containerID, sourcePath, destPath, compressionLevel -func (_m *ContainerRuntimeExecutor) CopyFile(containerID string, sourcePath string, destPath string, compressionLevel int) error { - ret := _m.Called(containerID, sourcePath, destPath, compressionLevel) +// CopyFile provides a mock function with given fields: containerName, sourcePath, destPath, compressionLevel +func (_m *ContainerRuntimeExecutor) CopyFile(containerName string, sourcePath string, destPath string, compressionLevel int) error { + ret := _m.Called(containerName, sourcePath, destPath, compressionLevel) var r0 error if rf, ok := ret.Get(0).(func(string, string, string, int) error); ok { - r0 = rf(containerID, sourcePath, destPath, compressionLevel) + r0 = rf(containerName, sourcePath, destPath, compressionLevel) } else { r0 = ret.Error(0) } @@ -31,20 +31,20 @@ func (_m *ContainerRuntimeExecutor) CopyFile(containerID string, sourcePath stri return r0 } -// GetExitCode provides a mock function with given fields: ctx, containerID -func (_m *ContainerRuntimeExecutor) GetExitCode(ctx context.Context, containerID string) (string, error) { - ret := _m.Called(ctx, containerID) +// GetExitCode provides a mock function with given fields: ctx, containerName +func (_m *ContainerRuntimeExecutor) GetExitCode(ctx context.Context, containerName string) (string, error) { + ret := _m.Called(ctx, containerName) var r0 string if rf, ok := ret.Get(0).(func(context.Context, string) string); ok { - r0 = rf(ctx, containerID) + r0 = rf(ctx, containerName) } else { r0 = ret.Get(0).(string) } var r1 error if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { - r1 = rf(ctx, containerID) + r1 = rf(ctx, containerName) } else { r1 = ret.Error(1) } @@ -52,20 +52,20 @@ func (_m *ContainerRuntimeExecutor) GetExitCode(ctx context.Context, containerID return r0, r1 } -// GetFileContents provides a mock function with given fields: containerID, sourcePath -func (_m *ContainerRuntimeExecutor) GetFileContents(containerID string, sourcePath string) (string, error) { - ret := _m.Called(containerID, sourcePath) +// GetFileContents provides a mock function with given fields: containerName, sourcePath +func (_m *ContainerRuntimeExecutor) GetFileContents(containerName string, sourcePath string) (string, error) { + ret := _m.Called(containerName, sourcePath) var r0 string if rf, ok := ret.Get(0).(func(string, string) string); ok { - r0 = rf(containerID, sourcePath) + r0 = rf(containerName, sourcePath) } else { r0 = ret.Get(0).(string) } var r1 error if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(containerID, sourcePath) + r1 = rf(containerName, sourcePath) } else { r1 = ret.Error(1) } @@ -73,13 +73,13 @@ func (_m *ContainerRuntimeExecutor) GetFileContents(containerID string, sourcePa return r0, r1 } -// GetOutputStream provides a mock function with given fields: ctx, containerID, combinedOutput -func (_m *ContainerRuntimeExecutor) GetOutputStream(ctx context.Context, containerID string, combinedOutput bool) (io.ReadCloser, error) { - ret := _m.Called(ctx, containerID, combinedOutput) +// GetOutputStream provides a mock function with given fields: ctx, containerName, combinedOutput +func (_m *ContainerRuntimeExecutor) GetOutputStream(ctx context.Context, containerName string, combinedOutput bool) 
(io.ReadCloser, error) { + ret := _m.Called(ctx, containerName, combinedOutput) var r0 io.ReadCloser if rf, ok := ret.Get(0).(func(context.Context, string, bool) io.ReadCloser); ok { - r0 = rf(ctx, containerID, combinedOutput) + r0 = rf(ctx, containerName, combinedOutput) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(io.ReadCloser) @@ -88,7 +88,7 @@ func (_m *ContainerRuntimeExecutor) GetOutputStream(ctx context.Context, contain var r1 error if rf, ok := ret.Get(1).(func(context.Context, string, bool) error); ok { - r1 = rf(ctx, containerID, combinedOutput) + r1 = rf(ctx, containerName, combinedOutput) } else { r1 = ret.Error(1) } @@ -96,13 +96,13 @@ func (_m *ContainerRuntimeExecutor) GetOutputStream(ctx context.Context, contain return r0, r1 } -// Kill provides a mock function with given fields: ctx, containerIDs, terminationGracePeriodDuration -func (_m *ContainerRuntimeExecutor) Kill(ctx context.Context, containerIDs []string, terminationGracePeriodDuration time.Duration) error { - ret := _m.Called(ctx, containerIDs, terminationGracePeriodDuration) +// Kill provides a mock function with given fields: ctx, containerNames, terminationGracePeriodDuration +func (_m *ContainerRuntimeExecutor) Kill(ctx context.Context, containerNames []string, terminationGracePeriodDuration time.Duration) error { + ret := _m.Called(ctx, containerNames, terminationGracePeriodDuration) var r0 error if rf, ok := ret.Get(0).(func(context.Context, []string, time.Duration) error); ok { - r0 = rf(ctx, containerIDs, terminationGracePeriodDuration) + r0 = rf(ctx, containerNames, terminationGracePeriodDuration) } else { r0 = ret.Error(0) } @@ -110,27 +110,13 @@ func (_m *ContainerRuntimeExecutor) Kill(ctx context.Context, containerIDs []str return r0 } -// Wait provides a mock function with given fields: ctx, containerID -func (_m *ContainerRuntimeExecutor) Wait(ctx context.Context, containerID string) error { - ret := _m.Called(ctx, containerID) +// Wait provides a mock function with given fields: ctx, containerNames, sidecarNames +func (_m *ContainerRuntimeExecutor) Wait(ctx context.Context, containerNames []string, sidecarNames []string) error { + ret := _m.Called(ctx, containerNames, sidecarNames) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { - r0 = rf(ctx, containerID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// WaitInit provides a mock function with given fields: -func (_m *ContainerRuntimeExecutor) WaitInit() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context, []string, []string) error); ok { + r0 = rf(ctx, containerNames, sidecarNames) } else { r0 = ret.Error(0) } diff --git a/workflow/executor/pns/pns.go b/workflow/executor/pns/pns.go index e1f995729e8c..dd0ff5277ba6 100644 --- a/workflow/executor/pns/pns.go +++ b/workflow/executor/pns/pns.go @@ -4,7 +4,6 @@ import ( "bufio" "context" "fmt" - "io" "io/ioutil" "os" "strings" @@ -16,72 +15,60 @@ import ( gops "github.com/mitchellh/go-ps" log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "github.com/argoproj/argo-workflows/v3/errors" "github.com/argoproj/argo-workflows/v3/util/archive" - errorsutil "github.com/argoproj/argo-workflows/v3/util/errors" - waitutil "github.com/argoproj/argo-workflows/v3/util/wait" "github.com/argoproj/argo-workflows/v3/workflow/common" execcommon 
"github.com/argoproj/argo-workflows/v3/workflow/executor/common" - argowait "github.com/argoproj/argo-workflows/v3/workflow/executor/common/wait" + "github.com/argoproj/argo-workflows/v3/workflow/executor/k8sapi" osspecific "github.com/argoproj/argo-workflows/v3/workflow/executor/os-specific" ) +var errContainerNameNotFound = fmt.Errorf("container name not found") + type PNSExecutor struct { - clientset *kubernetes.Clientset + *k8sapi.K8sAPIExecutor podName string namespace string + containers map[string]string // container name -> container ID + // ctrIDToPid maps a containerID to a process ID ctrIDToPid map[string]int - // pidToCtrID maps a process ID to a container ID - pidToCtrID map[int]string // pidFileHandles holds file handles to all root containers - pidFileHandles map[int]*fileInfo + pidFileHandles map[int]*os.File // thisPID is the pid of this process thisPID int - // mainFS holds a file descriptor to the main filesystem, allowing the executor to access the - // filesystem after the main process exited - mainFS *os.File // rootFS holds a file descriptor to the root filesystem, allowing the executor to exit out of a chroot rootFS *os.File - // debug enables additional debugging - debug bool - // hasOutputs indicates if the template has outputs. determines if we need to - hasOutputs bool } -type fileInfo struct { - file os.File - info os.FileInfo -} - -func NewPNSExecutor(clientset *kubernetes.Clientset, podName, namespace string, hasOutputs bool) (*PNSExecutor, error) { +func NewPNSExecutor(clientset *kubernetes.Clientset, podName, namespace string) (*PNSExecutor, error) { thisPID := os.Getpid() - log.Infof("Creating PNS executor (namespace: %s, pod: %s, pid: %d, hasOutputs: %v)", namespace, podName, thisPID, hasOutputs) + log.Infof("Creating PNS executor (namespace: %s, pod: %s, pid: %d)", namespace, podName, thisPID) if thisPID == 1 { return nil, errors.New(errors.CodeBadRequest, "process namespace sharing is not enabled on pod") } + delegate, err := k8sapi.NewK8sAPIExecutor(clientset, nil, podName, namespace) + if err != nil { + return nil, err + } return &PNSExecutor{ - clientset: clientset, + K8sAPIExecutor: delegate, podName: podName, namespace: namespace, + containers: make(map[string]string), ctrIDToPid: make(map[string]int), - pidToCtrID: make(map[int]string), - pidFileHandles: make(map[int]*fileInfo), + pidFileHandles: make(map[int]*os.File), thisPID: thisPID, - debug: log.GetLevel() == log.DebugLevel, - hasOutputs: hasOutputs, }, nil } -func (p *PNSExecutor) GetFileContents(containerID string, sourcePath string) (string, error) { - err := p.enterChroot() +func (p *PNSExecutor) GetFileContents(containerName string, sourcePath string) (string, error) { + err := p.enterChroot(containerName) if err != nil { return "", err } @@ -94,15 +81,15 @@ func (p *PNSExecutor) GetFileContents(containerID string, sourcePath string) (st } // enterChroot enters chroot of the main container -func (p *PNSExecutor) enterChroot() error { - if p.mainFS == nil { - return errors.InternalErrorf("could not chroot into main for artifact collection: container may have exited too quickly") +func (p *PNSExecutor) enterChroot(containerName string) error { + pid, err := p.getContainerPID(containerName) + if err != nil { + return fmt.Errorf("failed to get container PID: %w", err) } - if err := p.mainFS.Chdir(); err != nil { + if err := p.pidFileHandles[pid].Chdir(); err != nil { return errors.InternalWrapErrorf(err, "failed to chdir to main filesystem: %v", err) } - err := osspecific.CallChroot() - if 
err != nil { + if err := osspecific.CallChroot(); err != nil { return errors.InternalWrapErrorf(err, "failed to chroot to main filesystem: %v", err) } return nil @@ -121,7 +108,7 @@ func (p *PNSExecutor) exitChroot() error { } // CopyFile copies a source file in a container to a local path -func (p *PNSExecutor) CopyFile(containerID string, sourcePath string, destPath string, compressionLevel int) (err error) { +func (p *PNSExecutor) CopyFile(containerName string, sourcePath string, destPath string, compressionLevel int) (err error) { destFile, err := os.Create(destPath) if err != nil { return err @@ -138,7 +125,7 @@ func (p *PNSExecutor) CopyFile(containerID string, sourcePath string, destPath s } }() w := bufio.NewWriter(destFile) - err = p.enterChroot() + err = p.enterChroot(containerName) if err != nil { return err } @@ -147,53 +134,61 @@ func (p *PNSExecutor) CopyFile(containerID string, sourcePath string, destPath s return err } -func (p *PNSExecutor) WaitInit() error { - if !p.hasOutputs { - return nil - } - go p.pollRootProcesses(time.Minute) +func (p *PNSExecutor) Wait(ctx context.Context, containerNames, sidecarNames []string) error { + + allContainerNames := append(containerNames, sidecarNames...) + go p.pollRootProcesses(ctx, allContainerNames) + // Secure a filehandle on our own root. This is because we will chroot back and forth from // the main container's filesystem, to our own. rootFS, err := os.Open("/") if err != nil { - return errors.InternalWrapError(err) + return fmt.Errorf("failed to open my own root: %w", err) } p.rootFS = rootFS - return nil -} -// Wait for the container to complete -func (p *PNSExecutor) Wait(ctx context.Context, containerID string) error { - mainPID, err := p.getContainerPID(containerID) - if err != nil { - log.Warnf("Failed to get main PID: %v", err) - if !p.hasOutputs { - log.Warnf("Ignoring wait failure: %v. Process assumed to have completed", err) - return nil - } - return argowait.UntilTerminated(ctx, p.clientset, p.namespace, p.podName, containerID) - } - log.Infof("Main pid identified as %d", mainPID) - for pid, f := range p.pidFileHandles { - if pid == mainPID { - log.Info("Successfully secured file handle on main container root filesystem") - p.mainFS = &f.file - } else { - log.Infof("Closing root filehandle for non-main pid %d", pid) - _ = f.file.Close() - } + if !p.haveContainers(allContainerNames) { // allow some additional time for polling to get this data + time.Sleep(3 * time.Second) } - if p.mainFS == nil { - log.Warn("Failed to secure file handle on main container's root filesystem. 
Output artifacts from base image layer will fail") + + if !p.haveContainers(containerNames) { + log.Info("container PID still unknown (maybe due to short running main container)") + err := p.K8sAPIExecutor.Until(ctx, func(pod *corev1.Pod) bool { + for _, c := range pod.Status.ContainerStatuses { + containerID := execcommon.GetContainerID(c.ContainerID) + p.containers[c.Name] = containerID + log.Infof("mapped container name %q to container ID %q", c.Name, containerID) + } + return p.haveContainers(allContainerNames) + }) + if err != nil { + return err + } } - // wait for pid to complete - log.Infof("Waiting for main pid %d to complete", mainPID) - err = executil.WaitPID(mainPID) - if err != nil { - return err + for _, containerName := range containerNames { + pid, err := p.getContainerPID(containerName) + if err != nil { + return err + } + log.Infof("Waiting for %q pid %d to complete", containerName, pid) + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + p, err := gops.FindProcess(pid) + if err != nil { + return fmt.Errorf("failed to find %q process: %w", containerName, err) + } + if p == nil { + log.Infof("%q pid %d completed", containerName, pid) + return nil + } + time.Sleep(3 * time.Second) + } + } } - log.Infof("Main pid %d completed", mainPID) return nil } @@ -202,68 +197,56 @@ func (p *PNSExecutor) Wait(ctx context.Context, containerID string) error { // It opens file handles on all root pids because at this point, we do not yet know which pid is the // "main" container. // Polling is necessary because it is not possible to use something like fsnotify against procfs. -func (p *PNSExecutor) pollRootProcesses(timeout time.Duration) { - log.Warnf("Polling root processes (%v)", timeout) - deadline := time.Now().Add(timeout) +func (p *PNSExecutor) pollRootProcesses(ctx context.Context, containerNames []string) { + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() for { - p.updateCtrIDMap() - if p.mainFS != nil { - log.Info("Stopped root processes polling due to successful securing of main root fs") - break - } - if time.Now().After(deadline) { - log.Warnf("Polling root processes timed out (%v)", timeout) - break + select { + case <-ctx.Done(): + return + default: + if err := p.secureRootFiles(); err != nil { + log.WithError(err).Warn("failed to secure root files") + } + if p.haveContainers(containerNames) { + return + } + time.Sleep(50 * time.Millisecond) } - time.Sleep(50 * time.Millisecond) } } -func (p *PNSExecutor) GetOutputStream(ctx context.Context, containerID string, combinedOutput bool) (io.ReadCloser, error) { - if !combinedOutput { - log.Warn("non combined output unsupported") - } - opts := corev1.PodLogOptions{ - Container: common.MainContainerName, - Follow: true, - } - return p.clientset.CoreV1().Pods(p.namespace).GetLogs(p.podName, &opts).Stream(ctx) -} - -func (p *PNSExecutor) GetExitCode(ctx context.Context, containerID string) (string, error) { - log.Infof("Getting exit code of %s", containerID) - _, containerStatus, err := p.GetTerminatedContainerStatus(ctx, containerID) - if err != nil { - return "", fmt.Errorf("could not get container status: %s", err) - } - if containerStatus.State.Terminated != nil { - return fmt.Sprint(containerStatus.State.Terminated.ExitCode), nil +func (d *PNSExecutor) haveContainers(containerNames []string) bool { + for _, n := range containerNames { + if d.ctrIDToPid[d.containers[n]] == 0 { + return false + } } - return "", nil + return true } -// Kill a list of containerIDs first with a SIGTERM then 
with a SIGKILL after a grace period -func (p *PNSExecutor) Kill(ctx context.Context, containerIDs []string, terminationGracePeriodDuration time.Duration) error { +// Kill a list of containers first with a SIGTERM then with a SIGKILL after a grace period +func (p *PNSExecutor) Kill(ctx context.Context, containerNames []string, terminationGracePeriodDuration time.Duration) error { var asyncErr error wg := sync.WaitGroup{} - for _, cid := range containerIDs { + for _, containerName := range containerNames { wg.Add(1) - go func(containerID string) { - err := p.killContainer(containerID, terminationGracePeriodDuration) + go func(containerName string) { + err := p.killContainer(ctx, containerName, terminationGracePeriodDuration) if err != nil && asyncErr != nil { asyncErr = err } wg.Done() - }(cid) + }(containerName) } wg.Wait() return asyncErr } -func (p *PNSExecutor) killContainer(containerID string, terminationGracePeriodDuration time.Duration) error { - pid, err := p.getContainerPID(containerID) +func (p *PNSExecutor) killContainer(ctx context.Context, containerName string, terminationGracePeriodDuration time.Duration) error { + pid, err := p.getContainerPID(containerName) if err != nil { - log.Warnf("Ignoring kill container failure of %s: %v. Process assumed to have completed", containerID, err) + log.Warnf("Ignoring kill container failure of %q: %v. Process assumed to have completed", containerName, err) return nil } // On Unix systems, FindProcess always succeeds and returns a Process @@ -293,111 +276,76 @@ func (p *PNSExecutor) killContainer(containerID string, terminationGracePeriodDu // getContainerPID returns the pid associated with the container id. Returns error if it was unable // to be determined because no running root processes exist with that container ID -func (p *PNSExecutor) getContainerPID(containerID string) (int, error) { - pid, ok := p.ctrIDToPid[containerID] - if ok { - return pid, nil - } - p.updateCtrIDMap() - pid, ok = p.ctrIDToPid[containerID] +func (p *PNSExecutor) getContainerPID(containerName string) (int, error) { + containerID, ok := p.containers[containerName] if !ok { - return -1, errors.InternalErrorf("Failed to determine pid for containerID %s: container may have exited too quickly", containerID) + return 0, fmt.Errorf("container ID not found for container name %q", containerName) + } + pid := p.ctrIDToPid[containerID] + if pid == 0 { + return 0, fmt.Errorf("pid not found for container ID %q", containerID) } return pid, nil } -// updateCtrIDMap updates the mapping between container IDs to PIDs -func (p *PNSExecutor) updateCtrIDMap() { - allProcs, err := gops.Processes() +func containerNameForPID(pid int) (string, error) { + data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/environ", pid)) if err != nil { - log.Warnf("Failed to list processes: %v", err) - return - } - for _, proc := range allProcs { - pid := proc.Pid() - if pid == 1 || pid == p.thisPID || proc.PPid() != 0 { - // ignore the pause container, our own pid, and non-root processes - continue + return "", err + } + prefix := common.EnvVarContainerName + "=" + for _, l := range strings.Split(string(data), "\000") { + if strings.HasPrefix(l, prefix) { + return strings.TrimPrefix(l, prefix), nil } + } + return "", errContainerNameNotFound +} - // Useful code for debugging: - if p.debug { - if data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/root", pid) + "/etc/os-release"); err == nil { - log.Infof("pid %d: %s", pid, string(data)) - _, _ = parseContainerID(pid) +func (p *PNSExecutor) 
secureRootFiles() error { + processes, err := gops.Processes() + if err != nil { + return err + } + for _, proc := range processes { + err = func() error { + pid := proc.Pid() + if pid == 1 || pid == p.thisPID || proc.PPid() != 0 { + // ignore the pause container, our own pid, and non-root processes + return nil } - } - if p.hasOutputs && p.mainFS == nil { - rootPath := fmt.Sprintf("/proc/%d/root", pid) - currInfo, err := os.Stat(rootPath) + fs, err := os.Open(fmt.Sprintf("/proc/%d/root", pid)) if err != nil { - log.Warnf("Failed to stat %s: %v", rootPath, err) - continue + return err } - log.Infof("pid %d: %v", pid, currInfo) - prevInfo := p.pidFileHandles[pid] - // Secure the root filehandle of the process. NOTE if the file changed, it means that // the main container may have switched (e.g. gone from busybox to the user's container) - if prevInfo == nil || !os.SameFile(prevInfo.info, currInfo) { - fs, err := os.Open(rootPath) - if err != nil { - log.Warnf("Failed to open %s: %v", rootPath, err) - continue - } - log.Infof("Secured filehandle on %s", rootPath) - p.pidFileHandles[pid] = &fileInfo{ - info: currInfo, - file: *fs, - } - if prevInfo != nil { - _ = prevInfo.file.Close() - } + if prevInfo, ok := p.pidFileHandles[pid]; ok { + _ = prevInfo.Close() } - } + p.pidFileHandles[pid] = fs + log.Infof("secured root for pid %d root: %s", pid, proc.Executable()) - // Update maps of pids to container ids - if _, ok := p.pidToCtrID[pid]; !ok { containerID, err := parseContainerID(pid) if err != nil { - log.Warnf("Failed to identify containerID for process %d", pid) - continue + return err } - log.Infof("containerID %s mapped to pid %d", containerID, pid) p.ctrIDToPid[containerID] = pid - p.pidToCtrID[pid] = containerID - } - } -} - -var backoffOver30s = wait.Backoff{ - Duration: 1 * time.Second, - Steps: 7, - Factor: 2, -} - -func (p *PNSExecutor) GetTerminatedContainerStatus(ctx context.Context, containerID string) (*corev1.Pod, *corev1.ContainerStatus, error) { - var pod *corev1.Pod - var containerStatus *corev1.ContainerStatus - // Under high load, the Kubernetes API may be unresponsive for some time (30s). This would have failed the workflow - // previously (<=v2.11) but a 30s back-off mitigates this. 
- err := waitutil.Backoff(backoffOver30s, func() (bool, error) { - podRes, err := p.clientset.CoreV1().Pods(p.namespace).Get(ctx, p.podName, metav1.GetOptions{}) - if err != nil { - return !errorsutil.IsTransientErr(err), err - } - for _, containerStatusRes := range podRes.Status.ContainerStatuses { - if execcommon.GetContainerID(&containerStatusRes) != containerID { - continue + log.Infof("mapped pid %d to container ID %q", pid, containerID) + containerName, err := containerNameForPID(pid) + if err != nil { + return err } - pod = podRes - containerStatus = &containerStatusRes - return containerStatus.State.Terminated != nil, nil + p.containers[containerName] = containerID + log.Infof("mapped container name %q to container ID %q and pid %d", containerName, containerID, pid) + return nil + }() + if err != nil { + log.WithError(err).Warnf("failed to secure root file handle for %d", proc.Pid()) } - return false, nil - }) - return pod, containerStatus, err + } + return nil } // parseContainerID parses the containerID of a pid diff --git a/workflow/executor/pns/pns_test.go b/workflow/executor/pns/pns_test.go index 40cc9918db6b..b04e1540c2b1 100644 --- a/workflow/executor/pns/pns_test.go +++ b/workflow/executor/pns/pns_test.go @@ -2,22 +2,10 @@ package pns import ( "testing" - "time" "github.com/stretchr/testify/assert" ) -func Test_backoffOver30s(t *testing.T) { - x := backoffOver30s - assert.Equal(t, 1*time.Second, x.Step()) - assert.Equal(t, 2*time.Second, x.Step()) - assert.Equal(t, 4*time.Second, x.Step()) - assert.Equal(t, 8*time.Second, x.Step()) - assert.Equal(t, 16*time.Second, x.Step()) - assert.Equal(t, 32*time.Second, x.Step()) - assert.Equal(t, 64*time.Second, x.Step()) -} - func TestPNSExecutor_parseContainerIDFromCgroupLine(t *testing.T) { testCases := []struct { line string diff --git a/workflow/executor/resource_test.go b/workflow/executor/resource_test.go index 08891cf4ce68..ca59dfb77b8a 100644 --- a/workflow/executor/resource_test.go +++ b/workflow/executor/resource_test.go @@ -33,7 +33,6 @@ func TestResourceFlags(t *testing.T) { PodAnnotationsPath: fakeAnnotations, ExecutionControl: nil, RuntimeExecutor: &mockRuntimeExecutor, - mainContainerID: fakeContainerID, } args, err := we.getKubectlArguments("fake", "../../examples/hello-world.yaml", fakeFlags) From 2f7c9087c1cc680e0dc7df25a49095c2658bbd7b Mon Sep 17 00:00:00 2001 From: Yuan Tang Date: Fri, 12 Feb 2021 15:44:02 -0500 Subject: [PATCH 6/9] build: Fix path to openapi-gen binary (#5089) Signed-off-by: terrytangyuan --- Makefile | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 57ce2019f6e6..1a20bdd98c2d 100644 --- a/Makefile +++ b/Makefile @@ -106,11 +106,11 @@ define protoc -I /usr/local/include \ -I $(CURDIR) \ -I $(CURDIR)/vendor \ - -I ${GOPATH}/src \ - -I ${GOPATH}/pkg/mod/github.com/gogo/protobuf@v1.3.1/gogoproto \ - -I ${GOPATH}/pkg/mod/github.com/grpc-ecosystem/grpc-gateway@v1.16.0/third_party/googleapis \ - --gogofast_out=plugins=grpc:${GOPATH}/src \ - --grpc-gateway_out=logtostderr=true:${GOPATH}/src \ + -I $(GOPATH)/src \ + -I $(GOPATH)/pkg/mod/github.com/gogo/protobuf@v1.3.1/gogoproto \ + -I $(GOPATH)/pkg/mod/github.com/grpc-ecosystem/grpc-gateway@v1.16.0/third_party/googleapis \ + --gogofast_out=plugins=grpc:$(GOPATH)/src \ + --grpc-gateway_out=logtostderr=true:$(GOPATH)/src \ --swagger_out=logtostderr=true,fqn_for_swagger_name=true:. 
\ $(1) perl -i -pe 's|argoproj/argo-workflows/|argoproj/argo-workflows/v3/|g' `echo "$(1)" | sed 's/proto/pb.go/g'` @@ -298,7 +298,7 @@ $(GOPATH)/bin/goimports: pkg/apis/workflow/v1alpha1/generated.proto: $(GOPATH)/bin/go-to-protobuf $(PROTO_BINARIES) $(TYPES) go mod vendor [ -e v3 ] || ln -s . v3 - ${GOPATH}/bin/go-to-protobuf \ + $(GOPATH)/bin/go-to-protobuf \ --go-header-file=./hack/custom-boilerplate.go.txt \ --packages=github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1 \ --apimachinery-packages=+k8s.io/apimachinery/pkg/util/intstr,+k8s.io/apimachinery/pkg/api/resource,k8s.io/apimachinery/pkg/runtime/schema,+k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/api/core/v1,k8s.io/api/policy/v1beta1 \ @@ -498,7 +498,7 @@ clean: pkg/apis/workflow/v1alpha1/openapi_generated.go: $(GOPATH)/bin/openapi-gen $(TYPES) [ -e v3 ] || ln -s . v3 - openapi-gen \ + $(GOPATH)/bin/openapi-gen \ --go-header-file ./hack/custom-boilerplate.go.txt \ --input-dirs github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1 \ --output-package github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1 \ @@ -509,7 +509,7 @@ pkg/apis/workflow/v1alpha1/openapi_generated.go: $(GOPATH)/bin/openapi-gen $(TYP # generates many other files (listers, informers, client etc). pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go: $(TYPES) [ -e v3 ] || ln -s . v3 - bash ${GOPATH}/pkg/mod/k8s.io/code-generator@v0.19.6/generate-groups.sh \ + bash $(GOPATH)/pkg/mod/k8s.io/code-generator@v0.19.6/generate-groups.sh \ "deepcopy,client,informer,lister" \ github.com/argoproj/argo-workflows/v3/pkg/client github.com/argoproj/argo-workflows/v3/pkg/apis \ workflow:v1alpha1 \ From 210080a0c0cb5fc40ec82859cc496a948e30687a Mon Sep 17 00:00:00 2001 From: Alex Collins Date: Fri, 12 Feb 2021 14:49:28 -0800 Subject: [PATCH 7/9] feat(controller): Logs Kubernetes API requests (#5084) Signed-off-by: Alex Collins --- cmd/workflow-controller/main.go | 2 ++ workflow/controller/controller.go | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/workflow-controller/main.go b/cmd/workflow-controller/main.go index f5ff42a23511..50b6c9249b5d 100644 --- a/cmd/workflow-controller/main.go +++ b/cmd/workflow-controller/main.go @@ -22,6 +22,7 @@ import ( wfclientset "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned" cmdutil "github.com/argoproj/argo-workflows/v3/util/cmd" + "github.com/argoproj/argo-workflows/v3/util/logs" "github.com/argoproj/argo-workflows/v3/workflow/controller" "github.com/argoproj/argo-workflows/v3/workflow/metrics" ) @@ -67,6 +68,7 @@ func NewRootCommand() *cobra.Command { config.Burst = burst config.QPS = qps + logs.AddK8SLogTransportWrapper(config) metrics.AddMetricsTransportWrapper(config) namespace, _, err := clientConfig.Namespace() diff --git a/workflow/controller/controller.go b/workflow/controller/controller.go index 17f89fab73f4..86b0b3531ca8 100644 --- a/workflow/controller/controller.go +++ b/workflow/controller/controller.go @@ -228,7 +228,7 @@ func (wfc *WorkflowController) Run(ctx context.Context, wfWorkers, workflowTTLWo ReleaseOnCancel: true, LeaseDuration: 15 * time.Second, RenewDeadline: 10 * time.Second, - RetryPeriod: 2 * time.Second, + RetryPeriod: 5 * time.Second, Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: func(ctx context.Context) { logCtx.Info("started leading") From 19b22f25a4bfd900752947f695f7a3a1567149ef Mon Sep 17 00:00:00 2001 From: Simon Behar Date: Fri, 12 Feb 2021 16:01:05 -0800 Subject: [PATCH 8/9] 
feat: Add checker to ensure that env variable doc is up to date (#5091)

Signed-off-by: Simon Behar 
---
 Makefile                      |  1 +
 docs/environment-variables.md |  2 ++
 hack/check-env-doc.sh         | 33 +++++++++++++++++++++++++++++++++
 3 files changed, 36 insertions(+)
 create mode 100755 hack/check-env-doc.sh

diff --git a/Makefile b/Makefile
index 1a20bdd98c2d..ab2442a6e5f3 100644
--- a/Makefile
+++ b/Makefile
@@ -259,6 +259,7 @@ codegen: \
 	go generate ./persist/sqldb ./pkg/apiclient/workflow ./server/auth ./server/auth/sso ./workflow/executor
 	rm -Rf vendor
 	go mod tidy
+	./hack/check-env-doc.sh
 
 $(GOPATH)/bin/mockery:
 	./hack/recurl.sh dist/mockery.tar.gz https://github.com/vektra/mockery/releases/download/v1.1.1/mockery_1.1.1_$(shell uname -s)_$(shell uname -m).tar.gz
diff --git a/docs/environment-variables.md b/docs/environment-variables.md
index b9410671fd3f..fb5b16454e13 100644
--- a/docs/environment-variables.md
+++ b/docs/environment-variables.md
@@ -23,6 +23,8 @@ Note that these environment variables may be removed at any time.
 | `TRANSIENT_ERROR_PATTERN` | `string` | The regular expression that represents additional patterns for transient errors. |
 | `WF_DEL_PROPAGATION_POLICY` | `string` | The deletion propogation policy for workflows. |
 | `WORKFLOW_GC_PERIOD` | `time.Duration` | The periodicity for GC of workflows. |
+| `BUBBLE_ENTRY_TEMPLATE_ERR` | `bool` | Whether to bubble up template errors to the workflow. Default true. |
+| `INFORMER_WRITE_BACK` | `bool` | Whether to write back to the informer instead of catching up. Default true. |
 
 ## Executor
 
diff --git a/hack/check-env-doc.sh b/hack/check-env-doc.sh
new file mode 100755
index 000000000000..f76f2617932f
--- /dev/null
+++ b/hack/check-env-doc.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+echo "Checking docs/environment-variables.md for completeness..."
+
+function check-used {
+  grep "| \`" < ./docs/environment-variables.md \
+    | awk '{gsub(/\`/, "", $2); print $2; }' \
+    | while read -r x; do
+      var="${x%\`}";
+      var="${var#\`}";
+      if ! grep -qR --exclude="*_test.go" "$var" ./workflow ./persist ./util; then
+        echo "Documented variable $var in docs/environment-variables.md is not used anywhere";
+        exit 1;
+      fi;
+    done
+}
+
+function check-documented {
+  grep -REh --exclude="*_test.go" "Getenv.*?\(|LookupEnv.*?\(" ./workflow ./persist ./util \
+    | grep -Eo "\"[A-Z_]+?\"" \
+    | sort \
+    | uniq \
+    | while read -r x; do
+      var="${x%\"}";
+      var="${var#\"}";
+      if ! grep -q "$var" docs/environment-variables.md; then
+        echo "Variable $var not documented in docs/environment-variables.md";
+        exit 1;
+      fi;
+    done
+}
+
+check-used && check-documented && echo "Success!"
From a50ddb206cd57a4667a3daa35e37bb9ed80f8cf0 Mon Sep 17 00:00:00 2001 From: Alex Collins Date: Fri, 12 Feb 2021 17:07:21 -0800 Subject: [PATCH 9/9] chore: More opinionated linting (#5072) Signed-off-by: Alex Collins --- .golangci.yml | 15 ++++++- Makefile | 2 +- cmd/argo/commands/archive/delete.go | 2 +- cmd/argo/commands/archive/get.go | 6 +-- cmd/argo/commands/archive/list.go | 2 +- cmd/argo/commands/archive/root.go | 2 +- cmd/argo/commands/auth/root.go | 2 +- cmd/argo/commands/client/conn.go | 6 ++- cmd/argo/commands/client/conn_test.go | 1 - cmd/argo/commands/clustertemplate/create.go | 6 +-- .../commands/clustertemplate/create_test.go | 1 - cmd/argo/commands/clustertemplate/delete.go | 9 ++--- cmd/argo/commands/clustertemplate/get.go | 9 ++--- cmd/argo/commands/clustertemplate/lint.go | 9 ++--- cmd/argo/commands/clustertemplate/list.go | 7 +--- cmd/argo/commands/clustertemplate/root.go | 2 +- cmd/argo/commands/common.go | 3 +- cmd/argo/commands/completion.go | 2 +- cmd/argo/commands/cron/create.go | 4 +- cmd/argo/commands/cron/delete.go | 6 +-- cmd/argo/commands/cron/get.go | 6 +-- cmd/argo/commands/cron/lint.go | 6 +-- cmd/argo/commands/cron/list.go | 7 +--- cmd/argo/commands/cron/resume.go | 2 +- cmd/argo/commands/cron/root.go | 2 +- cmd/argo/commands/cron/suspend.go | 2 +- cmd/argo/commands/delete.go | 2 +- cmd/argo/commands/get.go | 8 +--- cmd/argo/commands/get_test.go | 31 +++++++------- cmd/argo/commands/lint.go | 6 +-- cmd/argo/commands/list.go | 2 +- cmd/argo/commands/list_test.go | 3 +- cmd/argo/commands/logs.go | 3 +- cmd/argo/commands/node.go | 7 +--- cmd/argo/commands/resubmit.go | 2 +- cmd/argo/commands/resume.go | 7 +--- cmd/argo/commands/retry.go | 2 +- cmd/argo/commands/root.go | 2 +- cmd/argo/commands/server.go | 2 +- cmd/argo/commands/stop.go | 7 +--- cmd/argo/commands/submit.go | 4 +- cmd/argo/commands/suspend.go | 2 +- cmd/argo/commands/template/create.go | 6 +-- cmd/argo/commands/template/delete.go | 7 +--- cmd/argo/commands/template/get.go | 9 ++--- cmd/argo/commands/template/lint.go | 6 +-- cmd/argo/commands/template/list.go | 7 +--- cmd/argo/commands/template/root.go | 2 +- cmd/argo/commands/terminate.go | 2 +- cmd/argo/commands/wait.go | 6 +-- cmd/argo/commands/watch.go | 7 +--- cmd/argo/main.go | 3 +- cmd/argoexec/commands/init.go | 2 +- cmd/argoexec/commands/resource.go | 2 +- cmd/argoexec/commands/root.go | 4 +- cmd/argoexec/commands/wait.go | 2 +- cmd/argoexec/main.go | 3 +- cmd/workflow-controller/main.go | 2 +- config/config.go | 2 +- config/controller.go | 3 +- errors/errors.go | 1 - hack/docgen.go | 6 ++- hack/swagger/kubeifyswagger.go | 2 +- hack/swagger/types.go | 6 ++- .../explosive_offload_node_status_repo.go | 9 +++-- persist/sqldb/null_workflow_archive.go | 3 +- persist/sqldb/offload_node_status_repo.go | 4 +- persist/sqldb/sqldb.go | 4 +- persist/sqldb/workflow_archive.go | 6 ++- pkg/apiclient/argo-kube-client.go | 6 ++- ...luster-workflow-template-service-client.go | 1 - pkg/apiclient/panic-intermediary.go | 3 +- pkg/apis/workflow/v1alpha1/register.go | 4 +- pkg/apis/workflow/v1alpha1/workflow_types.go | 23 +++++------ .../workflow/v1alpha1/workflow_types_test.go | 26 +++++++----- server/apiserver/argoserver.go | 6 +-- server/artifacts/artifact_server.go | 4 -- server/artifacts/artifact_server_test.go | 11 ++--- server/auth/mode_test.go | 1 + server/auth/serviceaccount/claims.go | 3 +- server/auth/sso/sso.go | 10 +++-- server/event/dispatch/operation_test.go | 6 ++- server/event/event_server.go | 2 - server/types/clients.go | 3 +- 
server/workflow/workflow_server.go | 7 +--- server/workflow/workflow_server_test.go | 10 ++++- .../archived_workflow_server.go | 1 - test/e2e/argo_server_test.go | 6 +-- test/e2e/cli_test.go | 16 +++----- test/e2e/cluster_workflow_template_test.go | 1 - test/e2e/fixtures/e2e_suite.go | 22 +++++----- test/e2e/fixtures/when.go | 16 ++++---- test/e2e/http_logger.go | 3 +- test/e2e/images/argosay/v2/main/argosay.go | 1 + test/e2e/workflow_template_test.go | 1 - test/stress/tool/main.go | 1 - test/test.go | 4 +- test/util/indexer.go | 1 + test/util/serviceaccount.go | 4 +- util/errors/errors_test.go | 10 +++-- util/file/fileutil.go | 4 +- util/instanceid/service.go | 1 - util/k8s/parse.go | 2 +- util/kubeconfig/kubeconfig.go | 14 ++----- util/kubeconfig/kubeconfig_test.go | 3 -- util/logs/workflow-logger.go | 1 - util/util.go | 4 +- workflow/artifacts/artifactory/artifactory.go | 2 - workflow/artifacts/artifacts.go | 5 +-- workflow/artifacts/gcs/gcs.go | 3 +- workflow/artifacts/oss/oss.go | 3 +- workflow/artifacts/raw/raw.go | 3 +- workflow/artifacts/raw/raw_test.go | 2 - workflow/artifacts/s3/s3.go | 6 +-- workflow/common/ancestry.go | 2 +- workflow/common/ancestry_test.go | 1 - workflow/common/common_test.go | 3 +- workflow/common/convert_test.go | 2 - workflow/common/params.go | 2 +- workflow/common/util.go | 10 ++--- workflow/common/util_test.go | 5 +-- workflow/controller/cache_test.go | 2 +- workflow/controller/controller.go | 14 +++---- workflow/controller/controller_test.go | 7 +++- workflow/controller/dag.go | 7 +--- workflow/controller/dag_test.go | 4 +- .../controller/estimation/dummy_estimator.go | 1 + workflow/controller/exec_control.go | 2 +- workflow/controller/operator.go | 38 +++++++----------- .../controller/operator_concurrency_test.go | 18 ++++----- workflow/controller/operator_metrics_test.go | 4 +- workflow/controller/operator_test.go | 28 +++++-------- .../controller/operator_wfdefault_test.go | 13 ++++-- .../operator_workflow_template_ref_test.go | 20 +++++----- workflow/controller/pod_cleanup_key.go | 7 ++-- workflow/controller/rate_limiters.go | 2 +- workflow/controller/scope.go | 2 - workflow/controller/scope_test.go | 28 ++++++------- workflow/controller/steps.go | 4 +- workflow/controller/steps_test.go | 2 +- workflow/controller/workflowpod.go | 40 +++++++------------ workflow/controller/workflowpod_test.go | 5 --- workflow/creator/creator_test.go | 1 - workflow/cron/operator_test.go | 3 +- workflow/events/event_recorder_manager.go | 1 - workflow/executor/docker/docker.go | 1 - workflow/executor/executor.go | 1 - workflow/executor/executor_test.go | 2 - workflow/executor/k8sapi/client.go | 4 +- workflow/executor/k8sapi/k8sapi.go | 9 ++--- workflow/executor/kubelet/client.go | 1 - workflow/executor/pns/pns.go | 10 ++--- workflow/executor/resource.go | 7 +--- workflow/hydrator/hydrator_test.go | 15 ++++--- workflow/metrics/k8s_request_total_metric.go | 18 ++++----- workflow/metrics/metrics.go | 1 + workflow/metrics/pod_missing_metric.go | 16 ++++---- workflow/metrics/util.go | 4 +- workflow/metrics/work_queue.go | 1 - workflow/metrics/work_queue_test.go | 1 - workflow/metrics/workflow_condition_metric.go | 18 ++++----- workflow/sync/mutex_test.go | 5 +-- workflow/sync/sync_manager.go | 20 +++++----- workflow/sync/sync_manager_test.go | 18 ++++----- workflow/sync/throttler.go | 1 + workflow/templateresolution/context.go | 14 +++---- workflow/ttlcontroller/ttlcontroller_test.go | 5 +-- workflow/util/merge_test.go | 4 +- workflow/util/retry/retry.go | 2 +- 
workflow/util/retry/retry_test.go | 2 +- workflow/util/util.go | 5 +-- workflow/util/util_test.go | 13 +++--- workflow/validate/validate.go | 11 ++--- workflow/validate/validate_test.go | 13 +++--- 174 files changed, 466 insertions(+), 627 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index e1a586d8a505..66198083c14e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,4 +1,4 @@ -# https://github.com/golangci/golangci/wiki/Configuration +# https://golangci-lint.run/usage/quick-start/ run: skip-dirs: - pkg/client @@ -14,7 +14,20 @@ run: - functional linters: enable: + - deadcode + - errcheck - goimports + - gci + - gofumpt + - gosimple + - govet + - misspell + - staticcheck + - structcheck + - typecheck + - unparam + - unused + - varcheck linters-settings: goimports: local-prefixes: github.com/argoproj/argo-workflows/ diff --git a/Makefile b/Makefile index ab2442a6e5f3..c77ad10a8eae 100644 --- a/Makefile +++ b/Makefile @@ -360,7 +360,7 @@ manifests/install.yaml: $(CRDS) dist/kustomize # lint/test/etc $(GOPATH)/bin/golangci-lint: - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b `go env GOPATH`/bin v1.33.2 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b `go env GOPATH`/bin v1.36.0 .PHONY: lint lint: server/static/files.go $(GOPATH)/bin/golangci-lint diff --git a/cmd/argo/commands/archive/delete.go b/cmd/argo/commands/archive/delete.go index 33c7dd301eb1..b42871ffb0c3 100644 --- a/cmd/argo/commands/archive/delete.go +++ b/cmd/argo/commands/archive/delete.go @@ -11,7 +11,7 @@ import ( ) func NewDeleteCommand() *cobra.Command { - var command = &cobra.Command{ + command := &cobra.Command{ Use: "delete UID...", Run: func(cmd *cobra.Command, args []string) { ctx, apiClient := client.NewAPIClient() diff --git a/cmd/argo/commands/archive/get.go b/cmd/argo/commands/archive/get.go index ac4f0430a501..88724e95aa3a 100644 --- a/cmd/argo/commands/archive/get.go +++ b/cmd/argo/commands/archive/get.go @@ -16,10 +16,8 @@ import ( ) func NewGetCommand() *cobra.Command { - var ( - output string - ) - var command = &cobra.Command{ + var output string + command := &cobra.Command{ Use: "get UID", Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { diff --git a/cmd/argo/commands/archive/list.go b/cmd/argo/commands/archive/list.go index cdeec48ecf9a..e200d5083867 100644 --- a/cmd/argo/commands/archive/list.go +++ b/cmd/argo/commands/archive/list.go @@ -21,7 +21,7 @@ func NewListCommand() *cobra.Command { output string chunkSize int64 ) - var command = &cobra.Command{ + command := &cobra.Command{ Use: "list", Run: func(cmd *cobra.Command, args []string) { ctx, apiClient := client.NewAPIClient() diff --git a/cmd/argo/commands/archive/root.go b/cmd/argo/commands/archive/root.go index 5aedd4a98275..8a7f70661d84 100644 --- a/cmd/argo/commands/archive/root.go +++ b/cmd/argo/commands/archive/root.go @@ -5,7 +5,7 @@ import ( ) func NewArchiveCommand() *cobra.Command { - var command = &cobra.Command{ + command := &cobra.Command{ Use: "archive", Run: func(cmd *cobra.Command, args []string) { cmd.HelpFunc()(cmd, args) diff --git a/cmd/argo/commands/auth/root.go b/cmd/argo/commands/auth/root.go index 6857f8fbc24e..6c51bdc4a522 100644 --- a/cmd/argo/commands/auth/root.go +++ b/cmd/argo/commands/auth/root.go @@ -5,7 +5,7 @@ import ( ) func NewAuthCommand() *cobra.Command { - var command = &cobra.Command{ + command := &cobra.Command{ Use: "auth", Run: func(cmd *cobra.Command, args []string) { 
cmd.HelpFunc()(cmd, args) diff --git a/cmd/argo/commands/client/conn.go b/cmd/argo/commands/client/conn.go index 9e6d2b45ae61..7b4750f7bf30 100644 --- a/cmd/argo/commands/client/conn.go +++ b/cmd/argo/commands/client/conn.go @@ -12,8 +12,10 @@ import ( "github.com/argoproj/argo-workflows/v3/util/kubeconfig" ) -var argoServerOpts = apiclient.ArgoServerOpts{} -var instanceID string +var ( + argoServerOpts = apiclient.ArgoServerOpts{} + instanceID string +) var overrides = clientcmd.ConfigOverrides{} diff --git a/cmd/argo/commands/client/conn_test.go b/cmd/argo/commands/client/conn_test.go index ec9c41079c14..3029af566034 100644 --- a/cmd/argo/commands/client/conn_test.go +++ b/cmd/argo/commands/client/conn_test.go @@ -11,7 +11,6 @@ func TestGetAuthString(t *testing.T) { _ = os.Setenv("ARGO_TOKEN", "my-token") defer func() { _ = os.Unsetenv("ARGO_TOKEN") }() assert.Equal(t, "my-token", GetAuthString()) - } func TestNamespace(t *testing.T) { diff --git a/cmd/argo/commands/clustertemplate/create.go b/cmd/argo/commands/clustertemplate/create.go index c41e23402671..6cea87c6dd96 100644 --- a/cmd/argo/commands/clustertemplate/create.go +++ b/cmd/argo/commands/clustertemplate/create.go @@ -20,10 +20,8 @@ type cliCreateOpts struct { } func NewCreateCommand() *cobra.Command { - var ( - cliCreateOpts cliCreateOpts - ) - var command = &cobra.Command{ + var cliCreateOpts cliCreateOpts + command := &cobra.Command{ Use: "create FILE1 FILE2...", Short: "create a cluster workflow template", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argo/commands/clustertemplate/create_test.go b/cmd/argo/commands/clustertemplate/create_test.go index 19ea43a828e2..09c551b581af 100644 --- a/cmd/argo/commands/clustertemplate/create_test.go +++ b/cmd/argo/commands/clustertemplate/create_test.go @@ -39,7 +39,6 @@ spec: ` func TestUnmarshalCWFT(t *testing.T) { - clusterwfts, err := unmarshalClusterWorkflowTemplates([]byte(cwfts), false) if assert.NoError(t, err) { assert.Equal(t, 2, len(clusterwfts)) diff --git a/cmd/argo/commands/clustertemplate/delete.go b/cmd/argo/commands/clustertemplate/delete.go index bdc6f8009720..88507b9c1f74 100644 --- a/cmd/argo/commands/clustertemplate/delete.go +++ b/cmd/argo/commands/clustertemplate/delete.go @@ -3,9 +3,8 @@ package clustertemplate import ( "fmt" - "github.com/spf13/cobra" - "github.com/argoproj/pkg/errors" + "github.com/spf13/cobra" "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client" "github.com/argoproj/argo-workflows/v3/pkg/apiclient/clusterworkflowtemplate" @@ -13,11 +12,9 @@ import ( // NewDeleteCommand returns a new instance of an `argo delete` command func NewDeleteCommand() *cobra.Command { - var ( - all bool - ) + var all bool - var command = &cobra.Command{ + command := &cobra.Command{ Use: "delete WORKFLOW_TEMPLATE", Short: "delete a cluster workflow template", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argo/commands/clustertemplate/get.go b/cmd/argo/commands/clustertemplate/get.go index 8a92f8ac244c..7d5c99fcd525 100644 --- a/cmd/argo/commands/clustertemplate/get.go +++ b/cmd/argo/commands/clustertemplate/get.go @@ -5,22 +5,19 @@ import ( "fmt" "log" + "github.com/argoproj/pkg/humanize" "github.com/spf13/cobra" "sigs.k8s.io/yaml" - "github.com/argoproj/pkg/humanize" - "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client" clusterworkflowtmplpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/clusterworkflowtemplate" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" ) func 
NewGetCommand() *cobra.Command { - var ( - output string - ) + var output string - var command = &cobra.Command{ + command := &cobra.Command{ Use: "get CLUSTER WORKFLOW_TEMPLATE...", Short: "display details about a cluster workflow template", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argo/commands/clustertemplate/lint.go b/cmd/argo/commands/clustertemplate/lint.go index a16bd8e5df7d..969690762730 100644 --- a/cmd/argo/commands/clustertemplate/lint.go +++ b/cmd/argo/commands/clustertemplate/lint.go @@ -5,9 +5,8 @@ import ( "os" "path/filepath" - "github.com/spf13/cobra" - "github.com/argoproj/pkg/errors" + "github.com/spf13/cobra" "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client" "github.com/argoproj/argo-workflows/v3/pkg/apiclient/clusterworkflowtemplate" @@ -15,10 +14,8 @@ import ( ) func NewLintCommand() *cobra.Command { - var ( - strict bool - ) - var command = &cobra.Command{ + var strict bool + command := &cobra.Command{ Use: "lint FILE...", Short: "validate files or directories of cluster workflow template manifests", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argo/commands/clustertemplate/list.go b/cmd/argo/commands/clustertemplate/list.go index 965cd3de9fe1..95295ecda598 100644 --- a/cmd/argo/commands/clustertemplate/list.go +++ b/cmd/argo/commands/clustertemplate/list.go @@ -18,10 +18,8 @@ type listFlags struct { } func NewListCommand() *cobra.Command { - var ( - listArgs listFlags - ) - var command = &cobra.Command{ + var listArgs listFlags + command := &cobra.Command{ Use: "list", Short: "list cluster workflow templates", Run: func(cmd *cobra.Command, args []string) { @@ -42,7 +40,6 @@ func NewListCommand() *cobra.Command { default: log.Fatalf("Unknown output mode: %s", listArgs.output) } - }, } command.Flags().StringVarP(&listArgs.output, "output", "o", "", "Output format. One of: wide|name") diff --git a/cmd/argo/commands/clustertemplate/root.go b/cmd/argo/commands/clustertemplate/root.go index 2ea8a972737e..3550f48b8b58 100644 --- a/cmd/argo/commands/clustertemplate/root.go +++ b/cmd/argo/commands/clustertemplate/root.go @@ -5,7 +5,7 @@ import ( ) func NewClusterTemplateCommand() *cobra.Command { - var command = &cobra.Command{ + command := &cobra.Command{ Use: "cluster-template", Aliases: []string{"cwftmpl", "cwft"}, Short: "manipulate cluster workflow templates", diff --git a/cmd/argo/commands/common.go b/cmd/argo/commands/common.go index f2c623ecf94a..6f941ac77f95 100644 --- a/cmd/argo/commands/common.go +++ b/cmd/argo/commands/common.go @@ -6,9 +6,8 @@ import ( "strconv" "strings" - "github.com/spf13/cobra" - log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" ) diff --git a/cmd/argo/commands/completion.go b/cmd/argo/commands/completion.go index 92ec0b00005d..60afda5d21e1 100644 --- a/cmd/argo/commands/completion.go +++ b/cmd/argo/commands/completion.go @@ -124,7 +124,7 @@ __argo_custom_func() { ) func NewCompletionCommand() *cobra.Command { - var command = &cobra.Command{ + command := &cobra.Command{ Use: "completion SHELL", Short: "output shell completion code for the specified shell (bash or zsh)", Long: `Write bash or zsh shell completion code to standard output. 
diff --git a/cmd/argo/commands/cron/create.go b/cmd/argo/commands/cron/create.go index 5f70f3ee0f45..f14e7a82ba84 100644 --- a/cmd/argo/commands/cron/create.go +++ b/cmd/argo/commands/cron/create.go @@ -10,7 +10,6 @@ import ( "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client" cronworkflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/cronworkflow" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/workflow/common" "github.com/argoproj/argo-workflows/v3/workflow/util" @@ -27,7 +26,7 @@ func NewCreateCommand() *cobra.Command { cliCreateOpts cliCreateOpts submitOpts wfv1.SubmitOpts ) - var command = &cobra.Command{ + command := &cobra.Command{ Use: "create FILE1 FILE2...", Short: "create a cron workflow", Run: func(cmd *cobra.Command, args []string) { @@ -48,7 +47,6 @@ func NewCreateCommand() *cobra.Command { } func CreateCronWorkflows(filePaths []string, cliOpts *cliCreateOpts, submitOpts *wfv1.SubmitOpts) { - ctx, apiClient := client.NewAPIClient() serviceClient := apiClient.NewCronWorkflowServiceClient() diff --git a/cmd/argo/commands/cron/delete.go b/cmd/argo/commands/cron/delete.go index 08db7458c525..0d1964d04fb3 100644 --- a/cmd/argo/commands/cron/delete.go +++ b/cmd/argo/commands/cron/delete.go @@ -10,11 +10,9 @@ import ( // NewDeleteCommand returns a new instance of an `argo delete` command func NewDeleteCommand() *cobra.Command { - var ( - all bool - ) + var all bool - var command = &cobra.Command{ + command := &cobra.Command{ Use: "delete [CRON_WORKFLOW... | --all]", Short: "delete a cron workflow", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argo/commands/cron/get.go b/cmd/argo/commands/cron/get.go index 281ca796ad59..e7bb75745f26 100644 --- a/cmd/argo/commands/cron/get.go +++ b/cmd/argo/commands/cron/get.go @@ -18,11 +18,9 @@ import ( ) func NewGetCommand() *cobra.Command { - var ( - output string - ) + var output string - var command = &cobra.Command{ + command := &cobra.Command{ Use: "get CRON_WORKFLOW...", Short: "display details about a cron workflow", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argo/commands/cron/lint.go b/cmd/argo/commands/cron/lint.go index 5d8184a7bf2d..9857e5879760 100644 --- a/cmd/argo/commands/cron/lint.go +++ b/cmd/argo/commands/cron/lint.go @@ -15,10 +15,8 @@ import ( ) func NewLintCommand() *cobra.Command { - var ( - strict bool - ) - var command = &cobra.Command{ + var strict bool + command := &cobra.Command{ Use: "lint FILE...", Short: "validate files or directories of cron workflow manifests", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argo/commands/cron/list.go b/cmd/argo/commands/cron/list.go index e14958734747..ca75c9cfee91 100644 --- a/cmd/argo/commands/cron/list.go +++ b/cmd/argo/commands/cron/list.go @@ -9,7 +9,6 @@ import ( "github.com/argoproj/pkg/errors" "github.com/argoproj/pkg/humanize" - "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -25,10 +24,8 @@ type listFlags struct { } func NewListCommand() *cobra.Command { - var ( - listArgs listFlags - ) - var command = &cobra.Command{ + var listArgs listFlags + command := &cobra.Command{ Use: "list", Short: "list cron workflows", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argo/commands/cron/resume.go b/cmd/argo/commands/cron/resume.go index 5bf8a01735db..59b807ae14fc 100644 --- a/cmd/argo/commands/cron/resume.go +++ b/cmd/argo/commands/cron/resume.go @@ -12,7 +12,7 @@ 
import ( // NewSuspendCommand returns a new instance of an `argo suspend` command func NewResumeCommand() *cobra.Command { - var command = &cobra.Command{ + command := &cobra.Command{ Use: "resume [CRON_WORKFLOW...]", Short: "resume zero or more cron workflows", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argo/commands/cron/root.go b/cmd/argo/commands/cron/root.go index 7971ea67bdf7..0af6f0385c55 100644 --- a/cmd/argo/commands/cron/root.go +++ b/cmd/argo/commands/cron/root.go @@ -5,7 +5,7 @@ import ( ) func NewCronWorkflowCommand() *cobra.Command { - var command = &cobra.Command{ + command := &cobra.Command{ Use: "cron", Short: "manage cron workflows", Long: `NextScheduledRun assumes that the workflow-controller uses UTC as its timezone`, diff --git a/cmd/argo/commands/cron/suspend.go b/cmd/argo/commands/cron/suspend.go index aa97a024b58d..5209f12a8eb4 100644 --- a/cmd/argo/commands/cron/suspend.go +++ b/cmd/argo/commands/cron/suspend.go @@ -12,7 +12,7 @@ import ( // NewSuspendCommand returns a new instance of an `argo suspend` command func NewSuspendCommand() *cobra.Command { - var command = &cobra.Command{ + command := &cobra.Command{ Use: "suspend CRON_WORKFLOW...", Short: "suspend zero or more cron workflows", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argo/commands/delete.go b/cmd/argo/commands/delete.go index 5a45704ca357..e145bced7c0a 100644 --- a/cmd/argo/commands/delete.go +++ b/cmd/argo/commands/delete.go @@ -23,7 +23,7 @@ func NewDeleteCommand() *cobra.Command { allNamespaces bool dryRun bool ) - var command = &cobra.Command{ + command := &cobra.Command{ Use: "delete [--dry-run] [WORKFLOW...|[--all] [--older] [--completed] [--resubmitted] [--prefix PREFIX] [--selector SELECTOR]]", Short: "delete workflows", Example: `# Delete a workflow: diff --git a/cmd/argo/commands/get.go b/cmd/argo/commands/get.go index 15a222702b81..37742ef66906 100644 --- a/cmd/argo/commands/get.go +++ b/cmd/argo/commands/get.go @@ -53,11 +53,9 @@ func (g getFlags) shouldPrint(node wfv1.NodeStatus) bool { } func NewGetCommand() *cobra.Command { - var ( - getArgs getFlags - ) + var getArgs getFlags - var command = &cobra.Command{ + command := &cobra.Command{ Use: "get WORKFLOW...", Short: "display details about a workflow", Example: `# Get information about a workflow: @@ -315,7 +313,6 @@ func insertSorted(wf *wfv1.Workflow, sortedArray []renderNode, item renderNode) func attachToParent(wf *wfv1.Workflow, n renderNode, nonBoundaryParentChildrenMap map[string]*nonBoundaryParentNode, boundaryID string, boundaryNodeMap map[string]*boundaryNode, parentBoundaryMap map[string][]renderNode) bool { - // Check first if I am a child of a nonBoundaryParent // that implies I attach to that instead of my boundary. 
This was already // figured out in Pass 1 @@ -344,7 +341,6 @@ func attachToParent(wf *wfv1.Workflow, n renderNode, // This takes the map of NodeStatus and converts them into a forrest // of trees of renderNodes and returns the set of roots for each tree func convertToRenderTrees(wf *wfv1.Workflow) map[string]renderNode { - renderTreeRoots := make(map[string]renderNode) // Used to store all boundary nodes so future render children can attach diff --git a/cmd/argo/commands/get_test.go b/cmd/argo/commands/get_test.go index 56fb96a99f07..05509f169ab7 100644 --- a/cmd/argo/commands/get_test.go +++ b/cmd/argo/commands/get_test.go @@ -14,12 +14,12 @@ import ( testutil "github.com/argoproj/argo-workflows/v3/test/util" ) -func testPrintNodeImpl(t *testing.T, expected string, node wfv1.NodeStatus, nodePrefix string, getArgs getFlags) { +func testPrintNodeImpl(t *testing.T, expected string, node wfv1.NodeStatus, getArgs getFlags) { var result bytes.Buffer w := tabwriter.NewWriter(&result, 0, 8, 1, '\t', 0) filtered, _ := filterNode(node, getArgs) if !filtered { - printNode(w, node, nodePrefix, getArgs) + printNode(w, node, "", getArgs) } err := w.Flush() assert.NoError(t, err) @@ -30,7 +30,6 @@ func testPrintNodeImpl(t *testing.T, expected string, node wfv1.NodeStatus, node func TestPrintNode(t *testing.T) { nodeName := "testNode" kubernetesNodeName := "testKnodeName" - nodePrefix := "" nodeTemplateName := "testTemplate" nodeTemplateRefName := "testTemplateRef" nodeID := "testID" @@ -52,55 +51,55 @@ func TestPrintNode(t *testing.T) { Message: nodeMessage, } node.HostNodeName = kubernetesNodeName - testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s\t%s\t%s\t%s\t%s\n", jobStatusIconMap[wfv1.NodeRunning], nodeName, "", nodeID, "0s", nodeMessage, ""), node, nodePrefix, getArgs) + testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s\t%s\t%s\t%s\t%s\n", jobStatusIconMap[wfv1.NodeRunning], nodeName, "", nodeID, "0s", nodeMessage, ""), node, getArgs) // Compatibility test getArgs.status = "Running" - testPrintNodeImpl(t, fmt.Sprintf("%s %s\t\t%s\t%s\t%s\t\n", jobStatusIconMap[wfv1.NodeRunning], nodeName, nodeID, "0s", nodeMessage), node, nodePrefix, getArgs) + testPrintNodeImpl(t, fmt.Sprintf("%s %s\t\t%s\t%s\t%s\t\n", jobStatusIconMap[wfv1.NodeRunning], nodeName, nodeID, "0s", nodeMessage), node, getArgs) getArgs.status = "" getArgs.nodeFieldSelectorString = "phase=Running" - testPrintNodeImpl(t, fmt.Sprintf("%s %s\t\t%s\t%s\t%s\t\n", jobStatusIconMap[wfv1.NodeRunning], nodeName, nodeID, "0s", nodeMessage), node, nodePrefix, getArgs) + testPrintNodeImpl(t, fmt.Sprintf("%s %s\t\t%s\t%s\t%s\t\n", jobStatusIconMap[wfv1.NodeRunning], nodeName, nodeID, "0s", nodeMessage), node, getArgs) getArgs.nodeFieldSelectorString = "phase!=foobar" - testPrintNodeImpl(t, fmt.Sprintf("%s %s\t\t%s\t%s\t%s\t\n", jobStatusIconMap[wfv1.NodeRunning], nodeName, nodeID, "0s", nodeMessage), node, nodePrefix, getArgs) + testPrintNodeImpl(t, fmt.Sprintf("%s %s\t\t%s\t%s\t%s\t\n", jobStatusIconMap[wfv1.NodeRunning], nodeName, nodeID, "0s", nodeMessage), node, getArgs) getArgs.nodeFieldSelectorString = "phase!=Running" - testPrintNodeImpl(t, "", node, nodePrefix, getArgs) + testPrintNodeImpl(t, "", node, getArgs) // Compatibility test getArgs.nodeFieldSelectorString = "" getArgs.status = "foobar" - testPrintNodeImpl(t, "", node, nodePrefix, getArgs) + testPrintNodeImpl(t, "", node, getArgs) getArgs.status = "" getArgs.nodeFieldSelectorString = "phase=foobar" - testPrintNodeImpl(t, "", node, nodePrefix, getArgs) + testPrintNodeImpl(t, "", node, getArgs) 
getArgs = getFlags{ output: "", } node.TemplateName = nodeTemplateName - testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s\t%s\t%s\t%s\t%s\n", jobStatusIconMap[wfv1.NodeRunning], nodeName, nodeTemplateName, nodeID, "0s", nodeMessage, ""), node, nodePrefix, getArgs) + testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s\t%s\t%s\t%s\t%s\n", jobStatusIconMap[wfv1.NodeRunning], nodeName, nodeTemplateName, nodeID, "0s", nodeMessage, ""), node, getArgs) node.Type = wfv1.NodeTypeSuspend - testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s\t%s\t%s\t%s\t%s\n", nodeTypeIconMap[wfv1.NodeTypeSuspend], nodeName, nodeTemplateName, "", "", nodeMessage, ""), node, nodePrefix, getArgs) + testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s\t%s\t%s\t%s\t%s\n", nodeTypeIconMap[wfv1.NodeTypeSuspend], nodeName, nodeTemplateName, "", "", nodeMessage, ""), node, getArgs) node.TemplateRef = &wfv1.TemplateRef{ Name: nodeTemplateRefName, Template: nodeTemplateRefName, } - testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s/%s\t%s\t%s\t%s\t%s\n", nodeTypeIconMap[wfv1.NodeTypeSuspend], nodeName, nodeTemplateRefName, nodeTemplateRefName, "", "", nodeMessage, ""), node, nodePrefix, getArgs) + testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s/%s\t%s\t%s\t%s\t%s\n", nodeTypeIconMap[wfv1.NodeTypeSuspend], nodeName, nodeTemplateRefName, nodeTemplateRefName, "", "", nodeMessage, ""), node, getArgs) getArgs.output = "wide" - testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s/%s\t%s\t%s\t%s\t%s\t%s\t\n", nodeTypeIconMap[wfv1.NodeTypeSuspend], nodeName, nodeTemplateRefName, nodeTemplateRefName, "", "", getArtifactsString(node), nodeMessage, ""), node, nodePrefix, getArgs) + testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s/%s\t%s\t%s\t%s\t%s\t%s\t\n", nodeTypeIconMap[wfv1.NodeTypeSuspend], nodeName, nodeTemplateRefName, nodeTemplateRefName, "", "", getArtifactsString(node), nodeMessage, ""), node, getArgs) node.Type = wfv1.NodeTypePod - testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s/%s\t%s\t%s\t%s\t%s\t%s\t%s\n", jobStatusIconMap[wfv1.NodeRunning], nodeName, nodeTemplateRefName, nodeTemplateRefName, nodeID, "0s", getArtifactsString(node), nodeMessage, "", kubernetesNodeName), node, nodePrefix, getArgs) + testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s/%s\t%s\t%s\t%s\t%s\t%s\t%s\n", jobStatusIconMap[wfv1.NodeRunning], nodeName, nodeTemplateRefName, nodeTemplateRefName, nodeID, "0s", getArtifactsString(node), nodeMessage, "", kubernetesNodeName), node, getArgs) getArgs.status = "foobar" - testPrintNodeImpl(t, "", node, nodePrefix, getArgs) + testPrintNodeImpl(t, "", node, getArgs) } func TestStatusToNodeFieldSelector(t *testing.T) { diff --git a/cmd/argo/commands/lint.go b/cmd/argo/commands/lint.go index 7d9928fa7fb5..f5e30c638d5f 100644 --- a/cmd/argo/commands/lint.go +++ b/cmd/argo/commands/lint.go @@ -15,10 +15,8 @@ import ( ) func NewLintCommand() *cobra.Command { - var ( - strict bool - ) - var command = &cobra.Command{ + var strict bool + command := &cobra.Command{ Use: "lint FILE...", Short: "validate files or directories of workflow manifests", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argo/commands/list.go b/cmd/argo/commands/list.go index 8e7dfa7fe554..40269d67c479 100644 --- a/cmd/argo/commands/list.go +++ b/cmd/argo/commands/list.go @@ -42,7 +42,7 @@ func NewListCommand() *cobra.Command { listArgs listFlags allNamespaces bool ) - var command = &cobra.Command{ + command := &cobra.Command{ Use: "list", Short: "list workflows", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argo/commands/list_test.go b/cmd/argo/commands/list_test.go index 
101b2a787d10..f6339ad28add 100644 --- a/cmd/argo/commands/list_test.go +++ b/cmd/argo/commands/list_test.go @@ -5,8 +5,6 @@ import ( "testing" "time" - "github.com/argoproj/argo-workflows/v3/workflow/common" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -14,6 +12,7 @@ import ( "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow" workflowmocks "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow/mocks" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/workflow/common" ) func Test_listWorkflows(t *testing.T) { diff --git a/cmd/argo/commands/logs.go b/cmd/argo/commands/logs.go index 25e19301fe59..5da986dc7b34 100644 --- a/cmd/argo/commands/logs.go +++ b/cmd/argo/commands/logs.go @@ -25,7 +25,7 @@ func NewLogsCommand() *cobra.Command { tailLines int64 ) logOptions := &corev1.PodLogOptions{} - var command = &cobra.Command{ + command := &cobra.Command{ Use: "logs WORKFLOW [POD]", Short: "view logs of a pod or workflow", Example: `# Print the logs of a workflow: @@ -52,7 +52,6 @@ func NewLogsCommand() *cobra.Command { argo logs @latest `, Run: func(cmd *cobra.Command, args []string) { - // parse all the args workflow := "" podName := "" diff --git a/cmd/argo/commands/node.go b/cmd/argo/commands/node.go index ae355e2897de..ee73d79fe3d3 100644 --- a/cmd/argo/commands/node.go +++ b/cmd/argo/commands/node.go @@ -23,11 +23,9 @@ type setOps struct { } func NewNodeCommand() *cobra.Command { - var ( - setArgs setOps - ) + var setArgs setOps - var command = &cobra.Command{ + command := &cobra.Command{ Use: "node ACTION WORKFLOW FLAGS", Short: "perform action on a node in a workflow", Example: `# Set outputs to a node within a workflow: @@ -39,7 +37,6 @@ func NewNodeCommand() *cobra.Command { argo node set my-wf --message "We did it!"" --node-field-selector displayName=approve `, Run: func(cmd *cobra.Command, args []string) { - if len(args) < 1 { cmd.HelpFunc()(cmd, args) } diff --git a/cmd/argo/commands/resubmit.go b/cmd/argo/commands/resubmit.go index ec67aac7284f..1dfa11baee56 100644 --- a/cmd/argo/commands/resubmit.go +++ b/cmd/argo/commands/resubmit.go @@ -14,7 +14,7 @@ func NewResubmitCommand() *cobra.Command { priority int32 cliSubmitOpts cliSubmitOpts ) - var command = &cobra.Command{ + command := &cobra.Command{ Use: "resubmit [WORKFLOW...]", Short: "resubmit one or more workflows", Example: `# Resubmit a workflow: diff --git a/cmd/argo/commands/resume.go b/cmd/argo/commands/resume.go index c4263ce80241..6ef2b1e7e31f 100644 --- a/cmd/argo/commands/resume.go +++ b/cmd/argo/commands/resume.go @@ -16,11 +16,9 @@ type resumeOps struct { } func NewResumeCommand() *cobra.Command { - var ( - resumeArgs resumeOps - ) + var resumeArgs resumeOps - var command = &cobra.Command{ + command := &cobra.Command{ Use: "resume WORKFLOW1 WORKFLOW2...", Short: "resume zero or more workflows", Example: `# Resume a workflow that has been stopped or suspended: @@ -51,7 +49,6 @@ func NewResumeCommand() *cobra.Command { } fmt.Printf("workflow %s resumed\n", wfName) } - }, } command.Flags().StringVar(&resumeArgs.nodeFieldSelector, "node-field-selector", "", "selector of node to resume, eg: --node-field-selector inputs.paramaters.myparam.value=abc") diff --git a/cmd/argo/commands/retry.go b/cmd/argo/commands/retry.go index c96e0e901137..283c353c2972 100644 --- a/cmd/argo/commands/retry.go +++ b/cmd/argo/commands/retry.go @@ -21,7 +21,7 @@ func NewRetryCommand() 
*cobra.Command { cliSubmitOpts cliSubmitOpts retryOps retryOps ) - var command = &cobra.Command{ + command := &cobra.Command{ Use: "retry [WORKFLOW...]", Short: "retry zero or more workflows", Example: `# Retry a workflow: diff --git a/cmd/argo/commands/root.go b/cmd/argo/commands/root.go index 92507fb352f2..e7c703fc7584 100644 --- a/cmd/argo/commands/root.go +++ b/cmd/argo/commands/root.go @@ -22,7 +22,7 @@ const ( // NewCommand returns a new instance of an argo command func NewCommand() *cobra.Command { - var command = &cobra.Command{ + command := &cobra.Command{ Use: CLIName, Short: "argo is the command line interface to Argo", Long: ` diff --git a/cmd/argo/commands/server.go b/cmd/argo/commands/server.go index 077064298950..4f508893280e 100644 --- a/cmd/argo/commands/server.go +++ b/cmd/argo/commands/server.go @@ -43,7 +43,7 @@ func NewServerCommand() *cobra.Command { accessControlAllowOrigin string ) - var command = cobra.Command{ + command := cobra.Command{ Use: "server", Short: "Start the Argo Server", Example: fmt.Sprintf(` diff --git a/cmd/argo/commands/stop.go b/cmd/argo/commands/stop.go index 880057597b2c..c2b7806dffc5 100644 --- a/cmd/argo/commands/stop.go +++ b/cmd/argo/commands/stop.go @@ -18,11 +18,9 @@ type stopOps struct { } func NewStopCommand() *cobra.Command { - var ( - stopArgs stopOps - ) + var stopArgs stopOps - var command = &cobra.Command{ + command := &cobra.Command{ Use: "stop WORKFLOW WORKFLOW2...", Short: "stop zero or more workflows allowing all exit handlers to run", Example: `# Stop a workflow: @@ -33,7 +31,6 @@ func NewStopCommand() *cobra.Command { argo stop @latest `, Run: func(cmd *cobra.Command, args []string) { - ctx, apiClient := client.NewAPIClient() serviceClient := apiClient.NewWorkflowServiceClient() namespace := client.Namespace() diff --git a/cmd/argo/commands/submit.go b/cmd/argo/commands/submit.go index f3a48aef7556..0e14cc0b313a 100644 --- a/cmd/argo/commands/submit.go +++ b/cmd/argo/commands/submit.go @@ -38,7 +38,7 @@ func NewSubmitCommand() *cobra.Command { priority int32 from string ) - var command = &cobra.Command{ + command := &cobra.Command{ Use: "submit [FILE... | --from `kind/name]", Short: "submit a workflow", Example: `# Submit multiple workflows from files: @@ -157,7 +157,6 @@ func validateOptions(workflows []wfv1.Workflow, submitOpts *wfv1.SubmitOpts, cli } func submitWorkflowFromResource(ctx context.Context, serviceClient workflowpkg.WorkflowServiceClient, namespace string, resourceIdentifier string, submitOpts *wfv1.SubmitOpts, cliOpts *cliSubmitOpts) { - parts := strings.SplitN(resourceIdentifier, "/", 2) if len(parts) != 2 { log.Fatalf("resource identifier '%s' is malformed. Should be `kind/name`, e.g. 
cronwf/hello-world-cwf", resourceIdentifier) @@ -185,7 +184,6 @@ func submitWorkflowFromResource(ctx context.Context, serviceClient workflowpkg.W } func submitWorkflows(ctx context.Context, serviceClient workflowpkg.WorkflowServiceClient, namespace string, workflows []wfv1.Workflow, submitOpts *wfv1.SubmitOpts, cliOpts *cliSubmitOpts) { - validateOptions(workflows, submitOpts, cliOpts) if len(workflows) == 0 { diff --git a/cmd/argo/commands/suspend.go b/cmd/argo/commands/suspend.go index a734a1cb5f8b..5f900f90d302 100644 --- a/cmd/argo/commands/suspend.go +++ b/cmd/argo/commands/suspend.go @@ -11,7 +11,7 @@ import ( ) func NewSuspendCommand() *cobra.Command { - var command = &cobra.Command{ + command := &cobra.Command{ Use: "suspend WORKFLOW1 WORKFLOW2...", Short: "suspend zero or more workflow", Example: `# Suspend a workflow: diff --git a/cmd/argo/commands/template/create.go b/cmd/argo/commands/template/create.go index 0e8848b8049b..b688b5c6f053 100644 --- a/cmd/argo/commands/template/create.go +++ b/cmd/argo/commands/template/create.go @@ -20,10 +20,8 @@ type cliCreateOpts struct { } func NewCreateCommand() *cobra.Command { - var ( - cliCreateOpts cliCreateOpts - ) - var command = &cobra.Command{ + var cliCreateOpts cliCreateOpts + command := &cobra.Command{ Use: "create FILE1 FILE2...", Short: "create a workflow template", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argo/commands/template/delete.go b/cmd/argo/commands/template/delete.go index f4f681fe30cc..12f10a9c8b7b 100644 --- a/cmd/argo/commands/template/delete.go +++ b/cmd/argo/commands/template/delete.go @@ -6,7 +6,6 @@ import ( "log" "github.com/argoproj/pkg/errors" - "github.com/spf13/cobra" "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client" @@ -15,11 +14,9 @@ import ( // NewDeleteCommand returns a new instance of an `argo delete` command func NewDeleteCommand() *cobra.Command { - var ( - all bool - ) + var all bool - var command = &cobra.Command{ + command := &cobra.Command{ Use: "delete WORKFLOW_TEMPLATE", Short: "delete a workflow template", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argo/commands/template/get.go b/cmd/argo/commands/template/get.go index 5482d5c44e7e..2a73a085305c 100644 --- a/cmd/argo/commands/template/get.go +++ b/cmd/argo/commands/template/get.go @@ -5,22 +5,19 @@ import ( "fmt" "log" + "github.com/argoproj/pkg/humanize" "github.com/spf13/cobra" "sigs.k8s.io/yaml" - "github.com/argoproj/pkg/humanize" - "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client" workflowtemplatepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowtemplate" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" ) func NewGetCommand() *cobra.Command { - var ( - output string - ) + var output string - var command = &cobra.Command{ + command := &cobra.Command{ Use: "get WORKFLOW_TEMPLATE...", Short: "display details about a workflow template", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argo/commands/template/lint.go b/cmd/argo/commands/template/lint.go index 471005efed71..4bd9eb929f57 100644 --- a/cmd/argo/commands/template/lint.go +++ b/cmd/argo/commands/template/lint.go @@ -17,10 +17,8 @@ import ( ) func NewLintCommand() *cobra.Command { - var ( - strict bool - ) - var command = &cobra.Command{ + var strict bool + command := &cobra.Command{ Use: "lint (DIRECTORY | FILE1 FILE2 FILE3...)", Short: "validate a file or directory of workflow template manifests", Run: func(cmd *cobra.Command, args []string) { diff --git 
a/cmd/argo/commands/template/list.go b/cmd/argo/commands/template/list.go index af9798da3279..2fab3026c73f 100644 --- a/cmd/argo/commands/template/list.go +++ b/cmd/argo/commands/template/list.go @@ -20,10 +20,8 @@ type listFlags struct { } func NewListCommand() *cobra.Command { - var ( - listArgs listFlags - ) - var command = &cobra.Command{ + var listArgs listFlags + command := &cobra.Command{ Use: "list", Short: "list workflow templates", Run: func(cmd *cobra.Command, args []string) { @@ -49,7 +47,6 @@ func NewListCommand() *cobra.Command { default: log.Fatalf("Unknown output mode: %s", listArgs.output) } - }, } command.Flags().BoolVarP(&listArgs.allNamespaces, "all-namespaces", "A", false, "Show workflows from all namespaces") diff --git a/cmd/argo/commands/template/root.go b/cmd/argo/commands/template/root.go index ad3154f89923..370a8978142e 100644 --- a/cmd/argo/commands/template/root.go +++ b/cmd/argo/commands/template/root.go @@ -5,7 +5,7 @@ import ( ) func NewTemplateCommand() *cobra.Command { - var command = &cobra.Command{ + command := &cobra.Command{ Use: "template", Short: "manipulate workflow templates", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argo/commands/terminate.go b/cmd/argo/commands/terminate.go index c3d22c819797..6019998509e8 100644 --- a/cmd/argo/commands/terminate.go +++ b/cmd/argo/commands/terminate.go @@ -43,7 +43,7 @@ func (t *terminateOption) convertToWorkflows(names []string) wfv1.Workflows { func NewTerminateCommand() *cobra.Command { t := &terminateOption{} - var command = &cobra.Command{ + command := &cobra.Command{ Use: "terminate WORKFLOW WORKFLOW2...", Short: "terminate zero or more workflows immediately", Example: `# Terminate a workflow: diff --git a/cmd/argo/commands/wait.go b/cmd/argo/commands/wait.go index 8a97e68cf153..913bb8102d49 100644 --- a/cmd/argo/commands/wait.go +++ b/cmd/argo/commands/wait.go @@ -21,10 +21,8 @@ import ( ) func NewWaitCommand() *cobra.Command { - var ( - ignoreNotFound bool - ) - var command = &cobra.Command{ + var ignoreNotFound bool + command := &cobra.Command{ Use: "wait [WORKFLOW...]", Short: "waits for workflows to complete", Example: `# Wait on a workflow: diff --git a/cmd/argo/commands/watch.go b/cmd/argo/commands/watch.go index 6a48ee41eacb..7793bf0d2aba 100644 --- a/cmd/argo/commands/watch.go +++ b/cmd/argo/commands/watch.go @@ -14,18 +14,15 @@ import ( "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client" workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/util" "github.com/argoproj/argo-workflows/v3/workflow/packer" ) func NewWatchCommand() *cobra.Command { - var ( - getArgs getFlags - ) + var getArgs getFlags - var command = &cobra.Command{ + command := &cobra.Command{ Use: "watch WORKFLOW", Short: "watch a workflow until it completes", Example: `# Watch a workflow: diff --git a/cmd/argo/main.go b/cmd/argo/main.go index 550597db3d9b..81fc7721a211 100644 --- a/cmd/argo/main.go +++ b/cmd/argo/main.go @@ -4,9 +4,10 @@ import ( "fmt" "os" - "github.com/argoproj/argo-workflows/v3/cmd/argo/commands" // load authentication plugin for obtaining credentials from cloud providers. 
_ "k8s.io/client-go/plugin/pkg/client/auth" + + "github.com/argoproj/argo-workflows/v3/cmd/argo/commands" ) func main() { diff --git a/cmd/argoexec/commands/init.go b/cmd/argoexec/commands/init.go index 733544606291..6e0c3dc50c3a 100644 --- a/cmd/argoexec/commands/init.go +++ b/cmd/argoexec/commands/init.go @@ -9,7 +9,7 @@ import ( ) func NewInitCommand() *cobra.Command { - var command = cobra.Command{ + command := cobra.Command{ Use: "init", Short: "Load artifacts", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argoexec/commands/resource.go b/cmd/argoexec/commands/resource.go index 27716b794b6c..5dc6fc875242 100644 --- a/cmd/argoexec/commands/resource.go +++ b/cmd/argoexec/commands/resource.go @@ -12,7 +12,7 @@ import ( ) func NewResourceCommand() *cobra.Command { - var command = cobra.Command{ + command := cobra.Command{ Use: "resource (get|create|apply|delete) MANIFEST", Short: "update a resource and wait for resource conditions", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argoexec/commands/root.go b/cmd/argoexec/commands/root.go index 0dab6fa2329f..698a8100a557 100644 --- a/cmd/argoexec/commands/root.go +++ b/cmd/argoexec/commands/root.go @@ -51,7 +51,7 @@ func initConfig() { } func NewRootCommand() *cobra.Command { - var command = cobra.Command{ + command := cobra.Command{ Use: CLIName, Short: "argoexec is the executor sidecar to workflow containers", Run: func(cmd *cobra.Command, args []string) { @@ -99,7 +99,7 @@ func initExecutor() *executor.WorkflowExecutor { var cre executor.ContainerRuntimeExecutor switch executorType { case common.ContainerRuntimeExecutorK8sAPI: - cre, err = k8sapi.NewK8sAPIExecutor(clientset, config, podName, namespace) + cre = k8sapi.NewK8sAPIExecutor(clientset, config, podName, namespace) case common.ContainerRuntimeExecutorKubelet: cre, err = kubelet.NewKubeletExecutor(namespace, podName) case common.ContainerRuntimeExecutorPNS: diff --git a/cmd/argoexec/commands/wait.go b/cmd/argoexec/commands/wait.go index efb46b8909e9..1541d92fd036 100644 --- a/cmd/argoexec/commands/wait.go +++ b/cmd/argoexec/commands/wait.go @@ -10,7 +10,7 @@ import ( ) func NewWaitCommand() *cobra.Command { - var command = cobra.Command{ + command := cobra.Command{ Use: "wait", Short: "wait for main container to finish and save artifacts", Run: func(cmd *cobra.Command, args []string) { diff --git a/cmd/argoexec/main.go b/cmd/argoexec/main.go index 3cb4854fc70d..604d82b865be 100644 --- a/cmd/argoexec/main.go +++ b/cmd/argoexec/main.go @@ -4,9 +4,10 @@ import ( "fmt" "os" - "github.com/argoproj/argo-workflows/v3/cmd/argoexec/commands" // load authentication plugin for obtaining credentials from cloud providers. 
_ "k8s.io/client-go/plugin/pkg/client/auth" + + "github.com/argoproj/argo-workflows/v3/cmd/argoexec/commands" ) func main() { diff --git a/cmd/workflow-controller/main.go b/cmd/workflow-controller/main.go index 50b6c9249b5d..060a51bb1800 100644 --- a/cmd/workflow-controller/main.go +++ b/cmd/workflow-controller/main.go @@ -52,7 +52,7 @@ func NewRootCommand() *cobra.Command { managedNamespace string // --managed-namespace ) - var command = cobra.Command{ + command := cobra.Command{ Use: CLIName, Short: "workflow-controller is the controller to operate on workflows", RunE: func(c *cobra.Command, args []string) error { diff --git a/config/config.go b/config/config.go index 18622a72516c..6367257bf269 100644 --- a/config/config.go +++ b/config/config.go @@ -101,7 +101,7 @@ type Config struct { // WorkflowRestrictions restricts the controller to executing Workflows that meet certain restrictions WorkflowRestrictions *WorkflowRestrictions `json:"workflowRestrictions,omitempty"` - //Adding configurable initial delay (for K8S clusters with mutating webhooks) to prevent workflow getting modified by MWC. + // Adding configurable initial delay (for K8S clusters with mutating webhooks) to prevent workflow getting modified by MWC. InitialDelay metav1.Duration `json:"initialDelay,omitempty"` } diff --git a/config/controller.go b/config/controller.go index c681ac19d0bd..84f029e1f7bf 100644 --- a/config/controller.go +++ b/config/controller.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + log "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" apierr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -13,8 +14,6 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" - - log "github.com/sirupsen/logrus" "sigs.k8s.io/yaml" ) diff --git a/errors/errors.go b/errors/errors.go index 1fbe662557a7..545bfef3fbc1 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -157,5 +157,4 @@ func IsCode(code string, err error) bool { return argoErr.code == code } return false - } diff --git a/hack/docgen.go b/hack/docgen.go index e8d8615f5600..d808e4e748cf 100644 --- a/hack/docgen.go +++ b/hack/docgen.go @@ -200,8 +200,10 @@ type Set map[string]bool func NewDocGeneratorContext() *DocGeneratorContext { return &DocGeneratorContext{ doneFields: make(Set), - queue: []string{"io.argoproj.workflow.v1alpha1.Workflow", "io.argoproj.workflow.v1alpha1.CronWorkflow", - "io.argoproj.workflow.v1alpha1.WorkflowTemplate"}, + queue: []string{ + "io.argoproj.workflow.v1alpha1.Workflow", "io.argoproj.workflow.v1alpha1.CronWorkflow", + "io.argoproj.workflow.v1alpha1.WorkflowTemplate", + }, external: []string{}, index: make(map[string]Set), jsonName: make(map[string]string), diff --git a/hack/swagger/kubeifyswagger.go b/hack/swagger/kubeifyswagger.go index 0ba30599e1d3..cc68c626965e 100644 --- a/hack/swagger/kubeifyswagger.go +++ b/hack/swagger/kubeifyswagger.go @@ -33,7 +33,7 @@ func kubeifySwagger(in, out string) { } } - //loop again to handle any new bad definitions + // loop again to handle any new bad definitions for _, d := range definitions { props, ok := d.(obj)["properties"].(obj) if ok { diff --git a/hack/swagger/types.go b/hack/swagger/types.go index 4ef70413d4ed..c787c00d5f0c 100644 --- a/hack/swagger/types.go +++ b/hack/swagger/types.go @@ -1,4 +1,6 @@ package main -type obj = map[string]interface{} -type array = []interface{} +type ( + obj = map[string]interface{} + array = []interface{} +) diff --git 
a/persist/sqldb/explosive_offload_node_status_repo.go b/persist/sqldb/explosive_offload_node_status_repo.go index 5e4a621444b4..85716a972a48 100644 --- a/persist/sqldb/explosive_offload_node_status_repo.go +++ b/persist/sqldb/explosive_offload_node_status_repo.go @@ -6,11 +6,12 @@ import ( wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" ) -var ExplosiveOffloadNodeStatusRepo OffloadNodeStatusRepo = &explosiveOffloadNodeStatusRepo{} -var OffloadNotSupportedError = fmt.Errorf("offload node status is not supported") +var ( + ExplosiveOffloadNodeStatusRepo OffloadNodeStatusRepo = &explosiveOffloadNodeStatusRepo{} + OffloadNotSupportedError = fmt.Errorf("offload node status is not supported") +) -type explosiveOffloadNodeStatusRepo struct { -} +type explosiveOffloadNodeStatusRepo struct{} func (n *explosiveOffloadNodeStatusRepo) IsEnabled() bool { return false diff --git a/persist/sqldb/null_workflow_archive.go b/persist/sqldb/null_workflow_archive.go index 6788a4b874d1..e572ca096ca7 100644 --- a/persist/sqldb/null_workflow_archive.go +++ b/persist/sqldb/null_workflow_archive.go @@ -11,8 +11,7 @@ import ( var NullWorkflowArchive WorkflowArchive = &nullWorkflowArchive{} -type nullWorkflowArchive struct { -} +type nullWorkflowArchive struct{} func (r *nullWorkflowArchive) IsEnabled() bool { return false diff --git a/persist/sqldb/offload_node_status_repo.go b/persist/sqldb/offload_node_status_repo.go index f6c6804b77d6..2daa4de7ddda 100644 --- a/persist/sqldb/offload_node_status_repo.go +++ b/persist/sqldb/offload_node_status_repo.go @@ -7,13 +7,12 @@ import ( "strings" "time" - "github.com/argoproj/argo-workflows/v3/util/env" - log "github.com/sirupsen/logrus" "upper.io/db.v3" "upper.io/db.v3/lib/sqlbuilder" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo-workflows/v3/util/env" ) const OffloadNodeStatusDisabled = "Workflow has offloaded nodes, but offloading has been disabled" @@ -71,7 +70,6 @@ func nodeStatusVersion(s wfv1.Nodes) (string, string, error) { } func (wdc *nodeOffloadRepo) Save(uid, namespace string, nodes wfv1.Nodes) (string, error) { - marshalled, version, err := nodeStatusVersion(nodes) if err != nil { return "", err diff --git a/persist/sqldb/sqldb.go b/persist/sqldb/sqldb.go index 7b3d68660645..5230737231ec 100644 --- a/persist/sqldb/sqldb.go +++ b/persist/sqldb/sqldb.go @@ -34,7 +34,6 @@ func CreateDBSession(kubectlConfig kubernetes.Interface, namespace string, persi // CreatePostGresDBSession creates postgresDB session func CreatePostGresDBSession(kubectlConfig kubernetes.Interface, namespace string, cfg *config.PostgreSQLConfig, persistPool *config.ConnectionPool) (sqlbuilder.Database, string, error) { - if cfg.TableName == "" { return nil, "", errors.InternalError("tableName is empty") } @@ -49,7 +48,7 @@ func CreatePostGresDBSession(kubectlConfig kubernetes.Interface, namespace strin return nil, "", err } - var settings = postgresql.ConnectionURL{ + settings := postgresql.ConnectionURL{ User: string(userNameByte), Password: string(passwordByte), Host: cfg.GetHostname(), @@ -80,7 +79,6 @@ func CreatePostGresDBSession(kubectlConfig kubernetes.Interface, namespace strin // CreateMySQLDBSession creates Mysql DB session func CreateMySQLDBSession(kubectlConfig kubernetes.Interface, namespace string, cfg *config.MySQLConfig, persistPool *config.ConnectionPool) (sqlbuilder.Database, string, error) { - if cfg.TableName == "" { return nil, "", errors.InternalError("tableName is empty") } diff --git 
a/persist/sqldb/workflow_archive.go b/persist/sqldb/workflow_archive.go index 176f45c6a0fb..3918c218b809 100644 --- a/persist/sqldb/workflow_archive.go +++ b/persist/sqldb/workflow_archive.go @@ -17,8 +17,10 @@ import ( "github.com/argoproj/argo-workflows/v3/util/instanceid" ) -const archiveTableName = "argo_archived_workflows" -const archiveLabelsTableName = archiveTableName + "_labels" +const ( + archiveTableName = "argo_archived_workflows" + archiveLabelsTableName = archiveTableName + "_labels" +) type archivedWorkflowMetadata struct { ClusterName string `db:"clustername"` diff --git a/pkg/apiclient/argo-kube-client.go b/pkg/apiclient/argo-kube-client.go index fac1eb110b8a..780af2e14c83 100644 --- a/pkg/apiclient/argo-kube-client.go +++ b/pkg/apiclient/argo-kube-client.go @@ -27,8 +27,10 @@ import ( "github.com/argoproj/argo-workflows/v3/util/instanceid" ) -var argoKubeOffloadNodeStatusRepo = sqldb.ExplosiveOffloadNodeStatusRepo -var NoArgoServerErr = fmt.Errorf("this is impossible if you are not using the Argo Server, see " + help.CLI) +var ( + argoKubeOffloadNodeStatusRepo = sqldb.ExplosiveOffloadNodeStatusRepo + NoArgoServerErr = fmt.Errorf("this is impossible if you are not using the Argo Server, see " + help.CLI) +) type argoKubeClient struct { instanceIDService instanceid.Service diff --git a/pkg/apiclient/http1/cluster-workflow-template-service-client.go b/pkg/apiclient/http1/cluster-workflow-template-service-client.go index 2306cd7265a4..1182f94598b6 100644 --- a/pkg/apiclient/http1/cluster-workflow-template-service-client.go +++ b/pkg/apiclient/http1/cluster-workflow-template-service-client.go @@ -19,7 +19,6 @@ func (h ClusterWorkflowTemplateServiceClient) CreateClusterWorkflowTemplate(_ co func (h ClusterWorkflowTemplateServiceClient) GetClusterWorkflowTemplate(_ context.Context, in *clusterworkflowtemplate.ClusterWorkflowTemplateGetRequest, _ ...grpc.CallOption) (*wfv1.ClusterWorkflowTemplate, error) { out := &wfv1.ClusterWorkflowTemplate{} return out, h.Get(in, out, "/api/v1/cluster-workflow-templates/{name}") - } func (h ClusterWorkflowTemplateServiceClient) ListClusterWorkflowTemplates(_ context.Context, in *clusterworkflowtemplate.ClusterWorkflowTemplateListRequest, _ ...grpc.CallOption) (*wfv1.ClusterWorkflowTemplateList, error) { diff --git a/pkg/apiclient/panic-intermediary.go b/pkg/apiclient/panic-intermediary.go index 4c0547084264..10a34a7cd516 100644 --- a/pkg/apiclient/panic-intermediary.go +++ b/pkg/apiclient/panic-intermediary.go @@ -2,8 +2,7 @@ package apiclient import "google.golang.org/grpc/metadata" -type panicIntermediary struct { -} +type panicIntermediary struct{} func (w abstractIntermediary) Header() (metadata.MD, error) { panic("implement me") diff --git a/pkg/apis/workflow/v1alpha1/register.go b/pkg/apis/workflow/v1alpha1/register.go index c9941f2d2047..3bb06f603e15 100644 --- a/pkg/apis/workflow/v1alpha1/register.go +++ b/pkg/apis/workflow/v1alpha1/register.go @@ -1,11 +1,11 @@ package v1alpha1 import ( - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow" ) // SchemeGroupVersion is group version used to register these objects diff --git a/pkg/apis/workflow/v1alpha1/workflow_types.go b/pkg/apis/workflow/v1alpha1/workflow_types.go index ea063248e8ca..656e213af4b7 100644 --- a/pkg/apis/workflow/v1alpha1/workflow_types.go +++ 
b/pkg/apis/workflow/v1alpha1/workflow_types.go @@ -138,14 +138,14 @@ func (w Workflows) Filter(predicate WorkflowPredicate) Workflows { } // GetTTLStrategy return TTLStrategy based on Order of precedence: -//1. Workflow, 2. WorkflowTemplate, 3. Workflowdefault +// 1. Workflow, 2. WorkflowTemplate, 3. Workflowdefault func (w *Workflow) GetTTLStrategy() *TTLStrategy { var ttlStrategy *TTLStrategy // TTLStrategy from WorkflowTemplate if w.Status.StoredWorkflowSpec != nil && w.Status.StoredWorkflowSpec.GetTTLStrategy() != nil { ttlStrategy = w.Status.StoredWorkflowSpec.GetTTLStrategy() } - //TTLStrategy from Workflow + // TTLStrategy from Workflow if w.Spec.GetTTLStrategy() != nil { ttlStrategy = w.Spec.GetTTLStrategy() } @@ -320,9 +320,9 @@ type WorkflowSpec struct { // container fields which are not strings (e.g. resource limits). PodSpecPatch string `json:"podSpecPatch,omitempty" protobuf:"bytes,27,opt,name=podSpecPatch"` - //PodDisruptionBudget holds the number of concurrent disruptions that you allow for Workflow's Pods. - //Controller will automatically add the selector with workflow name, if selector is empty. - //Optional: Defaults to empty. + // PodDisruptionBudget holds the number of concurrent disruptions that you allow for Workflow's Pods. + // Controller will automatically add the selector with workflow name, if selector is empty. + // Optional: Defaults to empty. // +optional PodDisruptionBudget *policyv1beta.PodDisruptionBudgetSpec `json:"podDisruptionBudget,omitempty" protobuf:"bytes,31,opt,name=podDisruptionBudget"` @@ -627,7 +627,6 @@ func (tmpl *Template) GetSidecarNames() []string { containerNames = append(containerNames, s.Name) } return containerNames - } type Artifacts []Artifact @@ -713,8 +712,7 @@ type ValueFrom struct { } // SuppliedValueFrom is a placeholder for a value to be filled in directly, either through the CLI, API, etc. -type SuppliedValueFrom struct { -} +type SuppliedValueFrom struct{} // Artifact indicates an artifact to place at a specified path type Artifact struct { @@ -1207,7 +1205,7 @@ func SucceededPodNode(n NodeStatus) bool { // Children returns the children of the parent. func (s Nodes) Children(parentNodeId string) Nodes { - var childNodes = make(Nodes) + childNodes := make(Nodes) parentNode, ok := s[parentNodeId] if !ok { return childNodes @@ -1222,7 +1220,7 @@ func (s Nodes) Children(parentNodeId string) Nodes { // Filter returns the subset of the nodes that match the predicate, e.g. only failed nodes func (s Nodes) Filter(predicate func(NodeStatus) bool) Nodes { - var filteredNodes = make(Nodes) + filteredNodes := make(Nodes) for _, node := range s { if predicate(node) { filteredNodes[node.ID] = node @@ -1233,7 +1231,7 @@ func (s Nodes) Filter(predicate func(NodeStatus) bool) Nodes { // Map maps the nodes to new values, e.g. `x.Hostname` func (s Nodes) Map(f func(x NodeStatus) interface{}) map[string]interface{} { - var values = make(map[string]interface{}) + values := make(map[string]interface{}) for _, node := range s { values[node.ID] = f(node) } @@ -1340,8 +1338,7 @@ type Backoff struct { // RetryNodeAntiAffinity is a placeholder for future expansion, only empty nodeAntiAffinity is allowed. // In order to prevent running steps on the same host, it uses "kubernetes.io/hostname". -type RetryNodeAntiAffinity struct { -} +type RetryNodeAntiAffinity struct{} // RetryAffinity prevents running steps on the same host. 
type RetryAffinity struct { diff --git a/pkg/apis/workflow/v1alpha1/workflow_types_test.go b/pkg/apis/workflow/v1alpha1/workflow_types_test.go index a5fd70d5dbe1..f51ace8847f8 100644 --- a/pkg/apis/workflow/v1alpha1/workflow_types_test.go +++ b/pkg/apis/workflow/v1alpha1/workflow_types_test.go @@ -55,22 +55,28 @@ func TestWorkflowHappenedBetween(t *testing.T) { assert.False(t, WorkflowRanBetween(t0, t3)(Workflow{})) assert.False(t, WorkflowRanBetween(t0, t1)(Workflow{ ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.Time{Time: t0}}, - Status: WorkflowStatus{FinishedAt: metav1.Time{Time: t1}}})) + Status: WorkflowStatus{FinishedAt: metav1.Time{Time: t1}}, + })) assert.False(t, WorkflowRanBetween(t1, t2)(Workflow{ ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.Time{Time: t0}}, - Status: WorkflowStatus{FinishedAt: metav1.Time{Time: t1}}})) + Status: WorkflowStatus{FinishedAt: metav1.Time{Time: t1}}, + })) assert.False(t, WorkflowRanBetween(t2, t3)(Workflow{ ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.Time{Time: t0}}, - Status: WorkflowStatus{FinishedAt: metav1.Time{Time: t1}}})) + Status: WorkflowStatus{FinishedAt: metav1.Time{Time: t1}}, + })) assert.False(t, WorkflowRanBetween(t0, t1)(Workflow{ ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.Time{Time: t1}}, - Status: WorkflowStatus{FinishedAt: metav1.Time{Time: t2}}})) + Status: WorkflowStatus{FinishedAt: metav1.Time{Time: t2}}, + })) assert.False(t, WorkflowRanBetween(t2, t3)(Workflow{ ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.Time{Time: t1}}, - Status: WorkflowStatus{FinishedAt: metav1.Time{Time: t2}}})) + Status: WorkflowStatus{FinishedAt: metav1.Time{Time: t2}}, + })) assert.True(t, WorkflowRanBetween(t0, t3)(Workflow{ ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.Time{Time: t1}}, - Status: WorkflowStatus{FinishedAt: metav1.Time{Time: t2}}})) + Status: WorkflowStatus{FinishedAt: metav1.Time{Time: t2}}, + })) } func TestArtifactLocation_IsArchiveLogs(t *testing.T) { @@ -356,7 +362,7 @@ func TestNodes_Any(t *testing.T) { } func TestNodes_Children(t *testing.T) { - var nodes = Nodes{ + nodes := Nodes{ "node_0": NodeStatus{Name: "node_0", Phase: NodeFailed, Children: []string{"node_1", "node_2"}}, "node_1": NodeStatus{Name: "node_1", Phase: NodeFailed, Children: []string{}}, "node_2": NodeStatus{Name: "node_2", Phase: NodeRunning, Children: []string{}}, @@ -376,7 +382,7 @@ func TestNodes_Children(t *testing.T) { } func TestNodes_Filter(t *testing.T) { - var nodes = Nodes{ + nodes := Nodes{ "node_1": NodeStatus{ID: "node_1", Phase: NodeFailed}, "node_2": NodeStatus{ID: "node_2", Phase: NodeRunning}, "node_3": NodeStatus{ID: "node_3", Phase: NodeFailed}, @@ -395,9 +401,9 @@ func TestNodes_Filter(t *testing.T) { }) } -//Map(f func(x NodeStatus) interface{}) map[string]interface{} { +// Map(f func(x NodeStatus) interface{}) map[string]interface{} { func TestNodes_Map(t *testing.T) { - var nodes = Nodes{ + nodes := Nodes{ "node_1": NodeStatus{ID: "node_1", HostNodeName: "host_1"}, "node_2": NodeStatus{ID: "node_2", HostNodeName: "host_2"}, } diff --git a/server/apiserver/argoserver.go b/server/apiserver/argoserver.go index df0b2d419f3f..16c73ba5c293 100644 --- a/server/apiserver/argoserver.go +++ b/server/apiserver/argoserver.go @@ -149,8 +149,8 @@ func (as *argoServer) Run(ctx context.Context, port int, browserOpenFunc func(st config := v.(*Config) log.WithFields(log.Fields{"version": argo.GetVersion().Version, "instanceID": config.InstanceID}).Info("Starting Argo Server") 
instanceIDService := instanceid.NewService(config.InstanceID) - var offloadRepo = sqldb.ExplosiveOffloadNodeStatusRepo - var wfArchive = sqldb.NullWorkflowArchive + offloadRepo := sqldb.ExplosiveOffloadNodeStatusRepo + wfArchive := sqldb.NullWorkflowArchive persistence := config.Persistence if persistence != nil { session, tableName, err := sqldb.CreateDBSession(as.clients.Kubernetes, as.namespace, persistence) @@ -262,7 +262,6 @@ func (as *argoServer) newGRPCServer(instanceIDService instanceid.Service, offloa // newHTTPServer returns the HTTP server to serve HTTP/HTTPS requests. This is implemented // using grpc-gateway as a proxy to the gRPC server. func (as *argoServer) newHTTPServer(ctx context.Context, port int, artifactServer *artifacts.ArtifactServer) *http.Server { - endpoint := fmt.Sprintf("localhost:%d", port) mux := http.NewServeMux() @@ -335,7 +334,6 @@ func (as *argoServer) restartOnConfigChange(interface{}) error { // checkServeErr checks the error from a .Serve() call to decide if it was a graceful shutdown func (as *argoServer) checkServeErr(name string, err error) { - if err != nil { if as.stopCh == nil { // a nil stopCh indicates a graceful shutdown diff --git a/server/artifacts/artifact_server.go b/server/artifacts/artifact_server.go index b75874d33dbb..70569258ce30 100644 --- a/server/artifacts/artifact_server.go +++ b/server/artifacts/artifact_server.go @@ -44,7 +44,6 @@ func newArtifactServer(authN auth.Gatekeeper, hydrator hydrator.Interface, wfArc } func (a *ArtifactServer) GetArtifact(w http.ResponseWriter, r *http.Request) { - ctx, err := a.gateKeeping(r) if err != nil { w.WriteHeader(401) @@ -75,7 +74,6 @@ func (a *ArtifactServer) GetArtifact(w http.ResponseWriter, r *http.Request) { } func (a *ArtifactServer) GetArtifactByUID(w http.ResponseWriter, r *http.Request) { - ctx, err := a.gateKeeping(r) if err != nil { w.WriteHeader(401) @@ -165,7 +163,6 @@ func (a *ArtifactServer) returnArtifact(ctx context.Context, w http.ResponseWrit } file, err := os.Open(tmpPath) - if err != nil { return err } @@ -173,7 +170,6 @@ func (a *ArtifactServer) returnArtifact(ctx context.Context, w http.ResponseWrit defer file.Close() stats, err := file.Stat() - if err != nil { return err } diff --git a/server/artifacts/artifact_server_test.go b/server/artifacts/artifact_server_test.go index ae0893def6b5..dd2f79a07821 100644 --- a/server/artifacts/artifact_server_test.go +++ b/server/artifacts/artifact_server_test.go @@ -8,9 +8,6 @@ import ( "net/url" "testing" - artifact "github.com/argoproj/argo-workflows/v3/workflow/artifacts" - "github.com/argoproj/argo-workflows/v3/workflow/artifacts/resource" - "github.com/stretchr/testify/assert" testhttp "github.com/stretchr/testify/http" "github.com/stretchr/testify/mock" @@ -25,6 +22,8 @@ import ( authmocks "github.com/argoproj/argo-workflows/v3/server/auth/mocks" "github.com/argoproj/argo-workflows/v3/util/instanceid" armocks "github.com/argoproj/argo-workflows/v3/workflow/artifactrepositories/mocks" + artifact "github.com/argoproj/argo-workflows/v3/workflow/artifacts" + "github.com/argoproj/argo-workflows/v3/workflow/artifacts/resource" "github.com/argoproj/argo-workflows/v3/workflow/common" hydratorfake "github.com/argoproj/argo-workflows/v3/workflow/hydrator/fake" ) @@ -100,9 +99,11 @@ func newServer() *ArtifactServer { }, }, }, - }} + }, + } argo := fakewfv1.NewSimpleClientset(wf, &wfv1.Workflow{ - ObjectMeta: metav1.ObjectMeta{Namespace: "my-ns", Name: "your-wf"}}) + ObjectMeta: metav1.ObjectMeta{Namespace: "my-ns", Name: "your-wf"}, + 
}) ctx := context.WithValue(context.WithValue(context.Background(), auth.KubeKey, kube), auth.WfKey, argo) gatekeeper.On("Context", mock.Anything).Return(ctx, nil) a := &sqldbmocks.WorkflowArchive{} diff --git a/server/auth/mode_test.go b/server/auth/mode_test.go index 2a55e0e9fe08..07f593cad87a 100644 --- a/server/auth/mode_test.go +++ b/server/auth/mode_test.go @@ -36,6 +36,7 @@ func TestModes_Add(t *testing.T) { } }) } + func TestModes_GetMode(t *testing.T) { m := Modes{ Client: true, diff --git a/server/auth/serviceaccount/claims.go b/server/auth/serviceaccount/claims.go index 6de4b04f20ed..fa141c9bcd06 100644 --- a/server/auth/serviceaccount/claims.go +++ b/server/auth/serviceaccount/claims.go @@ -7,9 +7,8 @@ import ( "io/ioutil" "strings" - "k8s.io/client-go/rest" - "gopkg.in/square/go-jose.v2/jwt" + "k8s.io/client-go/rest" "github.com/argoproj/argo-workflows/v3/server/auth/types" ) diff --git a/server/auth/sso/sso.go b/server/auth/sso/sso.go index 1185c04c0e87..4f5d7f4e941a 100644 --- a/server/auth/sso/sso.go +++ b/server/auth/sso/sso.go @@ -238,10 +238,12 @@ func (s *sso) HandleCallback(w http.ResponseWriter, r *http.Request) { _, _ = w.Write([]byte(fmt.Sprintf("failed to get claims: %v", err))) return } - argoClaims := &types.Claims{Claims: jwt.Claims{ - Issuer: issuer, - Subject: c.Subject, - Expiry: jwt.NewNumericDate(time.Now().Add(s.expiry))}, + argoClaims := &types.Claims{ + Claims: jwt.Claims{ + Issuer: issuer, + Subject: c.Subject, + Expiry: jwt.NewNumericDate(time.Now().Add(s.expiry)), + }, Groups: c.Groups, Email: c.Email, EmailVerified: c.EmailVerified, diff --git a/server/event/dispatch/operation_test.go b/server/event/dispatch/operation_test.go index 3a5747f6e8d8..5279ca51d8d0 100644 --- a/server/event/dispatch/operation_test.go +++ b/server/event/dispatch/operation_test.go @@ -240,7 +240,8 @@ func Test_populateWorkflowMetadata(t *testing.T) { Submit: &wfv1.Submit{ WorkflowTemplateRef: wfv1.WorkflowTemplateRef{Name: "my-wft"}, ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"invalidLabel": "foo...bar"}}, + Labels: map[string]string{"invalidLabel": "foo...bar"}, + }, }, }, }, @@ -252,7 +253,8 @@ func Test_populateWorkflowMetadata(t *testing.T) { Submit: &wfv1.Submit{ WorkflowTemplateRef: wfv1.WorkflowTemplateRef{Name: "my-wft"}, ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"invalidAnnotation": "foo.[..]bar"}}, + Annotations: map[string]string{"invalidAnnotation": "foo.[..]bar"}, + }, }, }, }, diff --git a/server/event/event_server.go b/server/event/event_server.go index 1549f3f18bb7..376400c9cd20 100644 --- a/server/event/event_server.go +++ b/server/event/event_server.go @@ -39,7 +39,6 @@ func NewController(instanceIDService instanceid.Service, eventRecorderManager ev } func (s *Controller) Run(stopCh <-chan struct{}) { - // this `WaitGroup` allows us to wait for all events to dispatch before exiting wg := sync.WaitGroup{} @@ -66,7 +65,6 @@ func (s *Controller) Run(stopCh <-chan struct{}) { } func (s *Controller) ReceiveEvent(ctx context.Context, req *eventpkg.EventRequest) (*eventpkg.EventResponse, error) { - options := metav1.ListOptions{} s.instanceIDService.With(&options) diff --git a/server/types/clients.go b/server/types/clients.go index 89eb9a87572b..dfed8f59072e 100644 --- a/server/types/clients.go +++ b/server/types/clients.go @@ -1,10 +1,9 @@ package types import ( - "k8s.io/client-go/kubernetes" - eventsource "github.com/argoproj/argo-events/pkg/client/eventsource/clientset/versioned" sensor 
"github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned" + "k8s.io/client-go/kubernetes" workflow "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned" ) diff --git a/server/workflow/workflow_server.go b/server/workflow/workflow_server.go index 0b0733ffa7fc..2fe4788c5c36 100644 --- a/server/workflow/workflow_server.go +++ b/server/workflow/workflow_server.go @@ -62,7 +62,6 @@ func (s *workflowServer) CreateWorkflow(ctx context.Context, req *workflowpkg.Wo cwftmplGetter := templateresolution.WrapClusterWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().ClusterWorkflowTemplates()) _, err := validate.ValidateWorkflow(wftmplGetter, cwftmplGetter, req.Workflow, validate.ValidateOpts{}) - if err != nil { return nil, err } @@ -129,7 +128,7 @@ func (s *workflowServer) GetWorkflow(ctx context.Context, req *workflowpkg.Workf func (s *workflowServer) ListWorkflows(ctx context.Context, req *workflowpkg.WorkflowListRequest) (*wfv1.WorkflowList, error) { wfClient := auth.GetWfClient(ctx) - var listOption = &metav1.ListOptions{} + listOption := &metav1.ListOptions{} if req.ListOptions != nil { listOption = req.ListOptions } @@ -236,7 +235,7 @@ func (s *workflowServer) WatchWorkflows(req *workflowpkg.WatchWorkflowsRequest, func (s *workflowServer) WatchEvents(req *workflowpkg.WatchEventsRequest, ws workflowpkg.WorkflowService_WatchEventsServer) error { ctx := ws.Context() kubeClient := auth.GetKubeClient(ctx) - var opts = &metav1.ListOptions{} + opts := &metav1.ListOptions{} if req.ListOptions != nil { opts = req.ListOptions } @@ -488,7 +487,6 @@ func (s *workflowServer) LintWorkflow(ctx context.Context, req *workflowpkg.Work creator.Label(ctx, req.Workflow) _, err := validate.ValidateWorkflow(wftmplGetter, cwftmplGetter, req.Workflow, validate.ValidateOpts{Lint: true}) - if err != nil { return nil, err } @@ -594,5 +592,4 @@ func (s *workflowServer) SubmitWorkflow(ctx context.Context, req *workflowpkg.Wo return nil, err } return wfClient.ArgoprojV1alpha1().Workflows(req.Namespace).Create(ctx, wf, metav1.CreateOptions{}) - } diff --git a/server/workflow/workflow_server_test.go b/server/workflow/workflow_server_test.go index 3f188317acb5..3c614e0298f2 100644 --- a/server/workflow/workflow_server_test.go +++ b/server/workflow/workflow_server_test.go @@ -119,6 +119,7 @@ const wf1 = ` } } ` + const wf2 = ` { "apiVersion": "argoproj.io/v1alpha1", @@ -181,6 +182,7 @@ const wf2 = ` } } ` + const wf3 = ` { "apiVersion": "argoproj.io/v1alpha1", @@ -243,6 +245,7 @@ const wf3 = ` } } ` + const wf4 = ` { "apiVersion": "argoproj.io/v1alpha1", @@ -305,6 +308,7 @@ const wf4 = ` } } ` + const wf5 = ` { "apiVersion": "argoproj.io/v1alpha1", @@ -395,6 +399,7 @@ const failedWf = ` } } ` + const workflow1 = ` { "namespace": "default", @@ -424,6 +429,7 @@ const workflow1 = ` } } ` + const workflowtmpl = ` { "apiVersion": "argoproj.io/v1alpha1", @@ -474,6 +480,7 @@ const workflowtmpl = ` } } ` + const cronwf = ` { "apiVersion": "argoproj.io/v1alpha1", @@ -510,6 +517,7 @@ const cronwf = ` } } ` + const clusterworkflowtmpl = ` { "apiVersion": "argoproj.io/v1alpha1", @@ -561,7 +569,6 @@ const clusterworkflowtmpl = ` ` func getWorkflowServer() (workflowpkg.WorkflowServiceServer, context.Context) { - var unlabelledObj, wfObj1, wfObj2, wfObj3, wfObj4, wfObj5, failedWfObj v1alpha1.Workflow var wftmpl v1alpha1.WorkflowTemplate var cwfTmpl v1alpha1.ClusterWorkflowTemplate @@ -672,7 +679,6 @@ func TestGetWorkflowWithNotFound(t *testing.T) { _, err := getWorkflow(ctx, server, "test", "unlabelled") 
assert.Error(t, err) }) - } func TestGetLatestWorkflow(t *testing.T) { diff --git a/server/workflowarchive/archived_workflow_server.go b/server/workflowarchive/archived_workflow_server.go index 2aecdfc86ce4..08e603daae3f 100644 --- a/server/workflowarchive/archived_workflow_server.go +++ b/server/workflowarchive/archived_workflow_server.go @@ -95,7 +95,6 @@ func (w *archivedWorkflowServer) ListArchivedWorkflows(ctx context.Context, req } items, err := w.wfArchive.ListWorkflows(namespace, minStartedAt, maxStartedAt, requirements, limitWithMore, offset) - if err != nil { return nil, err } diff --git a/test/e2e/argo_server_test.go b/test/e2e/argo_server_test.go index 43bcb0e8d1be..0097632cf307 100644 --- a/test/e2e/argo_server_test.go +++ b/test/e2e/argo_server_test.go @@ -341,6 +341,7 @@ func (s *ArgoServerSuite) TestUnauthorized() { Expect(). Status(401) } + func (s *ArgoServerSuite) TestCookieAuth() { token := s.bearerToken defer func() { s.bearerToken = token }() @@ -1093,7 +1094,6 @@ func (s *ArgoServerSuite) TestArtifactServer() { Expect(). Status(200) }) - } func (s *ArgoServerSuite) stream(url string, f func(t *testing.T, line string) (done bool)) { @@ -1329,7 +1329,6 @@ spec: } func (s *ArgoServerSuite) TestWorkflowTemplateService() { - s.Run("Lint", func() { s.e().POST("/api/v1/workflow-templates/argo/lint"). WithBytes([]byte(`{ @@ -1389,7 +1388,6 @@ func (s *ArgoServerSuite) TestWorkflowTemplateService() { }) s.Run("List", func() { - // make sure list options work correctly s.Given(). WorkflowTemplate("@smoke/workflow-template-whalesay-template.yaml"). @@ -1462,7 +1460,6 @@ func (s *ArgoServerSuite) TestWorkflowTemplateService() { } func (s *ArgoServerSuite) TestSubmitWorkflowFromResource() { - s.Run("CreateWFT", func() { s.e().POST("/api/v1/workflow-templates/argo"). WithBytes([]byte(`{ @@ -1585,7 +1582,6 @@ func (s *ArgoServerSuite) TestSubmitWorkflowFromResource() { Expect(). Status(200) }) - } func (s *ArgoServerSuite) TestEventSourcesService() { diff --git a/test/e2e/cli_test.go b/test/e2e/cli_test.go index 6f7f03d04437..f34e7b1d948e 100644 --- a/test/e2e/cli_test.go +++ b/test/e2e/cli_test.go @@ -14,7 +14,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" - corev1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -142,7 +141,6 @@ func (s *CLISuite) TestVersion() { RunCli([]string{"version"}, func(t *testing.T, output string, err error) { assert.NoError(t, err) }) - }) s.Run("Default", func() { s.Need(Server) @@ -411,7 +409,7 @@ func (s *CLISuite) TestRoot() { }). WaitForWorkflow(createdWorkflowName). Then(). - ExpectWorkflowName(createdWorkflowName, func(t *testing.T, metadata *corev1.ObjectMeta, status *wfv1.WorkflowStatus) { + ExpectWorkflowName(createdWorkflowName, func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) }) }) @@ -436,7 +434,7 @@ func (s *CLISuite) TestWorkflowSuspendResume() { }). WaitForWorkflow(). Then(). - ExpectWorkflow(func(t *testing.T, _ *corev1.ObjectMeta, status *wfv1.WorkflowStatus) { + ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { assert.Equal(t, wfv1.WorkflowSucceeded, status.Phase) }) } @@ -467,7 +465,7 @@ func (s *CLISuite) TestNodeSuspendResume() { return wf.Status.Phase == wfv1.WorkflowFailed }), "suspended node"). Then(). 
- ExpectWorkflow(func(t *testing.T, _ *corev1.ObjectMeta, status *wfv1.WorkflowStatus) { + ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { if assert.Equal(t, wfv1.WorkflowFailed, status.Phase) { r := regexp.MustCompile(`child '(node-suspend-[0-9]+)' failed`) res := r.FindStringSubmatch(status.Message) @@ -733,7 +731,7 @@ func (s *CLISuite) TestWorkflowLint() { func (s *CLISuite) TestWorkflowRetry() { s.Need(Offloading) - var retryTime corev1.Time + var retryTime metav1.Time s.Given(). Workflow("@testdata/retry-test.yaml"). @@ -763,7 +761,7 @@ func (s *CLISuite) TestWorkflowRetry() { return wf.Status.AnyActiveSuspendNode() }), "suspended node"). Then(). - ExpectWorkflow(func(t *testing.T, _ *corev1.ObjectMeta, status *wfv1.WorkflowStatus) { + ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { outerStepsPodNode := status.Nodes.FindByDisplayName("steps-outer-step1") innerStepsPodNode := status.Nodes.FindByDisplayName("steps-inner-step1") @@ -899,7 +897,6 @@ func (s *CLISuite) TestTemplate() { s.Given().RunCli([]string{"template", "get", "not-found"}, func(t *testing.T, output string, err error) { if assert.EqualError(t, err, "exit status 1") { assert.Contains(t, output, `"not-found" not found`) - } }).RunCli([]string{"template", "get", "workflow-template-whalesay-template"}, func(t *testing.T, output string, err error) { if assert.NoError(t, err) { @@ -1067,7 +1064,6 @@ func (s *CLISuite) TestCron() { s.Given().RunCli([]string{"cron", "get", "not-found"}, func(t *testing.T, output string, err error) { if assert.EqualError(t, err, "exit status 1") { assert.Contains(t, output, `\"not-found\" not found`) - } }).RunCli([]string{"cron", "get", "test-cron-wf-basic"}, func(t *testing.T, output string, err error) { if assert.NoError(t, err) { @@ -1252,7 +1248,6 @@ func (s *CLISuite) TestResourceTemplateStopAndTerminate() { RunCli([]string{"delete", "resource-tmpl-wf"}, func(t *testing.T, output string, err error) { assert.Contains(t, output, "deleted") }) - }) s.Run("ResourceTemplateTerminate", func() { s.Given(). @@ -1272,7 +1267,6 @@ func (s *CLISuite) TestResourceTemplateStopAndTerminate() { RunCli([]string{"get", "resource-tmpl-wf-1"}, func(t *testing.T, output string, err error) { assert.Contains(t, output, "Stopped with strategy 'Terminate'") }) - }) } diff --git a/test/e2e/cluster_workflow_template_test.go b/test/e2e/cluster_workflow_template_test.go index 9b7f609ceee2..011bf0849133 100644 --- a/test/e2e/cluster_workflow_template_test.go +++ b/test/e2e/cluster_workflow_template_test.go @@ -70,7 +70,6 @@ spec: ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { assert.Equal(t, v1alpha1.WorkflowSucceeded, status.Phase) }) - } func TestClusterWorkflowTemplateSuite(t *testing.T) { diff --git a/test/e2e/fixtures/e2e_suite.go b/test/e2e/fixtures/e2e_suite.go index 9e968fe63890..caef4e0a9980 100644 --- a/test/e2e/fixtures/e2e_suite.go +++ b/test/e2e/fixtures/e2e_suite.go @@ -6,16 +6,15 @@ import ( "strings" "time" + "github.com/stretchr/testify/suite" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" // load authentication plugin for obtaining credentials from cloud providers. 
_ "k8s.io/client-go/plugin/pkg/client/auth" - - "github.com/stretchr/testify/suite" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "github.com/argoproj/argo-workflows/v3/config" @@ -26,9 +25,11 @@ import ( "github.com/argoproj/argo-workflows/v3/workflow/hydrator" ) -const Namespace = "argo" -const Label = "argo-e2e" -const defaultTimeout = 30 * time.Second +const ( + Namespace = "argo" + Label = "argo-e2e" + defaultTimeout = 30 * time.Second +) type E2ESuite struct { suite.Suite @@ -73,11 +74,12 @@ func (s *E2ESuite) BeforeTest(string, string) { s.DeleteResources() } -var foreground = metav1.DeletePropagationForeground -var background = metav1.DeletePropagationBackground +var ( + foreground = metav1.DeletePropagationForeground + background = metav1.DeletePropagationBackground +) func (s *E2ESuite) DeleteResources() { - hasTestLabel := metav1.ListOptions{LabelSelector: Label} resources := []schema.GroupVersionResource{ {Group: workflow.Group, Version: workflow.Version, Resource: workflow.CronWorkflowPlural}, diff --git a/test/e2e/fixtures/when.go b/test/e2e/fixtures/when.go index 0d89cb8f8984..29ec362379bb 100644 --- a/test/e2e/fixtures/when.go +++ b/test/e2e/fixtures/when.go @@ -165,13 +165,15 @@ func (w *When) CreateCronWorkflow() *When { type Condition func(wf *wfv1.Workflow) bool -var ToBeCompleted Condition = func(wf *wfv1.Workflow) bool { return wf.Labels[common.LabelKeyCompleted] == "true" } -var ToStart Condition = func(wf *wfv1.Workflow) bool { return !wf.Status.StartedAt.IsZero() } -var ToBeRunning Condition = func(wf *wfv1.Workflow) bool { - return wf.Status.Nodes.Any(func(node wfv1.NodeStatus) bool { - return node.Phase == wfv1.NodeRunning - }) -} +var ( + ToBeCompleted Condition = func(wf *wfv1.Workflow) bool { return wf.Labels[common.LabelKeyCompleted] == "true" } + ToStart Condition = func(wf *wfv1.Workflow) bool { return !wf.Status.StartedAt.IsZero() } + ToBeRunning Condition = func(wf *wfv1.Workflow) bool { + return wf.Status.Nodes.Any(func(node wfv1.NodeStatus) bool { + return node.Phase == wfv1.NodeRunning + }) + } +) var ToBeSucceeded Condition = func(wf *wfv1.Workflow) bool { return wf.Status.Phase == wfv1.WorkflowSucceeded } // `ToBeDone` replaces `ToFinish` which also makes sure the workflow is both complete not pending archiving. diff --git a/test/e2e/http_logger.go b/test/e2e/http_logger.go index 2443de626adf..7b3f99136d54 100644 --- a/test/e2e/http_logger.go +++ b/test/e2e/http_logger.go @@ -2,8 +2,7 @@ package e2e import log "github.com/sirupsen/logrus" -type httpLogger struct { -} +type httpLogger struct{} func (d *httpLogger) Logf(fmt string, args ...interface{}) { log.Debugf(fmt, args...) 
diff --git a/test/e2e/images/argosay/v2/main/argosay.go b/test/e2e/images/argosay/v2/main/argosay.go index 32a676f23e06..89f6167f892b 100644 --- a/test/e2e/images/argosay/v2/main/argosay.go +++ b/test/e2e/images/argosay/v2/main/argosay.go @@ -22,6 +22,7 @@ func main() { panic(err) } } + func argosay(args ...string) error { if len(args) == 0 { args = []string{"echo"} diff --git a/test/e2e/workflow_template_test.go b/test/e2e/workflow_template_test.go index df2637791432..29ebb8abff2c 100644 --- a/test/e2e/workflow_template_test.go +++ b/test/e2e/workflow_template_test.go @@ -67,7 +67,6 @@ spec: ExpectWorkflow(func(t *testing.T, metadata *v1.ObjectMeta, status *v1alpha1.WorkflowStatus) { assert.Equal(t, status.Phase, v1alpha1.WorkflowSucceeded) }) - } func (s *WorkflowTemplateSuite) TestSubmitWorkflowTemplateWithEnum() { diff --git a/test/stress/tool/main.go b/test/stress/tool/main.go index 648e1e476d15..fe4028dadaf1 100644 --- a/test/stress/tool/main.go +++ b/test/stress/tool/main.go @@ -13,7 +13,6 @@ import ( ) func main() { - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() configOverrides := &clientcmd.ConfigOverrides{} kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) diff --git a/test/test.go b/test/test.go index 4582e51245ac..58b3534b68fd 100644 --- a/test/test.go +++ b/test/test.go @@ -9,9 +9,7 @@ import ( "github.com/argoproj/argo-workflows/v3/test/util" ) -var ( - testDir string -) +var testDir string func init() { _, filename, _, ok := runtime.Caller(0) diff --git a/test/util/indexer.go b/test/util/indexer.go index d654727ef490..c47e600576e5 100644 --- a/test/util/indexer.go +++ b/test/util/indexer.go @@ -34,6 +34,7 @@ func (i Indexer) ListIndexFuncValues(string) []string { panic("impl func (i Indexer) SetByIndex(indexName, indexedValue string, objs ...interface{}) { i.byIndex[indexName+"="+indexedValue] = objs } + func (i Indexer) ByIndex(indexName, indexedValue string) ([]interface{}, error) { return i.byIndex[indexName+"="+indexedValue], nil } diff --git a/test/util/serviceaccount.go b/test/util/serviceaccount.go index bb53dcd06c52..799b6cb3d0e1 100644 --- a/test/util/serviceaccount.go +++ b/test/util/serviceaccount.go @@ -21,7 +21,9 @@ func CreateServiceAccountWithToken(ctx context.Context, clientset kubernetes.Int Annotations: map[string]string{ corev1.ServiceAccountNameKey: sa.Name, corev1.ServiceAccountUIDKey: string(sa.UID), - }}, Type: corev1.SecretTypeServiceAccountToken}, + }, + }, Type: corev1.SecretTypeServiceAccountToken, + }, metav1.CreateOptions{}) if err != nil { return nil, err diff --git a/util/errors/errors_test.go b/util/errors/errors_test.go index 1e35cc65bb3b..1e59bb932f3c 100644 --- a/util/errors/errors_test.go +++ b/util/errors/errors_test.go @@ -18,10 +18,12 @@ func (n netError) Error() string { return string(n) } func (n netError) Timeout() bool { return false } func (n netError) Temporary() bool { return false } -var tlsHandshakeTimeoutErr net.Error = netError("net/http: TLS handshake timeout") -var ioTimeoutErr net.Error = netError("i/o timeout") -var connectionTimedout net.Error = netError("connection timed out") -var transientErr net.Error = netError("this error is transient") +var ( + tlsHandshakeTimeoutErr net.Error = netError("net/http: TLS handshake timeout") + ioTimeoutErr net.Error = netError("i/o timeout") + connectionTimedout net.Error = netError("connection timed out") + transientErr net.Error = netError("this error is transient") +) const transientEnvVarKey = 
"TRANSIENT_ERROR_PATTERN" diff --git a/util/file/fileutil.go b/util/file/fileutil.go index 0e24804afdb2..af0f95bc7165 100644 --- a/util/file/fileutil.go +++ b/util/file/fileutil.go @@ -37,7 +37,7 @@ func ExistsInTar(sourcePath string, tarReader TarReader) bool { return false } -//Close the file +// Close the file func close(f io.Closer) { err := f.Close() if err != nil { @@ -52,7 +52,6 @@ func CompressEncodeString(content string) string { // DecodeDecompressString will return decode and decompress the func DecodeDecompressString(content string) (string, error) { - buf, err := base64.StdEncoding.DecodeString(content) if err != nil { return "", err @@ -79,7 +78,6 @@ func CompressContent(content []byte) []byte { // DecompressContent will return the uncompressed content func DecompressContent(content []byte) ([]byte, error) { - buf := bytes.NewReader(content) gZipReader, _ := gzip.NewReader(buf) defer close(gZipReader) diff --git a/util/instanceid/service.go b/util/instanceid/service.go index d17943546f43..ebfdc12eb1da 100644 --- a/util/instanceid/service.go +++ b/util/instanceid/service.go @@ -55,7 +55,6 @@ func (s *service) Validate(obj metav1.Object) error { } } else if val, ok := l[common.LabelKeyControllerInstanceID]; ok && val == s.instanceID { return nil - } return fmt.Errorf("'%s' is not managed by the current Argo Server", obj.GetName()) } diff --git a/util/k8s/parse.go b/util/k8s/parse.go index 21ec65e4824e..080d034c0f78 100644 --- a/util/k8s/parse.go +++ b/util/k8s/parse.go @@ -35,4 +35,4 @@ func ParseRequest(r *http.Request) (verb string, kind string) { } return verb, kind -} \ No newline at end of file +} diff --git a/util/kubeconfig/kubeconfig.go b/util/kubeconfig/kubeconfig.go index 32d75cfc4543..3dcd32797b79 100644 --- a/util/kubeconfig/kubeconfig.go +++ b/util/kubeconfig/kubeconfig.go @@ -8,7 +8,6 @@ import ( "time" "github.com/pkg/errors" - "k8s.io/client-go/plugin/pkg/client/auth/exec" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -36,7 +35,6 @@ func IsBearerAuthScheme(token string) bool { } func GetRestConfig(token string) (*restclient.Config, error) { - if IsBasicAuthScheme(token) { token = strings.TrimSpace(strings.TrimPrefix(token, BasicAuthScheme)) username, password, ok := decodeBasicAuthToken(token) @@ -54,7 +52,6 @@ func GetRestConfig(token string) (*restclient.Config, error) { // convert a basic token (username, password) into a REST config func GetBasicRestConfig(username, password string) (*restclient.Config, error) { - restConfig, err := DefaultRestConfig() if err != nil { return nil, err @@ -68,7 +65,6 @@ func GetBasicRestConfig(username, password string) (*restclient.Config, error) { // convert a bearer token into a REST config func GetBearerRestConfig(token string) (*restclient.Config, error) { - restConfig, err := DefaultRestConfig() if err != nil { return nil, err @@ -83,10 +79,9 @@ func GetBearerRestConfig(token string) (*restclient.Config, error) { return restConfig, nil } -//Return the AuthString include Auth type(Basic or Bearer) +// Return the AuthString include Auth type(Basic or Bearer) func GetAuthString(in *restclient.Config, explicitKubeConfigPath string) (string, error) { - - //Checking Basic Auth + // Checking Basic Auth if in.Username != "" { token, err := GetBasicAuthToken(in) return BasicAuthScheme + " " + token, err @@ -97,7 +92,6 @@ func GetAuthString(in *restclient.Config, explicitKubeConfigPath string) (string } func GetBasicAuthToken(in *restclient.Config) (string, error) { - if in == nil { return "", 
errors.Errorf("RestClient can't be nil") } @@ -107,7 +101,6 @@ func GetBasicAuthToken(in *restclient.Config) (string, error) { // convert the REST config into a bearer token func GetBearerToken(in *restclient.Config, explicitKubeConfigPath string) (string, error) { - if len(in.BearerToken) > 0 { return in.BearerToken, nil } @@ -126,7 +119,7 @@ func GetBearerToken(in *restclient.Config, explicitKubeConfigPath string) (strin return "", err } - //This function will return error because of TLS Cert missing, + // This function will return error because of TLS Cert missing, // This code is not making actual request. We can ignore it. _ = auth.UpdateTransportConfig(tc) @@ -160,7 +153,6 @@ func encodeBasicAuthToken(username, password string) string { } func decodeBasicAuthToken(auth string) (username, password string, ok bool) { - c, err := base64.StdEncoding.DecodeString(auth) if err != nil { return diff --git a/util/kubeconfig/kubeconfig_test.go b/util/kubeconfig/kubeconfig_test.go index 824352a63081..693ef40aae41 100644 --- a/util/kubeconfig/kubeconfig_test.go +++ b/util/kubeconfig/kubeconfig_test.go @@ -33,9 +33,7 @@ users: ` func Test_BasicAuthString(t *testing.T) { - t.Run("Basic Auth", func(t *testing.T) { - restConfig, err := clientcmd.RESTConfigFromKubeConfig([]byte(config)) assert.NoError(t, err) authString, err := GetAuthString(restConfig, "") @@ -59,6 +57,5 @@ func Test_BasicAuthString(t *testing.T) { assert.Equal(t, "admin", config.Username) assert.Equal(t, "admin", config.Password) } - }) } diff --git a/util/logs/workflow-logger.go b/util/logs/workflow-logger.go index c1c7d13d5996..b338898ed830 100644 --- a/util/logs/workflow-logger.go +++ b/util/logs/workflow-logger.go @@ -38,7 +38,6 @@ type sender interface { } func WorkflowLogs(ctx context.Context, wfClient versioned.Interface, kubeClient kubernetes.Interface, req request, sender sender) error { - wfInterface := wfClient.ArgoprojV1alpha1().Workflows(req.GetNamespace()) _, err := wfInterface.Get(ctx, req.GetName(), metav1.GetOptions{}) if err != nil { diff --git a/util/util.go b/util/util.go index 3c718931a2f0..e51cab4ea1cc 100644 --- a/util/util.go +++ b/util/util.go @@ -8,10 +8,9 @@ import ( "strconv" "strings" - "k8s.io/apimachinery/pkg/fields" - apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/client-go/kubernetes" "github.com/argoproj/argo-workflows/v3/errors" @@ -33,7 +32,6 @@ func Close(c Closer) { // GetSecrets retrieves a secret value and memoizes the result func GetSecrets(ctx context.Context, clientSet kubernetes.Interface, namespace, name, key string) ([]byte, error) { - secretsIf := clientSet.CoreV1().Secrets(namespace) var secret *apiv1.Secret err := waitutil.Backoff(retry.DefaultRetry, func() (bool, error) { diff --git a/workflow/artifacts/artifactory/artifactory.go b/workflow/artifacts/artifactory/artifactory.go index 7a841f7b52de..8f1b0aaf2ba8 100644 --- a/workflow/artifacts/artifactory/artifactory.go +++ b/workflow/artifacts/artifactory/artifactory.go @@ -16,7 +16,6 @@ type ArtifactoryArtifactDriver struct { // Download artifact from an artifactory URL func (a *ArtifactoryArtifactDriver) Load(artifact *wfv1.Artifact, path string) error { - lf, err := os.Create(path) if err != nil { return err @@ -51,7 +50,6 @@ func (a *ArtifactoryArtifactDriver) Load(artifact *wfv1.Artifact, path string) e // UpLoad artifact to an artifactory URL func (a *ArtifactoryArtifactDriver) Save(path string, artifact *wfv1.Artifact) error { - f, err := os.Open(path) if err 
!= nil { return err diff --git a/workflow/artifacts/artifacts.go b/workflow/artifacts/artifacts.go index 24eb25c6d658..c56ef2dca486 100644 --- a/workflow/artifacts/artifacts.go +++ b/workflow/artifacts/artifacts.go @@ -4,14 +4,13 @@ import ( "context" "fmt" - "github.com/argoproj/argo-workflows/v3/workflow/artifacts/gcs" - "github.com/argoproj/argo-workflows/v3/workflow/artifacts/oss" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/workflow/artifacts/artifactory" + "github.com/argoproj/argo-workflows/v3/workflow/artifacts/gcs" "github.com/argoproj/argo-workflows/v3/workflow/artifacts/git" "github.com/argoproj/argo-workflows/v3/workflow/artifacts/hdfs" "github.com/argoproj/argo-workflows/v3/workflow/artifacts/http" + "github.com/argoproj/argo-workflows/v3/workflow/artifacts/oss" "github.com/argoproj/argo-workflows/v3/workflow/artifacts/raw" "github.com/argoproj/argo-workflows/v3/workflow/artifacts/resource" "github.com/argoproj/argo-workflows/v3/workflow/artifacts/s3" diff --git a/workflow/artifacts/gcs/gcs.go b/workflow/artifacts/gcs/gcs.go index c10535eb4bfe..0b32ed0cf4dd 100644 --- a/workflow/artifacts/gcs/gcs.go +++ b/workflow/artifacts/gcs/gcs.go @@ -11,14 +11,13 @@ import ( "time" "cloud.google.com/go/storage" + "github.com/argoproj/pkg/file" log "github.com/sirupsen/logrus" "golang.org/x/oauth2/google" "google.golang.org/api/iterator" "google.golang.org/api/option" "k8s.io/apimachinery/pkg/util/wait" - "github.com/argoproj/pkg/file" - "github.com/argoproj/argo-workflows/v3/errors" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" ) diff --git a/workflow/artifacts/oss/oss.go b/workflow/artifacts/oss/oss.go index 2a6b982fa648..fb612c6f6548 100644 --- a/workflow/artifacts/oss/oss.go +++ b/workflow/artifacts/oss/oss.go @@ -3,11 +3,10 @@ package oss import ( "time" + "github.com/aliyun/aliyun-oss-go-sdk/oss" log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/wait" - "github.com/aliyun/aliyun-oss-go-sdk/oss" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" ) diff --git a/workflow/artifacts/raw/raw.go b/workflow/artifacts/raw/raw.go index 9ef9e4a2544b..714aed3b58f6 100644 --- a/workflow/artifacts/raw/raw.go +++ b/workflow/artifacts/raw/raw.go @@ -7,8 +7,7 @@ import ( wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" ) -type RawArtifactDriver struct { -} +type RawArtifactDriver struct{} // Store raw content as artifact func (a *RawArtifactDriver) Load(artifact *wfv1.Artifact, path string) error { diff --git a/workflow/artifacts/raw/raw_test.go b/workflow/artifacts/raw/raw_test.go index 3ece6013c1e7..822e08a3072a 100644 --- a/workflow/artifacts/raw/raw_test.go +++ b/workflow/artifacts/raw/raw_test.go @@ -18,7 +18,6 @@ const ( ) func TestLoad(t *testing.T) { - content := fmt.Sprintf("time: %v", time.Now().UnixNano()) lf, err := ioutil.TempFile("", LoadFileName) assert.NoError(t, err) @@ -35,5 +34,4 @@ func TestLoad(t *testing.T) { dat, err := ioutil.ReadFile(lf.Name()) assert.NoError(t, err) assert.Equal(t, content, string(dat)) - } diff --git a/workflow/artifacts/s3/s3.go b/workflow/artifacts/s3/s3.go index 78934bdee947..c97e3b728e51 100644 --- a/workflow/artifacts/s3/s3.go +++ b/workflow/artifacts/s3/s3.go @@ -5,14 +5,12 @@ import ( "os" "time" + "github.com/argoproj/pkg/file" + argos3 "github.com/argoproj/pkg/s3" "github.com/minio/minio-go/v7" - log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/wait" - "github.com/argoproj/pkg/file" - 
argos3 "github.com/argoproj/pkg/s3" - "github.com/argoproj/argo-workflows/v3/errors" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/workflow/common" diff --git a/workflow/common/ancestry.go b/workflow/common/ancestry.go index f770d01491a9..fb241f134b6d 100644 --- a/workflow/common/ancestry.go +++ b/workflow/common/ancestry.go @@ -59,7 +59,7 @@ func GetTaskDependencies(task *wfv1.DAGTask, ctx DagContext) (map[string]Depende split := strings.Split(match, ".") if split[1] == string(TaskResultAnySucceeded) || split[1] == string(TaskResultAllFailed) { dependencies[split[0]] = DependencyTypeItems - } else if _, ok := dependencies[split[0]]; !ok { //DependencyTypeItems takes precedence + } else if _, ok := dependencies[split[0]]; !ok { // DependencyTypeItems takes precedence dependencies[split[0]] = DependencyTypeTask } } else if matchGroup[4] != -1 { diff --git a/workflow/common/ancestry_test.go b/workflow/common/ancestry_test.go index d32a1160190e..bded4df67e01 100644 --- a/workflow/common/ancestry_test.go +++ b/workflow/common/ancestry_test.go @@ -77,7 +77,6 @@ func TestGetTaskDependenciesFromDepends(t *testing.T) { deps, logic = GetTaskDependencies(task, ctx) assert.Equal(t, map[string]DependencyType{"task-1": DependencyTypeTask}, deps) assert.Equal(t, "(task-1.Succeeded || task-1.Skipped || task-1.Daemoned || task-1.Errored || task-1.Failed)", logic) - } func TestValidateTaskResults(t *testing.T) { diff --git a/workflow/common/common_test.go b/workflow/common/common_test.go index 4552cd849efa..b02d5b203654 100644 --- a/workflow/common/common_test.go +++ b/workflow/common/common_test.go @@ -3,9 +3,8 @@ package common import ( "testing" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) func TestUnstructuredHasCompletedLabel(t *testing.T) { diff --git a/workflow/common/convert_test.go b/workflow/common/convert_test.go index fbea81940de3..746b9661ffd1 100644 --- a/workflow/common/convert_test.go +++ b/workflow/common/convert_test.go @@ -158,7 +158,6 @@ func TestConvertWorkflowTemplateToWorkflow(t *testing.T) { assert.Contains(t, wf.Annotations, "annotation1") }) t.Run("ConvertWorkflowFromWFTWithNilWorkflowMetadata", func(t *testing.T) { - wf := NewWorkflowFromWorkflowTemplate(wfTmpl.Name, nil, false) assert.NotNil(t, wf) assert.Equal(t, "workflow-template-whalesay-template", wf.Labels["workflows.argoproj.io/workflow-template"]) @@ -178,7 +177,6 @@ func TestConvertWorkflowTemplateToWorkflow(t *testing.T) { assert.Equal(t, wfTmpl.Name, wf.Spec.WorkflowTemplateRef.Name) assert.False(t, wf.Spec.WorkflowTemplateRef.ClusterScope) }) - } func TestConvertClusterWorkflowTemplateToWorkflow(t *testing.T) { diff --git a/workflow/common/params.go b/workflow/common/params.go index 557e257cd17f..fa7314e8ebf1 100644 --- a/workflow/common/params.go +++ b/workflow/common/params.go @@ -3,7 +3,7 @@ package common // Parameters extends string map with useful methods. type Parameters map[string]string -// Merge merges given parameteres. +// Merge merges given parameters. 
func (ps Parameters) Merge(args ...Parameters) Parameters { newParams := ps.DeepCopy() for _, params := range args { diff --git a/workflow/common/util.go b/workflow/common/util.go index 096ab9cc92c3..48e1fa6c8bc2 100644 --- a/workflow/common/util.go +++ b/workflow/common/util.go @@ -158,7 +158,7 @@ func (w *websocketReadCloser) WebsocketCallback(ws *websocket.Conn, resp *http.R for { _, body, err := ws.ReadMessage() if len(body) > 0 { - //log.Debugf("%d: %s", msgType, string(body)) + // log.Debugf("%d: %s", msgType, string(body)) _, writeErr := w.Write(body) if writeErr != nil { return writeErr @@ -197,7 +197,7 @@ type websocketReadCloser struct { } func (w *websocketReadCloser) Close() error { - //return w.conn.Close() + // return w.conn.Close() return nil } @@ -310,7 +310,7 @@ func SubstituteParams(tmpl *wfv1.Template, globalParams, localParams Parameters) replaceMap := globalParams.Merge(localParams) fstTmpl, err := fasttemplate.NewTemplate(string(tmplBytes), "{{", "}}") if err != nil { - return nil, fmt.Errorf("unable to parse argo varaible: %w", err) + return nil, fmt.Errorf("unable to parse argo variable: %w", err) } globalReplacedTmplStr, err := Replace(fstTmpl, replaceMap, true) if err != nil { @@ -329,7 +329,7 @@ func SubstituteParams(tmpl *wfv1.Template, globalParams, localParams Parameters) } replaceMap["inputs.parameters."+inParam.Name] = inParam.Value.String() } - //allow {{inputs.parameters}} to fetch the entire input parameters list as JSON + // allow {{inputs.parameters}} to fetch the entire input parameters list as JSON jsonInputParametersBytes, err := json.Marshal(globalReplacedTmpl.Inputs.Parameters) if err != nil { return nil, errors.InternalWrapError(err) @@ -353,7 +353,7 @@ func SubstituteParams(tmpl *wfv1.Template, globalParams, localParams Parameters) fstTmpl, err = fasttemplate.NewTemplate(globalReplacedTmplStr, "{{", "}}") if err != nil { - return nil, fmt.Errorf("unable to parse argo varaible: %w", err) + return nil, fmt.Errorf("unable to parse argo variable: %w", err) } s, err := Replace(fstTmpl, replaceMap, true) if err != nil { diff --git a/workflow/common/util_test.go b/workflow/common/util_test.go index d965dd6c0420..248dd49f2d4a 100644 --- a/workflow/common/util_test.go +++ b/workflow/common/util_test.go @@ -4,9 +4,8 @@ import ( "context" "testing" - "github.com/valyala/fasttemplate" - "github.com/stretchr/testify/assert" + "github.com/valyala/fasttemplate" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" @@ -91,7 +90,6 @@ func TestDeletePod(t *testing.T) { } func TestNestedReplaceString(t *testing.T) { - replaceMap := map[string]string{"inputs.parameters.message": "hello world"} test := `{{- with secret "{{inputs.parameters.message}}" -}} @@ -162,7 +160,6 @@ func TestNestedReplaceString(t *testing.T) { } func TestReplaceStringWithWhiteSpace(t *testing.T) { - replaceMap := map[string]string{"inputs.parameters.message": "hello world"} test := `{{ inputs.parameters.message }}` diff --git a/workflow/controller/cache_test.go b/workflow/controller/cache_test.go index f589268f202d..6cd461dc4b67 100644 --- a/workflow/controller/cache_test.go +++ b/workflow/controller/cache_test.go @@ -70,7 +70,7 @@ func TestConfigMapCacheLoadMiss(t *testing.T) { func TestConfigMapCacheSave(t *testing.T) { var MockParamValue string = "Hello world" - var MockParam = wfv1.Parameter{ + MockParam := wfv1.Parameter{ Name: "hello", Value: wfv1.AnyStringPtr(MockParamValue), } diff --git a/workflow/controller/controller.go 
b/workflow/controller/controller.go index 86b0b3531ca8..3627a757a327 100644 --- a/workflow/controller/controller.go +++ b/workflow/controller/controller.go @@ -8,8 +8,6 @@ import ( "strconv" "time" - "github.com/argoproj/argo-workflows/v3/util/env" - "github.com/argoproj/pkg/errors" syncpkg "github.com/argoproj/pkg/sync" log "github.com/sirupsen/logrus" @@ -18,7 +16,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" - v1Label "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/types" @@ -43,6 +40,7 @@ import ( wfextvv1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/client/informers/externalversions/workflow/v1alpha1" authutil "github.com/argoproj/argo-workflows/v3/util/auth" "github.com/argoproj/argo-workflows/v3/util/diff" + "github.com/argoproj/argo-workflows/v3/util/env" errorsutil "github.com/argoproj/argo-workflows/v3/util/errors" "github.com/argoproj/argo-workflows/v3/workflow/artifactrepositories" "github.com/argoproj/argo-workflows/v3/workflow/common" @@ -313,8 +311,8 @@ func (wfc *WorkflowController) createSynchronizationManager(ctx context.Context) wfc.syncManager = sync.NewLockManager(getSyncLimit, nextWorkflow, isWFDeleted) - labelSelector := v1Label.NewSelector() - req, _ := v1Label.NewRequirement(common.LabelKeyPhase, selection.Equals, []string{string(wfv1.NodeRunning)}) + labelSelector := labels.NewSelector() + req, _ := labels.NewRequirement(common.LabelKeyPhase, selection.Equals, []string{string(wfv1.NodeRunning)}) if req != nil { labelSelector = labelSelector.Add(*req) } @@ -441,7 +439,8 @@ func (wfc *WorkflowController) processNextPodCleanupItem(ctx context.Context) bo propagation := metav1.DeletePropagationBackground err := pods.Delete(ctx, podName, metav1.DeleteOptions{ PropagationPolicy: &propagation, - GracePeriodSeconds: wfc.Config.PodGCGracePeriodSeconds}) + GracePeriodSeconds: wfc.Config.PodGCGracePeriodSeconds, + }) if err != nil && !apierr.IsNotFound(err) { return err } @@ -639,7 +638,7 @@ func (wfc *WorkflowController) processNextItem(ctx context.Context) bool { // TODO: operate should return error if it was unable to operate properly // so we can requeue the work for a later time // See: https://github.com/kubernetes/client-go/blob/master/examples/workqueue/main.go - //c.handleErr(err, key) + // c.handleErr(err, key) return true } @@ -903,7 +902,6 @@ func (wfc *WorkflowController) newPodInformer(ctx context.Context) cache.SharedI // Enqueue the workflow for deleted pod _ = wfc.enqueueWfFromPodLabel(obj) - }, }, ) diff --git a/workflow/controller/controller_test.go b/workflow/controller/controller_test.go index da219653c0e0..72299402bd74 100644 --- a/workflow/controller/controller_test.go +++ b/workflow/controller/controller_test.go @@ -476,6 +476,7 @@ spec: workflowTemplateRef: name: workflow-template-whalesay-template ` + const wfTmpl = ` apiVersion: argoproj.io/v1alpha1 kind: WorkflowTemplate @@ -504,8 +505,10 @@ func TestCheckAndInitWorkflowTmplRef(t *testing.T) { wftmpl := unmarshalWFTmpl(wfTmpl) cancel, controller := newController(wf, wftmpl) defer cancel() - woc := wfOperationCtx{controller: controller, - wf: wf} + woc := wfOperationCtx{ + controller: controller, + wf: wf, + } err := woc.setExecWorkflow() assert.NoError(t, err) assert.Equal(t, wftmpl.Spec.WorkflowSpec.Templates, woc.execWf.Spec.Templates) diff --git a/workflow/controller/dag.go b/workflow/controller/dag.go index 
f1be657177bc..fbd575bf46b9 100644 --- a/workflow/controller/dag.go +++ b/workflow/controller/dag.go @@ -126,7 +126,6 @@ func (d *dagContext) getTaskNode(taskName string) *wfv1.NodeStatus { // assessDAGPhase assesses the overall DAG status func (d *dagContext) assessDAGPhase(targetTasks []string, nodes wfv1.Nodes) wfv1.NodePhase { - // targetTaskPhases keeps track of all the phases of the target tasks. This is necessary because some target tasks may // be omitted and will not have an explicit phase. We would still like to deduce a phase for those tasks in order to // determine the overall phase of the DAG. To do so, an omitted task always inherits the phase of its parents, with @@ -376,7 +375,6 @@ func (woc *wfOperationCtx) executeDAGTask(ctx context.Context, dagCtx *dagContex } else { woc.addChildNode(taskGroupNode.Name, taskNodeName) } - } else { // Otherwise, add all outbound nodes of our dependencies as parents to this node for _, depName := range taskDependencies { @@ -539,7 +537,6 @@ func (woc *wfOperationCtx) buildLocalScopeFromTask(dagCtx *dagContext, task *wfv // resolveDependencyReferences replaces any references to outputs of task dependencies, or artifacts in the inputs // NOTE: by now, input parameters should have been substituted throughout the template func (woc *wfOperationCtx) resolveDependencyReferences(dagCtx *dagContext, task *wfv1.DAGTask) (*wfv1.DAGTask, error) { - scope, err := woc.buildLocalScopeFromTask(dagCtx, task) if err != nil { return nil, err @@ -559,7 +556,7 @@ func (woc *wfOperationCtx) resolveDependencyReferences(dagCtx *dagContext, task } fstTmpl, err := fasttemplate.NewTemplate(string(taskBytes), "{{", "}}") if err != nil { - return nil, fmt.Errorf("unable to parse argo varaible: %w", err) + return nil, fmt.Errorf("unable to parse argo variable: %w", err) } newTaskStr, err := common.Replace(fstTmpl, woc.globalParams.Merge(scope.getParameters()), true) @@ -664,7 +661,7 @@ func expandTask(task wfv1.DAGTask) ([]wfv1.DAGTask, error) { fstTmpl, err := fasttemplate.NewTemplate(string(taskBytes), "{{", "}}") if err != nil { - return nil, fmt.Errorf("unable to parse argo varaible: %w", err) + return nil, fmt.Errorf("unable to parse argo variable: %w", err) } expandedTasks := make([]wfv1.DAGTask, 0) for i, item := range items { diff --git a/workflow/controller/dag_test.go b/workflow/controller/dag_test.go index 5ce4d2af5601..d36c76088230 100644 --- a/workflow/controller/dag_test.go +++ b/workflow/controller/dag_test.go @@ -6,14 +6,13 @@ import ( "strings" "testing" - "github.com/argoproj/argo-workflows/v3/workflow/common" - "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/test" + "github.com/argoproj/argo-workflows/v3/workflow/common" ) // TestDagXfail verifies a DAG can fail properly @@ -404,7 +403,6 @@ func TestEvaluateAnyAllDependsLogic(t *testing.T) { assert.NoError(t, err) assert.True(t, proceed) assert.True(t, execute) - } func TestEvaluateDependsLogicWhenDaemonFailed(t *testing.T) { diff --git a/workflow/controller/estimation/dummy_estimator.go b/workflow/controller/estimation/dummy_estimator.go index 7423a89044f3..7a75764c87b1 100644 --- a/workflow/controller/estimation/dummy_estimator.go +++ b/workflow/controller/estimation/dummy_estimator.go @@ -11,6 +11,7 @@ type dummyEstimator struct{} func (e *dummyEstimator) EstimateWorkflowDuration() wfv1.EstimatedDuration { return 
wfv1.NewEstimatedDuration(time.Second)
 }
+
 func (e *dummyEstimator) EstimateNodeDuration(string) wfv1.EstimatedDuration {
 	return wfv1.NewEstimatedDuration(time.Second)
 }
diff --git a/workflow/controller/exec_control.go b/workflow/controller/exec_control.go
index 705fe9bcf8d0..1649178f38ba 100644
--- a/workflow/controller/exec_control.go
+++ b/workflow/controller/exec_control.go
@@ -48,7 +48,7 @@ func (woc *wfOperationCtx) applyExecutionControl(ctx context.Context, pod *apiv1
 	// Check if we are past the workflow deadline. If we are, and the pod is still pending
 	// then we should simply delete it and mark the pod as Failed
 	if woc.workflowDeadline != nil && time.Now().UTC().After(*woc.workflowDeadline) {
-		//pods that are part of an onExit handler aren't subject to the deadline
+		// pods that are part of an onExit handler aren't subject to the deadline
 		_, onExitPod := pod.Labels[common.LabelKeyOnExit]
 		if !onExitPod {
 			woc.log.Infof("Deleting Pending pod %s/%s which has exceeded workflow deadline %s", pod.Namespace, pod.Name, woc.workflowDeadline)
diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go
index d98fda64299d..0aac22268cb8 100644
--- a/workflow/controller/operator.go
+++ b/workflow/controller/operator.go
@@ -261,7 +261,6 @@ func (woc *wfOperationCtx) operate(ctx context.Context) {
 
 	// Validate the execution wfSpec
 	wfConditions, err := validate.ValidateWorkflow(wftmplGetter, cwftmplGetter, woc.wf, validateOpts)
-
 	if err != nil {
 		msg := fmt.Sprintf("invalid spec: %s", err.Error())
 		woc.markWorkflowFailed(ctx, msg)
@@ -292,7 +291,7 @@ func (woc *wfOperationCtx) operate(ctx context.Context) {
 		woc.workflowDeadline = woc.getWorkflowDeadline()
 		err := woc.podReconciliation(ctx)
 		if err == nil {
-			err = woc.failSuspendedAndPendingNodesAfterDeadlineOrShutdown()
+			woc.failSuspendedAndPendingNodesAfterDeadlineOrShutdown()
 		}
 		if err != nil {
 			woc.log.WithError(err).WithField("workflow", woc.wf.ObjectMeta.Name).Error("workflow timeout")
@@ -998,8 +997,8 @@ func (woc *wfOperationCtx) shouldPrintPodSpec(node wfv1.NodeStatus) bool {
 		(woc.controller.Config.PodSpecLogStrategy.FailedPod && node.FailedOrError())
 }
 
-//fails any suspended and pending nodes if the workflow deadline has passed
-func (woc *wfOperationCtx) failSuspendedAndPendingNodesAfterDeadlineOrShutdown() error {
+// fails any suspended and pending nodes if the workflow deadline has passed or the workflow is shutting down
+func (woc *wfOperationCtx) failSuspendedAndPendingNodesAfterDeadlineOrShutdown() {
 	deadlineExceeded := woc.workflowDeadline != nil && time.Now().UTC().After(*woc.workflowDeadline)
 	if woc.execWf.Spec.Shutdown != "" || deadlineExceeded {
 		for _, node := range woc.wf.Status.Nodes {
@@ -1014,13 +1013,12 @@ func (woc *wfOperationCtx) failSuspendedAndPendingNodesAfterDeadlineOrShutdown()
 			}
 		}
 	}
-	return nil
 }
 
 // countActivePods counts the number of active (Pending/Running) pods.
// Optionally restricts it to a template invocation (boundaryID)
 func (woc *wfOperationCtx) countActivePods(boundaryIDs ...string) int64 {
-	var boundaryID = ""
+	boundaryID := ""
 	if len(boundaryIDs) > 0 {
 		boundaryID = boundaryIDs[0]
 	}
@@ -1047,7 +1045,7 @@
 
 // countActiveChildren counts the number of active (Pending/Running) children nodes of parent parentName
 func (woc *wfOperationCtx) countActiveChildren(boundaryIDs ...string) int64 {
-	var boundaryID = ""
+	boundaryID := ""
 	if len(boundaryIDs) > 0 {
 		boundaryID = boundaryIDs[0]
 	}
@@ -1386,7 +1384,7 @@ func (woc *wfOperationCtx) createPVCs(ctx context.Context) error {
 		}
 	}
 
-	//continue
+	// continue
 	if err != nil {
 		return err
 	}
@@ -1768,7 +1766,7 @@ func (woc *wfOperationCtx) executeTemplate(ctx context.Context, nodeName string,
 	// Swap the node back to retry node
 	if retryNodeName != "" {
 		retryNode := woc.wf.GetNodeByName(retryNodeName)
-		if !retryNode.Fulfilled() && node.Fulfilled() { //if the retry child has completed we need to update outself
+		if !retryNode.Fulfilled() && node.Fulfilled() { // if the retry child has completed we need to update ourselves
 			node, err = woc.executeTemplate(ctx, retryNodeName, orgTmpl, tmplCtx, args, opts)
 			if err != nil {
 				return woc.markNodeError(node.Name, err), err
@@ -2277,7 +2275,6 @@ func getTemplateOutputsFromScope(tmpl *wfv1.Template, scope *wfScope) (*wfv1.Out
 
 // hasOutputResultRef will check given template output has any reference
 func hasOutputResultRef(name string, parentTmpl *wfv1.Template) bool {
-
 	var variableRefName string
 	if parentTmpl.DAG != nil {
 		variableRefName = "{{tasks." + name + ".outputs.result}}"
@@ -2406,7 +2403,7 @@ func (woc *wfOperationCtx) addOutputsToGlobalScope(outputs *wfv1.Outputs) {
 		woc.addParamToGlobalScope(param)
 	}
 	for _, art := range outputs.Artifacts {
-		woc.addArtifactToGlobalScope(art, nil)
+		woc.addArtifactToGlobalScope(art)
 	}
 }
 
@@ -2426,11 +2423,13 @@ func parseLoopIndex(s string) int {
 	}
 	return val
 }
+
 func (n loopNodes) Less(i, j int) bool {
 	left := parseLoopIndex(n[i].DisplayName)
 	right := parseLoopIndex(n[j].DisplayName)
 	return left < right
 }
+
 func (n loopNodes) Swap(i, j int) {
 	n[i], n[j] = n[j], n[i]
 }
@@ -2531,7 +2530,7 @@ func (woc *wfOperationCtx) addParamToGlobalScope(param wfv1.Parameter) {
 
 // addArtifactToGlobalScope exports any desired node outputs to the global scope
 // Optionally adds to a local scope if supplied
-func (woc *wfOperationCtx) addArtifactToGlobalScope(art wfv1.Artifact, scope *wfScope) {
+func (woc *wfOperationCtx) addArtifactToGlobalScope(art wfv1.Artifact) {
 	if art.GlobalName == "" {
 		return
 	}
@@ -2545,9 +2544,6 @@ func (woc *wfOperationCtx) addArtifactToGlobalScope(art wfv1.Artifact, scope *wf
 			art.Path = ""
 			if !reflect.DeepEqual(woc.wf.Status.Outputs.Artifacts[i], art) {
 				woc.wf.Status.Outputs.Artifacts[i] = art
-				if scope != nil {
-					scope.addArtifactToScope(globalArtName, art)
-				}
 				woc.log.Infof("overwriting %s: %v", globalArtName, art)
 				woc.updated = true
 			}
@@ -2563,9 +2559,6 @@ func (woc *wfOperationCtx) addArtifactToGlobalScope(art wfv1.Artifact, scope *wf
 	art.Path = ""
 	woc.log.Infof("setting %s: %v", globalArtName, art)
 	woc.wf.Status.Outputs.Artifacts = append(woc.wf.Status.Outputs.Artifacts, art)
-	if scope != nil {
-		scope.addArtifactToScope(globalArtName, art)
-	}
 	woc.updated = true
 }
 
@@ -2591,7 +2584,6 @@ func (woc *wfOperationCtx) addChildNode(parent string, child string) {
 
 // executeResource is runs a kubectl command against a manifest
 func (woc
*wfOperationCtx) executeResource(ctx context.Context, nodeName string, templateScope string, tmpl *wfv1.Template, orgTmpl wfv1.TemplateReferenceHolder, opts *executeTemplateOpts) (*wfv1.NodeStatus, error) { - node := woc.wf.GetNodeByName(nodeName) if node == nil { @@ -2823,7 +2815,7 @@ func (woc *wfOperationCtx) substituteParamsInVolumes(params map[string]string) e } fstTmpl, err := fasttemplate.NewTemplate(string(volumesBytes), "{{", "}}") if err != nil { - return fmt.Errorf("unable to parse argo varaible: %w", err) + return fmt.Errorf("unable to parse argo variable: %w", err) } newVolumesStr, err := common.Replace(fstTmpl, params, true) if err != nil { @@ -2895,7 +2887,7 @@ func (woc *wfOperationCtx) computeMetrics(metricList []*wfv1.Prometheus, localSc } fstTmpl, err := fasttemplate.NewTemplate(string(metricTmplBytes), "{{", "}}") if err != nil { - woc.reportMetricEmissionError(fmt.Sprintf("unable to parse argo varaible for metric '%s': %s", metricTmpl.Name, err)) + woc.reportMetricEmissionError(fmt.Sprintf("unable to parse argo variable for metric '%s': %s", metricTmpl.Name, err)) continue } replacedValue, err := common.Replace(fstTmpl, localScope, false) @@ -2955,7 +2947,7 @@ func (woc *wfOperationCtx) computeMetrics(metricList []*wfv1.Prometheus, localSc // Finally substitute value parameters fstTmpl, err = fasttemplate.NewTemplate(metricSpec.GetValueString(), "{{", "}}") if err != nil { - woc.reportMetricEmissionError(fmt.Sprintf("unable to parse argo varaible for metric '%s': %s", metricTmpl.Name, err)) + woc.reportMetricEmissionError(fmt.Sprintf("unable to parse argo variable for metric '%s': %s", metricTmpl.Name, err)) continue } replacedValue, err := common.Replace(fstTmpl, localScope, false) @@ -2994,7 +2986,6 @@ func (woc *wfOperationCtx) reportMetricEmissionError(errorString string) { } func (woc *wfOperationCtx) createPDBResource(ctx context.Context) error { - if woc.execWf.Spec.PodDisruptionBudget == nil { return nil } @@ -3158,7 +3149,6 @@ func (woc *wfOperationCtx) setStoredWfSpec() error { } if mergedWf.Spec.String() != woc.wf.Status.StoredWorkflowSpec.String() { return fmt.Errorf("workflowTemplateRef reference may not change during execution when the controller is in reference mode") - } } return nil diff --git a/workflow/controller/operator_concurrency_test.go b/workflow/controller/operator_concurrency_test.go index 105f627dc7b8..94b04d142f6b 100644 --- a/workflow/controller/operator_concurrency_test.go +++ b/workflow/controller/operator_concurrency_test.go @@ -6,11 +6,10 @@ import ( "strings" "testing" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" "sigs.k8s.io/yaml" argoErr "github.com/argoproj/argo-workflows/v3/errors" @@ -28,6 +27,7 @@ data: template: "1" step: "1" ` + const wfWithSemaphore = ` apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -77,6 +77,7 @@ spec: exit_code = random.choice([0, 1, 1]); sys.exit(exit_code) ` + const ResourceWfWithSemaphore = ` apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -113,7 +114,6 @@ func GetSyncLimitFunc(ctx context.Context, kube kubernetes.Interface) func(strin } configMap, err := kube.CoreV1().ConfigMaps(items[0]).Get(ctx, items[2], metav1.GetOptions{}) - if err != nil { return 0, err } @@ -187,7 +187,6 @@ func TestSemaphoreTmplLevel(t *testing.T) { assert.NotNil(t, woc_two.wf.Status.Synchronization) assert.NotNil(t, 
woc_two.wf.Status.Synchronization.Semaphore) assert.Equal(t, 1, len(woc_two.wf.Status.Synchronization.Semaphore.Holding)) - }) } @@ -249,7 +248,6 @@ func TestSemaphoreScriptTmplLevel(t *testing.T) { assert.NotNil(t, woc_two.wf.Status.Synchronization) assert.NotNil(t, woc_two.wf.Status.Synchronization.Semaphore) assert.Equal(t, 1, len(woc_two.wf.Status.Synchronization.Semaphore.Holding)) - }) } @@ -312,9 +310,9 @@ func TestSemaphoreResourceTmplLevel(t *testing.T) { assert.NotNil(t, woc_two.wf.Status.Synchronization) assert.NotNil(t, woc_two.wf.Status.Synchronization.Semaphore) assert.Equal(t, 1, len(woc_two.wf.Status.Synchronization.Semaphore.Holding)) - }) } + func TestSemaphoreWithOutConfigMap(t *testing.T) { cancel, controller := newController() defer cancel() @@ -340,7 +338,6 @@ func TestSemaphoreWithOutConfigMap(t *testing.T) { for _, node := range woc.wf.Status.Nodes { assert.Equal(t, wfv1.NodeError, node.Phase) } - }) } @@ -484,7 +481,6 @@ func TestSynchronizationWithRetry(t *testing.T) { assert.Empty(woc.wf.Status.Synchronization.Semaphore.Waiting) // Nobody is holding the lock assert.Empty(woc.wf.Status.Synchronization.Semaphore.Holding[0].Holders) - }) } @@ -662,7 +658,7 @@ func TestSynchronizationWithStep(t *testing.T) { assert.NoError(err) t.Run("StepWithSychronization", func(t *testing.T) { - //First workflow Acquire the lock + // First workflow Acquire the lock wf := unmarshalWF(StepWithSync) wf, err := controller.wfclientset.ArgoprojV1alpha1().Workflows("default").Create(ctx, wf, metav1.CreateOptions{}) assert.NoError(err) @@ -684,7 +680,7 @@ func TestSynchronizationWithStep(t *testing.T) { assert.Nil(woc1.wf.Status.Synchronization.Semaphore.Holding) assert.Len(woc1.wf.Status.Synchronization.Semaphore.Waiting, 1) - //Finished all StepGroup in step + // Finished all StepGroup in step wf = unmarshalWF(StepWithSyncStatus) woc = newWorkflowOperationCtx(wf, controller) woc.operate(ctx) diff --git a/workflow/controller/operator_metrics_test.go b/workflow/controller/operator_metrics_test.go index 925a2a6b3071..91574f0b2035 100644 --- a/workflow/controller/operator_metrics_test.go +++ b/workflow/controller/operator_metrics_test.go @@ -5,13 +5,13 @@ import ( "fmt" "testing" - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/stretchr/testify/assert" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" ) var basicMetric = ` diff --git a/workflow/controller/operator_test.go b/workflow/controller/operator_test.go index 7de8d44ba073..597f604c341a 100644 --- a/workflow/controller/operator_test.go +++ b/workflow/controller/operator_test.go @@ -13,7 +13,6 @@ import ( "time" "github.com/argoproj/pkg/strftime" - "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/stretchr/testify/assert" @@ -1270,7 +1269,7 @@ func TestWorkflowStepRetry(t *testing.T) { assert.Nil(t, err) assert.Equal(t, 1, len(pods.Items)) - //complete the first pod + // complete the first pod makePodsPhase(ctx, woc, apiv1.PodSucceeded) wf, err = wfcset.Get(ctx, wf.ObjectMeta.Name, metav1.GetOptions{}) assert.Nil(t, err) @@ -1289,7 +1288,7 @@ func TestWorkflowStepRetry(t *testing.T) { assert.Equal(t, "cowsay success", pods.Items[0].Spec.Containers[1].Args[0]) assert.Equal(t, "cowsay failure", pods.Items[1].Spec.Containers[1].Args[0]) - //verify that after the 
cowsay failure pod failed, we are retrying cowsay success + // verify that after the cowsay failure pod failed, we are retrying cowsay success assert.Equal(t, "cowsay success", pods.Items[2].Spec.Containers[1].Args[0]) } } @@ -1667,7 +1666,6 @@ func TestSuspendWithDeadline(t *testing.T) { } } assert.True(t, found) - } var sequence = ` @@ -2234,7 +2232,6 @@ func TestAddGlobalParamToScope(t *testing.T) { assert.Equal(t, param.GlobalName, woc.wf.Status.Outputs.Parameters[1].Name) assert.Equal(t, newValue, woc.wf.Status.Outputs.Parameters[1].Value) assert.Equal(t, newValue.String(), woc.globalParams["workflow.outputs.parameters.global-param2"]) - } func TestAddGlobalArtifactToScope(t *testing.T) { @@ -2251,19 +2248,19 @@ func TestAddGlobalArtifactToScope(t *testing.T) { }, } // Make sure if the artifact is not global, don't add to scope - woc.addArtifactToGlobalScope(art, nil) + woc.addArtifactToGlobalScope(art) assert.Nil(t, woc.wf.Status.Outputs) // Now mark it as global. Verify it is added to workflow outputs art.GlobalName = "global-art" - woc.addArtifactToGlobalScope(art, nil) + woc.addArtifactToGlobalScope(art) assert.Equal(t, 1, len(woc.wf.Status.Outputs.Artifacts)) assert.Equal(t, art.GlobalName, woc.wf.Status.Outputs.Artifacts[0].Name) assert.Equal(t, "some/key", woc.wf.Status.Outputs.Artifacts[0].S3.Key) // Change the value and verify update is reflected art.S3.Key = "new/key" - woc.addArtifactToGlobalScope(art, nil) + woc.addArtifactToGlobalScope(art) assert.Equal(t, 1, len(woc.wf.Status.Outputs.Artifacts)) assert.Equal(t, art.GlobalName, woc.wf.Status.Outputs.Artifacts[0].Name) assert.Equal(t, "new/key", woc.wf.Status.Outputs.Artifacts[0].S3.Key) @@ -2271,7 +2268,7 @@ func TestAddGlobalArtifactToScope(t *testing.T) { // Add a new global artifact art.GlobalName = "global-art2" art.S3.Key = "new/new/key" - woc.addArtifactToGlobalScope(art, nil) + woc.addArtifactToGlobalScope(art) assert.Equal(t, 2, len(woc.wf.Status.Outputs.Artifacts)) assert.Equal(t, art.GlobalName, woc.wf.Status.Outputs.Artifacts[1].Name) assert.Equal(t, "new/new/key", woc.wf.Status.Outputs.Artifacts[1].S3.Key) @@ -2605,7 +2602,6 @@ spec: ` func TestResolveStatuses(t *testing.T) { - cancel, controller := newController() defer cancel() wfcset := controller.wfclientset.ArgoprojV1alpha1().Workflows("") @@ -2852,7 +2848,6 @@ spec: cat /dev/urandom | od -N2 -An -i | awk -v f=1 -v r=100 '{printf "%i\n", f + r * $1 / 65536}'` func TestStepWFGetNodeName(t *testing.T) { - cancel, controller := newController() defer cancel() wfcset := controller.wfclientset.ArgoprojV1alpha1().Workflows("") @@ -2878,7 +2873,6 @@ func TestStepWFGetNodeName(t *testing.T) { } func TestDAGWFGetNodeName(t *testing.T) { - cancel, controller := newController() defer cancel() wfcset := controller.wfclientset.ArgoprojV1alpha1().Workflows("") @@ -4420,7 +4414,7 @@ spec: ` func TestConfigMapCacheLoadOperate(t *testing.T) { - var sampleConfigMapCacheEntry = apiv1.ConfigMap{ + sampleConfigMapCacheEntry := apiv1.ConfigMap{ Data: map[string]string{ "hi-there-world": 
`{"nodeID":"memoized-simple-workflow-5wj2p","outputs":{"parameters":[{"name":"hello","value":"foobar","valueFrom":{"path":"/tmp/hello_world.txt"}}],"artifacts":[{"name":"main-logs","archiveLogs":true,"s3":{"endpoint":"minio:9000","bucket":"my-bucket","insecure":true,"accessKeySecret":{"name":"my-minio-cred","key":"accesskey"},"secretKeySecret":{"name":"my-minio-cred","key":"secretkey"},"key":"memoized-simple-workflow-5wj2p/memoized-simple-workflow-5wj2p/main.log"}}]},"creationTimestamp":"2020-09-21T18:12:56Z"}`, }, @@ -4549,7 +4543,7 @@ func TestConfigMapCacheLoadOperateMaxAge(t *testing.T) { } func TestConfigMapCacheLoadNilOutputs(t *testing.T) { - var sampleConfigMapCacheEntry = apiv1.ConfigMap{ + sampleConfigMapCacheEntry := apiv1.ConfigMap{ Data: map[string]string{ "hi-there-world": `{"ExpiresAt":"2020-06-18T17:11:05Z","NodeID":"memoize-abx4124-123129321123","Outputs":{}}`, }, @@ -4740,7 +4734,6 @@ func TestCheckForbiddenErrorAndResbmitAllowed(t *testing.T) { assert.Error(t, err) assert.Nil(t, node) }) - } func TestResubmitMemoization(t *testing.T) { @@ -4814,7 +4807,7 @@ spec: defer cancel() woc := newWorkflowOperationCtx(wf, controller) - // reconcille + // reconcile ctx := context.Background() woc.operate(ctx) assert.Equal(t, wfv1.WorkflowRunning, woc.wf.Status.Phase) @@ -4822,7 +4815,7 @@ spec: // make all created pods as successful makePodsPhase(ctx, woc, apiv1.PodSucceeded, withOutputs(`{"parameters": [{"name": "my-param"}]}`)) - // reconcille + // reconcile woc = newWorkflowOperationCtx(woc.wf, controller) woc.operate(ctx) assert.Equal(t, wfv1.WorkflowSucceeded, woc.wf.Status.Phase) @@ -5169,7 +5162,6 @@ spec: ` func TestTemplateTimeoutDuration(t *testing.T) { - t.Run("Step Template Deadline", func(t *testing.T) { wf := unmarshalWF(stepTimeoutWf) cancel, controller := newController(wf) diff --git a/workflow/controller/operator_wfdefault_test.go b/workflow/controller/operator_wfdefault_test.go index d092a6037172..f9519fdcc626 100644 --- a/workflow/controller/operator_wfdefault_test.go +++ b/workflow/controller/operator_wfdefault_test.go @@ -46,6 +46,7 @@ var wfDefaults = ` secret: secretName: test ` + var simpleWf = ` apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -62,6 +63,7 @@ spec: command: [cowsay] args: ["hello world"] ` + var wf_wfdefaultResult = ` apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -108,6 +110,7 @@ spec: secret: secretName: test ` + var simpleWFT = ` apiVersion: argoproj.io/v1alpha1 kind: WorkflowTemplate @@ -130,6 +133,7 @@ spec: command: [cowsay] args: ["{{inputs.parameters.message}}"] ` + var storedSpecResult = ` { "activeDeadlineSeconds": 7200, @@ -250,7 +254,8 @@ func TestWFDefaultWithWFTAndWf(t *testing.T) { SecondsAfterCompletion: pointer.Int32Ptr(10), } - wf := wfv1.Workflow{ObjectMeta: metav1.ObjectMeta{Namespace: "default"}, + wf := wfv1.Workflow{ + ObjectMeta: metav1.ObjectMeta{Namespace: "default"}, Spec: wfv1.WorkflowSpec{ WorkflowTemplateRef: &wfv1.WorkflowTemplateRef{Name: "workflow-template-submittable"}, Entrypoint: "Test", @@ -285,7 +290,8 @@ func TestWFDefaultWithWFTAndWf(t *testing.T) { SecondsAfterCompletion: pointer.Int32Ptr(10), } - wf := wfv1.Workflow{ObjectMeta: metav1.ObjectMeta{Namespace: "default"}, + wf := wfv1.Workflow{ + ObjectMeta: metav1.ObjectMeta{Namespace: "default"}, Spec: wfv1.WorkflowSpec{ WorkflowTemplateRef: &wfv1.WorkflowTemplateRef{Name: "workflow-template-submittable"}, Entrypoint: "Test", @@ -296,7 +302,7 @@ func TestWFDefaultWithWFTAndWf(t *testing.T) { }, }, } - //resultSpec.Arguments.Parameters = 
append(resultSpec.Arguments.Parameters, args.Parameters...) + // resultSpec.Arguments.Parameters = append(resultSpec.Arguments.Parameters, args.Parameters...) resultSpec.Entrypoint = "Test" resultSpec.TTLStrategy = &ttlStrategy resultSpec.WorkflowTemplateRef = &wfv1.WorkflowTemplateRef{Name: "workflow-template-submittable"} @@ -308,5 +314,4 @@ func TestWFDefaultWithWFTAndWf(t *testing.T) { assert.Contains(woc.execWf.Spec.Arguments.Parameters, param) assert.Contains(woc.wf.Status.StoredWorkflowSpec.Arguments.Artifacts, art) }) - } diff --git a/workflow/controller/operator_workflow_template_ref_test.go b/workflow/controller/operator_workflow_template_ref_test.go index 13c10fb528c8..8af9e222d7e3 100644 --- a/workflow/controller/operator_workflow_template_ref_test.go +++ b/workflow/controller/operator_workflow_template_ref_test.go @@ -45,8 +45,8 @@ func TestWorkflowTemplateRefWithArgs(t *testing.T) { woc.operate(ctx) assert.Equal(t, "test", woc.globalParams["workflow.parameters.param1"]) }) - } + func TestWorkflowTemplateRefWithWorkflowTemplateArgs(t *testing.T) { wf := unmarshalWF(wfWithTmplRef) wftmpl := unmarshalWFTmpl(wfTmpl) @@ -65,21 +65,20 @@ func TestWorkflowTemplateRefWithWorkflowTemplateArgs(t *testing.T) { woc := newWorkflowOperationCtx(wf, controller) woc.operate(ctx) assert.Equal(t, "test", woc.globalParams["workflow.parameters.param1"]) - }) t.Run("CheckMergingWFDefaults", func(t *testing.T) { wfDefaultActiveS := int64(5) cancel, controller := newController(wf, wftmpl) defer cancel() - controller.Config.WorkflowDefaults = &wfv1.Workflow{Spec: wfv1.WorkflowSpec{ - ActiveDeadlineSeconds: &wfDefaultActiveS, - }, + controller.Config.WorkflowDefaults = &wfv1.Workflow{ + Spec: wfv1.WorkflowSpec{ + ActiveDeadlineSeconds: &wfDefaultActiveS, + }, } woc := newWorkflowOperationCtx(wf, controller) woc.operate(ctx) assert.Equal(t, wfDefaultActiveS, *woc.execWf.Spec.ActiveDeadlineSeconds) - }) t.Run("CheckMergingWFTandWF", func(t *testing.T) { wfActiveS := int64(10) @@ -89,9 +88,10 @@ func TestWorkflowTemplateRefWithWorkflowTemplateArgs(t *testing.T) { wftmpl.Spec.ActiveDeadlineSeconds = &wftActiveS cancel, controller := newController(wf, wftmpl) defer cancel() - controller.Config.WorkflowDefaults = &wfv1.Workflow{Spec: wfv1.WorkflowSpec{ - ActiveDeadlineSeconds: &wfDefaultActiveS, - }, + controller.Config.WorkflowDefaults = &wfv1.Workflow{ + Spec: wfv1.WorkflowSpec{ + ActiveDeadlineSeconds: &wfDefaultActiveS, + }, } wf.Spec.ActiveDeadlineSeconds = &wfActiveS woc := newWorkflowOperationCtx(wf, controller) @@ -172,6 +172,7 @@ spec: command: [echo] args: ["{{workflows.parameters.a-a}} = {{workflows.parameters.g-g}}"] ` + var wfWithParam = ` apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -209,7 +210,6 @@ func TestWorkflowTemplateRefParamMerge(t *testing.T) { woc.operate(ctx) assert.Equal(t, wf.Spec.Arguments.Parameters, woc.wf.Spec.Arguments.Parameters) }) - } var wftWithArtifact = ` diff --git a/workflow/controller/pod_cleanup_key.go b/workflow/controller/pod_cleanup_key.go index 6728aa636f55..5be5e047bd9a 100644 --- a/workflow/controller/pod_cleanup_key.go +++ b/workflow/controller/pod_cleanup_key.go @@ -9,8 +9,10 @@ import ( // * cleanup is a noun - e.g "The cleanup" // * clean-up is a verb - e.g. 
"I clean-up" -type podCleanupKey = string // describes the pod to cleanup + the cleanup action to take -type podCleanupAction = string +type ( + podCleanupKey = string // describes the pod to cleanup + the cleanup action to take + podCleanupAction = string +) const ( deletePod podCleanupAction = "deletePod" @@ -27,5 +29,4 @@ func parsePodCleanupKey(k podCleanupKey) (namespace string, podName string, acti return "", "", "" } return parts[0], parts[1], parts[2] - } diff --git a/workflow/controller/rate_limiters.go b/workflow/controller/rate_limiters.go index 877f7e32271e..ad5cc904651b 100644 --- a/workflow/controller/rate_limiters.go +++ b/workflow/controller/rate_limiters.go @@ -11,7 +11,7 @@ import ( type fixedItemIntervalRateLimiter struct{} func (r *fixedItemIntervalRateLimiter) When(interface{}) time.Duration { - // We need to rate limit a minimum 1s, otherwise informers are unlikey to be upto date + // We need to rate limit a minimum 1s, otherwise informers are unlikely to be upto date // and we'll operate on an out of date version of a workflow. // Under high load, the informer can get many seconds behind. Increasing this to 30s // would be sensible for some users. diff --git a/workflow/controller/scope.go b/workflow/controller/scope.go index d98405cc5dc3..9bfe58771883 100644 --- a/workflow/controller/scope.go +++ b/workflow/controller/scope.go @@ -70,9 +70,7 @@ func (s *wfScope) resolveParameter(v string) (string, error) { } func (s *wfScope) resolveArtifact(v string, subPath string) (*wfv1.Artifact, error) { - val, err := s.resolveVar(v) - if err != nil { return nil, err } diff --git a/workflow/controller/scope_test.go b/workflow/controller/scope_test.go index 2f02738ed228..710ff8cb64ce 100644 --- a/workflow/controller/scope_test.go +++ b/workflow/controller/scope_test.go @@ -58,7 +58,7 @@ func artifactSubPathResolution(t *testing.T, artifactString string, subPathArtif } func TestSubPathResolution(t *testing.T) { - var s3Artifact = ` + s3Artifact := ` name: s3-artifact path: some/local/path s3: @@ -73,7 +73,7 @@ func TestSubPathResolution(t *testing.T) { name: my-minio-cred ` - var s3ArtifactWithSubpath = ` + s3ArtifactWithSubpath := ` name: s3-artifact path: some/local/path s3: @@ -88,7 +88,7 @@ func TestSubPathResolution(t *testing.T) { name: my-minio-cred ` - var ArtifactoryArtifact = ` + ArtifactoryArtifact := ` name: artifactory-artifact path: some/local/path artifactory: @@ -101,7 +101,7 @@ func TestSubPathResolution(t *testing.T) { name: my-artifactory-cred ` - var ArtifactoryArtifactWithSubpath = ` + ArtifactoryArtifactWithSubpath := ` name: artifactory-artifact path: some/local/path artifactory: @@ -114,7 +114,7 @@ func TestSubPathResolution(t *testing.T) { name: my-artifactory-cred ` - var GCSArtifact = ` + GCSArtifact := ` name: gcs-artifact path: some/local/path gcs: @@ -125,7 +125,7 @@ func TestSubPathResolution(t *testing.T) { name: my-gcs-cred ` - var GCSArtifactWithSubpath = ` + GCSArtifactWithSubpath := ` name: gcs-artifact path: some/local/path gcs: @@ -136,7 +136,7 @@ func TestSubPathResolution(t *testing.T) { name: my-gcs-cred ` - var HDFSArtifact = ` + HDFSArtifact := ` name: hdfs-artifact path: some/local/path hdfs: @@ -146,7 +146,7 @@ func TestSubPathResolution(t *testing.T) { path: /path/to/some/key hdfsUser: root ` - var HDFSArtifactWithSubpath = ` + HDFSArtifactWithSubpath := ` name: hdfs-artifact path: some/local/path hdfs: @@ -157,7 +157,7 @@ func TestSubPathResolution(t *testing.T) { hdfsUser: root ` - var OSSArtifact = ` + OSSArtifact := ` name: 
oss-artifact
 path: some/local/path
 oss:
@@ -171,7 +171,7 @@ func TestSubPathResolution(t *testing.T) {
     name: my-oss-credentials
     key: secretKey
 `
-	var OSSArtifactWithSubpath = `
+	OSSArtifactWithSubpath := `
 name: oss-artifact
 path: some/local/path
 oss:
@@ -186,27 +186,27 @@ func TestSubPathResolution(t *testing.T) {
     key: secretKey
 `
 
-	var HTTPArtifact = `
+	HTTPArtifact := `
 name: oss-artifact
 path: some/local/path
 http:
     url: https://example.com
 `
-	var HTTPArtifactWithSubpath = `
+	HTTPArtifactWithSubpath := `
 name: oss-artifact
 path: some/local/path
 http:
     url: https://example.com/some/subkey
 `
 
-	var GitArtifact = `
+	GitArtifact := `
 name: git-artifact
 path: some/local/path
 git:
     repo: https://github.com/argoproj/argo-workflows
 `
 
-	var RawArtifact = `
+	RawArtifact := `
 name: raw-artifact
 path: some/local/path
 raw:
diff --git a/workflow/controller/steps.go b/workflow/controller/steps.go
index 44e5ffaad442..1344285f2900 100644
--- a/workflow/controller/steps.go
+++ b/workflow/controller/steps.go
@@ -355,7 +355,7 @@ func (woc *wfOperationCtx) resolveReferences(stepGroup []wfv1.WorkflowStep, scop
 	}
 	fstTmpl, err := fasttemplate.NewTemplate(string(stepBytes), "{{", "}}")
 	if err != nil {
-		return nil, fmt.Errorf("unable to parse argo varaible: %w", err)
+		return nil, fmt.Errorf("unable to parse argo variable: %w", err)
 	}
 
 	newStepStr, err := common.Replace(fstTmpl, woc.globalParams.Merge(scope.getParameters()), true)
@@ -470,7 +470,7 @@ func (woc *wfOperationCtx) expandStep(step wfv1.WorkflowStep) ([]wfv1.WorkflowSt
 	}
 	fstTmpl, err := fasttemplate.NewTemplate(string(stepBytes), "{{", "}}")
 	if err != nil {
-		return nil, fmt.Errorf("unable to parse argo varaible: %w", err)
+		return nil, fmt.Errorf("unable to parse argo variable: %w", err)
 	}
 
 	for i, item := range items {
diff --git a/workflow/controller/steps_test.go b/workflow/controller/steps_test.go
index 321ec6bf3620..17659d3743f5 100644
--- a/workflow/controller/steps_test.go
+++ b/workflow/controller/steps_test.go
@@ -128,7 +128,7 @@ func TestStepsWithParamAndGlobalParam(t *testing.T) {
 }
 
 func TestResourceDurationMetric(t *testing.T) {
-	var nodeStatus = `
+	nodeStatus := `
 boundaryID: many-items-z26lj
 displayName: sleep(4:four)
 finishedAt: "2020-06-02T16:04:50Z"
diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go
index aa6c6834c2cc..c090b3eb9585 100644
--- a/workflow/controller/workflowpod.go
+++ b/workflow/controller/workflowpod.go
@@ -172,7 +172,7 @@ func (woc *wfOperationCtx) createWorkflowPod(ctx context.Context, nodeName strin
 	if err != nil {
 		return nil, err
 	}
-	if wfDeadline == nil || opts.onExitPod { //ignore the workflow deadline for exit handler so they still run if the deadline has passed
+	if wfDeadline == nil || opts.onExitPod { // ignore the workflow deadline for exit handlers so they still run if the deadline has passed
 		activeDeadlineSeconds = tmplActiveDeadlineSeconds
 	} else {
 		wfActiveDeadlineSeconds := int64((*wfDeadline).Sub(time.Now().UTC()).Seconds())
@@ -243,10 +243,7 @@ func (woc *wfOperationCtx) createWorkflowPod(ctx context.Context, nodeName strin
 	// we do not need the wait container for resource templates because
 	// argoexec runs as the main container and will perform the job of
 	// annotating the outputs or errors, making the wait container redundant.
- waitCtr, err := woc.newWaitContainer(tmpl) - if err != nil { - return nil, err - } + waitCtr := woc.newWaitContainer(tmpl) pod.Spec.Containers = append(pod.Spec.Containers, *waitCtr) } // NOTE: the order of the container list is significant. kubelet will pull, create, and start @@ -281,14 +278,8 @@ func (woc *wfOperationCtx) createWorkflowPod(ctx context.Context, nodeName strin // addInitContainers, addSidecars and addOutputArtifactsVolumes should be called after all // volumes have been manipulated in the main container since volumeMounts are mirrored - err = addInitContainers(pod, tmpl) - if err != nil { - return nil, err - } - err = addSidecars(pod, tmpl) - if err != nil { - return nil, err - } + addInitContainers(pod, tmpl) + addSidecars(pod, tmpl) addOutputArtifactsVolumes(pod, tmpl) for i, c := range pod.Spec.Containers { @@ -357,7 +348,6 @@ func (woc *wfOperationCtx) createWorkflowPod(ctx context.Context, nodeName strin } modJson, err := strategicpatch.StrategicMergePatch(jsonstr, []byte(tmpl.PodSpecPatch), apiv1.PodSpec{}) - if err != nil { return nil, errors.Wrap(err, "", "Error occurred during strategic merge patch") } @@ -417,7 +407,7 @@ func substitutePodParams(pod *apiv1.Pod, globalParams common.Parameters, tmpl *w } fstTmpl, err := fasttemplate.NewTemplate(string(specBytes), "{{", "}}") if err != nil { - return nil, fmt.Errorf("unable to parse argo varaible: %w", err) + return nil, fmt.Errorf("unable to parse argo variable: %w", err) } newSpecBytes, err := common.Replace(fstTmpl, podParams, true) if err != nil { @@ -437,7 +427,7 @@ func (woc *wfOperationCtx) newInitContainer(tmpl *wfv1.Template) apiv1.Container return *ctr } -func (woc *wfOperationCtx) newWaitContainer(tmpl *wfv1.Template) (*apiv1.Container, error) { +func (woc *wfOperationCtx) newWaitContainer(tmpl *wfv1.Template) *apiv1.Container { ctr := woc.newExecContainer(common.WaitContainerName, tmpl) ctr.Command = []string{"argoexec", "wait", "--loglevel", getExecutorLogLevel()} switch woc.controller.GetContainerRuntimeExecutor() { @@ -461,7 +451,7 @@ func (woc *wfOperationCtx) newWaitContainer(tmpl *wfv1.Template) (*apiv1.Contain case "", common.ContainerRuntimeExecutorDocker: ctr.VolumeMounts = append(ctr.VolumeMounts, woc.getVolumeMountDockerSock(tmpl)) } - return ctr, nil + return ctr } func getExecutorLogLevel() string { @@ -1065,9 +1055,9 @@ func addScriptStagingVolume(pod *apiv1.Pod) { // addInitContainers adds all init containers to the pod spec of the step // Optionally volume mounts from the main container to the init containers -func addInitContainers(pod *apiv1.Pod, tmpl *wfv1.Template) error { +func addInitContainers(pod *apiv1.Pod, tmpl *wfv1.Template) { if len(tmpl.InitContainers) == 0 { - return nil + return } mainCtr := findMainContainer(pod) if mainCtr == nil { @@ -1080,14 +1070,13 @@ func addInitContainers(pod *apiv1.Pod, tmpl *wfv1.Template) error { } pod.Spec.InitContainers = append(pod.Spec.InitContainers, ctr.Container) } - return nil } // addSidecars adds all sidecars to the pod spec of the step. 
// Optionally volume mounts from the main container to the sidecar -func addSidecars(pod *apiv1.Pod, tmpl *wfv1.Template) error { +func addSidecars(pod *apiv1.Pod, tmpl *wfv1.Template) { if len(tmpl.Sidecars) == 0 { - return nil + return } mainCtr := findMainContainer(pod) if mainCtr == nil { @@ -1100,7 +1089,6 @@ func addSidecars(pod *apiv1.Pod, tmpl *wfv1.Template) error { } pod.Spec.Containers = append(pod.Spec.Containers, sidecar.Container) } - return nil } // verifyResolvedVariables is a helper to ensure all {{variables}} have been resolved for a object @@ -1112,7 +1100,7 @@ func verifyResolvedVariables(obj interface{}) error { var unresolvedErr error fstTmpl, err := fasttemplate.NewTemplate(string(str), "{{", "}}") if err != nil { - return fmt.Errorf("unable to parse argo varaible: %w", err) + return fmt.Errorf("unable to parse argo variable: %w", err) } fstTmpl.ExecuteFuncString(func(w io.Writer, tag string) (int, error) { unresolvedErr = errors.Errorf(errors.CodeBadRequest, "failed to resolve {{%s}}", tag) @@ -1123,8 +1111,8 @@ func verifyResolvedVariables(obj interface{}) error { // createSecretVolumes will retrieve and create Volumes and Volumemount object for Pod func createSecretVolumes(tmpl *wfv1.Template) ([]apiv1.Volume, []apiv1.VolumeMount) { - var allVolumesMap = make(map[string]apiv1.Volume) - var uniqueKeyMap = make(map[string]bool) + allVolumesMap := make(map[string]apiv1.Volume) + uniqueKeyMap := make(map[string]bool) var secretVolumes []apiv1.Volume var secretVolMounts []apiv1.VolumeMount diff --git a/workflow/controller/workflowpod_test.go b/workflow/controller/workflowpod_test.go index 50a9956cf8cd..6b4627dddbd3 100644 --- a/workflow/controller/workflowpod_test.go +++ b/workflow/controller/workflowpod_test.go @@ -697,7 +697,6 @@ func TestVolumesPodSubstitution(t *testing.T) { } func TestOutOfCluster(t *testing.T) { - verifyKubeConfigVolume := func(ctr apiv1.Container, volName, mountPath string) { for _, vol := range ctr.VolumeMounts { if vol.Name == volName && vol.MountPath == mountPath { @@ -925,7 +924,6 @@ func TestSidecars(t *testing.T) { } func TestTemplateLocalVolumes(t *testing.T) { - volumes := []apiv1.Volume{ { Name: "volume-name", @@ -992,7 +990,6 @@ func TestWFLevelHostAliases(t *testing.T) { assert.Len(t, pods.Items, 1) pod := pods.Items[0] assert.NotNil(t, pod.Spec.HostAliases) - } // TestTmplLevelHostAliases verifies the ability to carry forward template level HostAliases to Podspec @@ -1012,7 +1009,6 @@ func TestTmplLevelHostAliases(t *testing.T) { assert.Len(t, pods.Items, 1) pod := pods.Items[0] assert.NotNil(t, pod.Spec.HostAliases) - } // TestWFLevelSecurityContext verifies the ability to carry forward workflow level SecurityContext to Podspec @@ -1130,7 +1126,6 @@ func TestPodSpecPatch(t *testing.T) { assert.Equal(t, "0.800", pod.Spec.Containers[1].Resources.Limits.Cpu().AsDec().String()) assert.Equal(t, "104857600", pod.Spec.Containers[1].Resources.Limits.Memory().AsDec().String()) - } func TestMainContainerCustomization(t *testing.T) { diff --git a/workflow/creator/creator_test.go b/workflow/creator/creator_test.go index 5664a5b30e27..86bd637123a1 100644 --- a/workflow/creator/creator_test.go +++ b/workflow/creator/creator_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/stretchr/testify/assert" - "gopkg.in/square/go-jose.v2/jwt" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" diff --git a/workflow/cron/operator_test.go b/workflow/cron/operator_test.go index 83560bebc29f..848a852b883d 100644 --- 
a/workflow/cron/operator_test.go +++ b/workflow/cron/operator_test.go @@ -128,8 +128,7 @@ func TestRunOutstandingWorkflows(t *testing.T) { assert.True(t, missedExecutionTime.IsZero()) } -type fakeLister struct { -} +type fakeLister struct{} func (f fakeLister) List() ([]*v1alpha1.Workflow, error) { // Do nothing diff --git a/workflow/events/event_recorder_manager.go b/workflow/events/event_recorder_manager.go index 3b26e1ed0efa..f53d8d3a1064 100644 --- a/workflow/events/event_recorder_manager.go +++ b/workflow/events/event_recorder_manager.go @@ -37,7 +37,6 @@ func (m *eventRecorderManager) Get(namespace string) record.EventRecorder { eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: m.kubernetes.CoreV1().Events(namespace)}) m.eventRecorders[namespace] = eventBroadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{Component: "workflow-controller"}) return m.eventRecorders[namespace] - } func NewEventRecorderManager(kubernetes kubernetes.Interface) EventRecorderManager { diff --git a/workflow/executor/docker/docker.go b/workflow/executor/docker/docker.go index 90c404b1498b..65ec4519ff28 100644 --- a/workflow/executor/docker/docker.go +++ b/workflow/executor/docker/docker.go @@ -252,7 +252,6 @@ func (d *DockerExecutor) getContainerID(containerName string) (string, error) { // killContainers kills a list of containerNames first with a SIGTERM then with a SIGKILL after a grace period func (d *DockerExecutor) Kill(ctx context.Context, containerNames []string, terminationGracePeriodDuration time.Duration) error { - containerIDs, err := d.getContainerIDs(containerNames) if err != nil { return err diff --git a/workflow/executor/executor.go b/workflow/executor/executor.go index 64402bbdbab2..7d861ffb1054 100644 --- a/workflow/executor/executor.go +++ b/workflow/executor/executor.go @@ -670,7 +670,6 @@ func (we *WorkflowExecutor) GetTerminationGracePeriodDuration(ctx context.Contex // CaptureScriptResult will add the stdout of a script template as output result func (we *WorkflowExecutor) CaptureScriptResult(ctx context.Context) error { - if we.ExecutionControl == nil || !we.ExecutionControl.IncludeScriptOutput { log.Infof("No Script output reference in workflow. 
Capturing script output ignored") return nil diff --git a/workflow/executor/executor_test.go b/workflow/executor/executor_test.go index 0ce37ba1ee93..8985070eb248 100644 --- a/workflow/executor/executor_test.go +++ b/workflow/executor/executor_test.go @@ -239,7 +239,6 @@ func TestUntar(t *testing.T) { } func TestChmod(t *testing.T) { - type perm struct { dir string file string @@ -293,7 +292,6 @@ func TestChmod(t *testing.T) { assert.NoError(t, err) assert.Equal(t, filePermission.Mode().String(), test.permissions.file) } - } func TestSaveArtifacts(t *testing.T) { diff --git a/workflow/executor/k8sapi/client.go b/workflow/executor/k8sapi/client.go index 6f9dfe81b02d..1f4fc8196c50 100644 --- a/workflow/executor/k8sapi/client.go +++ b/workflow/executor/k8sapi/client.go @@ -31,13 +31,13 @@ type k8sAPIClient struct { var _ execcommon.KubernetesClientInterface = &k8sAPIClient{} -func newK8sAPIClient(clientset kubernetes.Interface, config *restclient.Config, podName, namespace string) (*k8sAPIClient, error) { +func newK8sAPIClient(clientset kubernetes.Interface, config *restclient.Config, podName, namespace string) *k8sAPIClient { return &k8sAPIClient{ clientset: clientset, config: config, podName: podName, namespace: namespace, - }, nil + } } func (c *k8sAPIClient) CreateArchive(ctx context.Context, containerName, sourcePath string) (*bytes.Buffer, error) { diff --git a/workflow/executor/k8sapi/k8sapi.go b/workflow/executor/k8sapi/k8sapi.go index 67ae5ed932d1..891f73ef89fc 100644 --- a/workflow/executor/k8sapi/k8sapi.go +++ b/workflow/executor/k8sapi/k8sapi.go @@ -19,15 +19,12 @@ type K8sAPIExecutor struct { client *k8sAPIClient } -func NewK8sAPIExecutor(clientset kubernetes.Interface, config *restclient.Config, podName, namespace string) (*K8sAPIExecutor, error) { +func NewK8sAPIExecutor(clientset kubernetes.Interface, config *restclient.Config, podName, namespace string) *K8sAPIExecutor { log.Infof("Creating a K8sAPI executor") - client, err := newK8sAPIClient(clientset, config, podName, namespace) - if err != nil { - return nil, errors.InternalWrapError(err) - } + client := newK8sAPIClient(clientset, config, podName, namespace) return &K8sAPIExecutor{ client: client, - }, nil + } } func (k *K8sAPIExecutor) GetFileContents(containerName string, sourcePath string) (string, error) { diff --git a/workflow/executor/kubelet/client.go b/workflow/executor/kubelet/client.go index af2a6904baac..c95120547419 100644 --- a/workflow/executor/kubelet/client.go +++ b/workflow/executor/kubelet/client.go @@ -275,7 +275,6 @@ func (k *kubeletClient) getCommandOutput(containerName, command string) (*bytes. 
return nil, err } return k.readFileContents(u) - } // WaitForTermination of the given container, set the timeout to 0 to discard it diff --git a/workflow/executor/pns/pns.go b/workflow/executor/pns/pns.go index dd0ff5277ba6..66b551ab57b8 100644 --- a/workflow/executor/pns/pns.go +++ b/workflow/executor/pns/pns.go @@ -52,10 +52,7 @@ func NewPNSExecutor(clientset *kubernetes.Clientset, podName, namespace string) if thisPID == 1 { return nil, errors.New(errors.CodeBadRequest, "process namespace sharing is not enabled on pod") } - delegate, err := k8sapi.NewK8sAPIExecutor(clientset, nil, podName, namespace) - if err != nil { - return nil, err - } + delegate := k8sapi.NewK8sAPIExecutor(clientset, nil, podName, namespace) return &PNSExecutor{ K8sAPIExecutor: delegate, podName: podName, @@ -135,7 +132,6 @@ func (p *PNSExecutor) CopyFile(containerName string, sourcePath string, destPath } func (p *PNSExecutor) Wait(ctx context.Context, containerNames, sidecarNames []string) error { - allContainerNames := append(containerNames, sidecarNames...) go p.pollRootProcesses(ctx, allContainerNames) @@ -232,7 +228,7 @@ func (p *PNSExecutor) Kill(ctx context.Context, containerNames []string, termina for _, containerName := range containerNames { wg.Add(1) go func(containerName string) { - err := p.killContainer(ctx, containerName, terminationGracePeriodDuration) + err := p.killContainer(containerName, terminationGracePeriodDuration) if err != nil && asyncErr != nil { asyncErr = err } @@ -243,7 +239,7 @@ func (p *PNSExecutor) Kill(ctx context.Context, containerNames []string, termina return asyncErr } -func (p *PNSExecutor) killContainer(ctx context.Context, containerName string, terminationGracePeriodDuration time.Duration) error { +func (p *PNSExecutor) killContainer(containerName string, terminationGracePeriodDuration time.Duration) error { pid, err := p.getContainerPID(containerName) if err != nil { log.Warnf("Ignoring kill container failure of %q: %v. 
Process assumed to have completed", containerName, err) diff --git a/workflow/executor/resource.go b/workflow/executor/resource.go index 9e08c78839c2..baa680cdc2c7 100644 --- a/workflow/executor/resource.go +++ b/workflow/executor/resource.go @@ -13,8 +13,6 @@ import ( "strings" "time" - argoerr "github.com/argoproj/argo-workflows/v3/util/errors" - log "github.com/sirupsen/logrus" "github.com/tidwall/gjson" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -23,6 +21,7 @@ import ( "github.com/argoproj/argo-workflows/v3/errors" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" + argoerr "github.com/argoproj/argo-workflows/v3/util/errors" "github.com/argoproj/argo-workflows/v3/workflow/common" os_specific "github.com/argoproj/argo-workflows/v3/workflow/executor/os-specific" ) @@ -143,7 +142,6 @@ func (we *WorkflowExecutor) signalMonitoring(ctx context.Context) { // WaitResource waits for a specific resource to satisfy either the success or failure condition func (we *WorkflowExecutor) WaitResource(ctx context.Context, resourceNamespace string, resourceName string) error { - // Monitor the SIGTERM we.signalMonitoring(ctx) @@ -189,7 +187,6 @@ func (we *WorkflowExecutor) WaitResource(ctx context.Context, resourceNamespace log.Warnf("Waiting for resource %s resulted in non-retryable error %v", resourceName, err) return false, err }) - if err != nil { if err == wait.ErrWaitTimeout { log.Warnf("Waiting for resource %s resulted in timeout due to repeated errors", resourceName) @@ -223,7 +220,6 @@ func checkIfResourceDeleted(resourceName string, resourceNamespace string) bool // Function to do the kubectl get -w command and then waiting on json reading. func checkResourceState(resourceNamespace string, resourceName string, successReqs labels.Requirements, failReqs labels.Requirements) (bool, error) { - cmd, reader, err := startKubectlWaitCmd(resourceNamespace, resourceName) if argoerr.IsTransientErr(err) { return true, err @@ -241,7 +237,6 @@ func checkResourceState(resourceNamespace string, resourceName string, successRe } jsonBytes, err := readJSON(reader) - if err != nil { resultErr := err log.Warnf("Json reader returned error %v. 
Calling kill (usually superfluous)", err) diff --git a/workflow/hydrator/hydrator_test.go b/workflow/hydrator/hydrator_test.go index e5d23c4ba846..bc9277296646 100644 --- a/workflow/hydrator/hydrator_test.go +++ b/workflow/hydrator/hydrator_test.go @@ -76,8 +76,9 @@ func TestHydrator(t *testing.T) { offloadNodeStatusRepo := &sqldbmocks.OffloadNodeStatusRepo{} offloadNodeStatusRepo.On("Get", "my-uid", "my-offload-version").Return(wfv1.Nodes{"foo": wfv1.NodeStatus{}}, nil) hydrator := New(offloadNodeStatusRepo) - wf := &wfv1.Workflow{ObjectMeta: metav1.ObjectMeta{UID: "my-uid"}, - Status: wfv1.WorkflowStatus{OffloadNodeStatusVersion: "my-offload-version"}, + wf := &wfv1.Workflow{ + ObjectMeta: metav1.ObjectMeta{UID: "my-uid"}, + Status: wfv1.WorkflowStatus{OffloadNodeStatusVersion: "my-offload-version"}, } err := hydrator.Hydrate(wf) if assert.NoError(t, err) { @@ -90,16 +91,18 @@ func TestHydrator(t *testing.T) { offloadNodeStatusRepo := &sqldbmocks.OffloadNodeStatusRepo{} offloadNodeStatusRepo.On("Get", "my-uid", "my-offload-version").Return(nil, sqldb.OffloadNotSupportedError) hydrator := New(offloadNodeStatusRepo) - wf := &wfv1.Workflow{ObjectMeta: metav1.ObjectMeta{UID: "my-uid"}, - Status: wfv1.WorkflowStatus{OffloadNodeStatusVersion: "my-offload-version"}, + wf := &wfv1.Workflow{ + ObjectMeta: metav1.ObjectMeta{UID: "my-uid"}, + Status: wfv1.WorkflowStatus{OffloadNodeStatusVersion: "my-offload-version"}, } err := hydrator.Hydrate(wf) assert.Error(t, err) }) t.Run("Packed", func(t *testing.T) { hydrator := New(&sqldbmocks.OffloadNodeStatusRepo{}) - wf := &wfv1.Workflow{ObjectMeta: metav1.ObjectMeta{UID: "my-uid"}, - Status: wfv1.WorkflowStatus{CompressedNodes: "H4sIAAAAAAAA/6pWSkosUrKqVspMUbJSUtJRykvMTYWwUjKLC3ISK/3gAiWVBVBWcUliUUlqimOJklVeaU6OjlJaZl5mcQZCpFZHKS0/nwbm1gICAAD//8SSRamxAAAA"}, + wf := &wfv1.Workflow{ + ObjectMeta: metav1.ObjectMeta{UID: "my-uid"}, + Status: wfv1.WorkflowStatus{CompressedNodes: "H4sIAAAAAAAA/6pWSkosUrKqVspMUbJSUtJRykvMTYWwUjKLC3ISK/3gAiWVBVBWcUliUUlqimOJklVeaU6OjlJaZl5mcQZCpFZHKS0/nwbm1gICAAD//8SSRamxAAAA"}, } err := hydrator.Hydrate(wf) if assert.NoError(t, err) { diff --git a/workflow/metrics/k8s_request_total_metric.go b/workflow/metrics/k8s_request_total_metric.go index 7b65a0e49055..ea241787b430 100644 --- a/workflow/metrics/k8s_request_total_metric.go +++ b/workflow/metrics/k8s_request_total_metric.go @@ -10,16 +10,14 @@ import ( "github.com/argoproj/argo-workflows/v3/util/k8s" ) -var ( - K8sRequestTotalMetric = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: argoNamespace, - Subsystem: workflowsSubsystem, - Name: "k8s_request_total", - Help: "Number of kubernetes requests executed. https://argoproj.github.io/argo-workflows/metrics/#argo_workflows_k8s_request_total", - }, - []string{"kind", "verb", "status_code"}, - ) +var K8sRequestTotalMetric = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: argoNamespace, + Subsystem: workflowsSubsystem, + Name: "k8s_request_total", + Help: "Number of kubernetes requests executed. 
https://argoproj.github.io/argo-workflows/metrics/#argo_workflows_k8s_request_total", + }, + []string{"kind", "verb", "status_code"}, ) type metricsRoundTripper struct { diff --git a/workflow/metrics/metrics.go b/workflow/metrics/metrics.go index 758c2120f3c2..23819145a59c 100644 --- a/workflow/metrics/metrics.go +++ b/workflow/metrics/metrics.go @@ -272,6 +272,7 @@ func (m *Metrics) NewWorkDurationMetric(name string) workqueue.HistogramMetric { func (m *Metrics) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { return noopMetric{} } + func (m *Metrics) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { return noopMetric{} } diff --git a/workflow/metrics/pod_missing_metric.go b/workflow/metrics/pod_missing_metric.go index 4ba35350c373..e1243b53c250 100644 --- a/workflow/metrics/pod_missing_metric.go +++ b/workflow/metrics/pod_missing_metric.go @@ -2,13 +2,11 @@ package metrics import "github.com/prometheus/client_golang/prometheus" -var ( - PodMissingMetric = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: argoNamespace, - Name: "pod_missing", - Help: "Incidents of pod missing. https://argoproj.github.io/argo-workflows/metrics/#argo_pod_missing", - }, - []string{"recently_started", "node_phase"}, - ) +var PodMissingMetric = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: argoNamespace, + Name: "pod_missing", + Help: "Incidents of pod missing. https://argoproj.github.io/argo-workflows/metrics/#argo_pod_missing", + }, + []string{"recently_started", "node_phase"}, ) diff --git a/workflow/metrics/util.go b/workflow/metrics/util.go index efbef5b15ae6..359db5815f1c 100644 --- a/workflow/metrics/util.go +++ b/workflow/metrics/util.go @@ -185,8 +185,8 @@ func getPodPhaseGauges() map[v1.PodPhase]prometheus.Gauge { return map[v1.PodPhase]prometheus.Gauge{ v1.PodPending: prometheus.NewGauge(getOptsByPhase(v1.PodPending)), v1.PodRunning: prometheus.NewGauge(getOptsByPhase(v1.PodRunning)), - //v1.PodSucceeded: prometheus.NewGauge(getOptsByPhase(v1.PodSucceeded)), - //v1.PodFailed: prometheus.NewGauge(getOptsByPhase(v1.PodFailed)), + // v1.PodSucceeded: prometheus.NewGauge(getOptsByPhase(v1.PodSucceeded)), + // v1.PodFailed: prometheus.NewGauge(getOptsByPhase(v1.PodFailed)), } } diff --git a/workflow/metrics/work_queue.go b/workflow/metrics/work_queue.go index becc8af7122f..0cc83354bfb3 100644 --- a/workflow/metrics/work_queue.go +++ b/workflow/metrics/work_queue.go @@ -42,7 +42,6 @@ func (w workersBusyRateLimiterWorkQueue) Get() (interface{}, bool) { item, shutdown := w.RateLimitingInterface.Get() w.metrics.workerBusy(w.workerType) return item, shutdown - } func (w workersBusyRateLimiterWorkQueue) Done(item interface{}) { diff --git a/workflow/metrics/work_queue_test.go b/workflow/metrics/work_queue_test.go index 5c53850186be..5d58b998e1a7 100644 --- a/workflow/metrics/work_queue_test.go +++ b/workflow/metrics/work_queue_test.go @@ -33,5 +33,4 @@ func TestMetricsWorkQueue(t *testing.T) { queue.Done("A") assert.Equal(t, float64(0), *write(m.workersBusy["test"]).Gauge.Value) - } diff --git a/workflow/metrics/workflow_condition_metric.go b/workflow/metrics/workflow_condition_metric.go index 55dbc6c978c2..cd1462d80a1c 100644 --- a/workflow/metrics/workflow_condition_metric.go +++ b/workflow/metrics/workflow_condition_metric.go @@ -4,14 +4,12 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -var ( - WorkflowConditionMetric = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: argoNamespace, - 
Subsystem: workflowsSubsystem, - Name: "workflow_condition", - Help: "Workflow condition. https://argoproj.github.io/argo-workflows/metrics/#argo_workflows_workflow_condition", - }, - []string{"type", "status"}, - ) +var WorkflowConditionMetric = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: argoNamespace, + Subsystem: workflowsSubsystem, + Name: "workflow_condition", + Help: "Workflow condition. https://argoproj.github.io/argo-workflows/metrics/#argo_workflows_workflow_condition", + }, + []string{"type", "status"}, ) diff --git a/workflow/sync/mutex_test.go b/workflow/sync/mutex_test.go index f92381855833..f5da7c6be41a 100644 --- a/workflow/sync/mutex_test.go +++ b/workflow/sync/mutex_test.go @@ -177,7 +177,6 @@ func TestMutexLock(t *testing.T) { concurrenyMgr.ReleaseAll(wf2) assert.Nil(t, wf2.Status.Synchronization) }) - } var mutexWfWithTmplLevel = ` @@ -284,9 +283,9 @@ func TestMutexTmplLevel(t *testing.T) { syncLimitFunc := GetSyncLimitFunc(kube) t.Run("TemplateLevelAcquireAndRelease", func(t *testing.T) { - //var nextKey string + // var nextKey string concurrenyMgr := NewLockManager(syncLimitFunc, func(key string) { - //nextKey = key + // nextKey = key }, WorkflowExistenceFunc) wf := unmarshalWF(mutexWfWithTmplLevel) tmpl := wf.Spec.Templates[1] diff --git a/workflow/sync/sync_manager.go b/workflow/sync/sync_manager.go index c07b902aae73..c407c1c2a871 100644 --- a/workflow/sync/sync_manager.go +++ b/workflow/sync/sync_manager.go @@ -10,9 +10,11 @@ import ( wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" ) -type NextWorkflow func(string) -type GetSyncLimit func(string) (int, error) -type IsWorkflowDeleted func(string) bool +type ( + NextWorkflow func(string) + GetSyncLimit func(string) (int, error) + IsWorkflowDeleted func(string) bool +) type Manager struct { syncLockMap map[string]Semaphore @@ -94,11 +96,7 @@ func (cm *Manager) Initialize(wfs []wfv1.Workflow) { mutex := cm.syncLockMap[holding.Mutex] if mutex == nil { - mutex, err := cm.initializeMutex(holding.Mutex) - if err != nil { - log.Warnf("Synchronization Mutex %s initialization failed. 
%v", holding.Mutex, err) - continue - } + mutex := cm.initializeMutex(holding.Mutex) if holding.Holder != "" { resourceKey := getResourceKey(wf.Namespace, wf.Name, holding.Holder) mutex.acquire(resourceKey) @@ -133,7 +131,7 @@ func (cm *Manager) TryAcquire(wf *wfv1.Workflow, nodeName string, syncLockRef *w case wfv1.SynchronizationTypeSemaphore: lock, err = cm.initializeSemaphore(lockKey) case wfv1.SynchronizationTypeMutex: - lock, err = cm.initializeMutex(lockKey) + lock = cm.initializeMutex(lockKey) default: return false, false, "", fmt.Errorf("unknown Synchronization Type") } @@ -312,8 +310,8 @@ func (cm *Manager) initializeSemaphore(semaphoreName string) (Semaphore, error) return NewSemaphore(semaphoreName, limit, cm.nextWorkflow, "semaphore"), nil } -func (cm *Manager) initializeMutex(mutexName string) (Semaphore, error) { - return NewMutex(mutexName, cm.nextWorkflow), nil +func (cm *Manager) initializeMutex(mutexName string) Semaphore { + return NewMutex(mutexName, cm.nextWorkflow) } func (cm *Manager) isSemaphoreSizeChanged(semaphore Semaphore) (bool, int, error) { diff --git a/workflow/sync/sync_manager_test.go b/workflow/sync/sync_manager_test.go index fd27f4ce01be..1a92fa669c9f 100644 --- a/workflow/sync/sync_manager_test.go +++ b/workflow/sync/sync_manager_test.go @@ -29,6 +29,7 @@ data: workflow: "1" template: "1" ` + const wfWithStatus = ` apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -89,6 +90,7 @@ status: - hello-world-prtl9 semaphore: default/configmap/my-config/workflow ` + const wfWithSemaphore = ` apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -285,6 +287,7 @@ status: phase: Running startedAt: "2020-06-04T19:55:11Z" ` + const wfWithMutex = ` apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -326,7 +329,6 @@ func GetSyncLimitFunc(kube *fake.Clientset) func(string) (int, error) { ctx := context.Background() configMap, err := kube.CoreV1().ConfigMaps(items[0]).Get(ctx, items[2], metav1.GetOptions{}) - if err != nil { return 0, err } @@ -365,7 +367,6 @@ func TestSemaphoreWfLevel(t *testing.T) { }) t.Run("InitializeSynchronizationWithInvalid", func(t *testing.T) { concurrenyMgr := NewLockManager(syncLimitFunc, func(key string) { - }, WorkflowExistenceFunc) wf := unmarshalWF(wfWithStatus) invalidSync := []wfv1.SemaphoreHolding{{Semaphore: "default/configmap/my-config1/workflow", Holders: []string{"hello-world-vcrg5"}}} @@ -526,7 +527,6 @@ func TestResizeSemaphoreSize(t *testing.T) { assert.NotNil(t, wf2.Status.Synchronization) assert.NotNil(t, wf2.Status.Synchronization.Semaphore) assert.Equal(t, wf2.Name, wf2.Status.Synchronization.Semaphore.Holding[0].Holders[0]) - }) } @@ -542,9 +542,9 @@ func TestSemaphoreTmplLevel(t *testing.T) { syncLimitFunc := GetSyncLimitFunc(kube) t.Run("TemplateLevelAcquireAndRelease", func(t *testing.T) { - //var nextKey string + // var nextKey string concurrenyMgr := NewLockManager(syncLimitFunc, func(key string) { - //nextKey = key + // nextKey = key }, WorkflowExistenceFunc) wf := unmarshalWF(wfWithTmplSemaphore) tmpl := wf.Spec.Templates[2] @@ -584,7 +584,6 @@ func TestSemaphoreTmplLevel(t *testing.T) { assert.NotNil(t, wf.Status.Synchronization) assert.NotNil(t, wf.Status.Synchronization.Semaphore) assert.Equal(t, "semaphore-tmpl-level-xjvln-1607747183", wf.Status.Synchronization.Semaphore.Holding[0].Holders[0]) - }) } @@ -638,9 +637,9 @@ func TestMutexWfLevel(t *testing.T) { kube := fake.NewSimpleClientset() syncLimitFunc := GetSyncLimitFunc(kube) t.Run("WorkflowLevelMutexAcquireAndRelease", func(t *testing.T) { - //var nextKey 
string + // var nextKey string concurrenyMgr := NewLockManager(syncLimitFunc, func(key string) { - //nextKey = key + // nextKey = key }, WorkflowExistenceFunc) wf := unmarshalWF(wfWithMutex) wf1 := wf.DeepCopy() @@ -694,7 +693,7 @@ func TestCheckWorkflowExistence(t *testing.T) { syncLimitFunc := GetSyncLimitFunc(kube) t.Run("WorkflowDeleted", func(t *testing.T) { concurrenyMgr := NewLockManager(syncLimitFunc, func(key string) { - //nextKey = key + // nextKey = key }, func(s string) bool { return strings.Contains(s, "test1") }) @@ -721,5 +720,4 @@ func TestCheckWorkflowExistence(t *testing.T) { assert.Len(semaphore.getCurrentHolders(), 0) assert.Len(semaphore.getCurrentPending(), 0) }) - } diff --git a/workflow/sync/throttler.go b/workflow/sync/throttler.go index 10c230ce6e50..3fae1f7b27d4 100644 --- a/workflow/sync/throttler.go +++ b/workflow/sync/throttler.go @@ -37,6 +37,7 @@ func NewThrottler(parallelism int, queue func(key string)) Throttler { pending: &priorityQueue{itemByKey: make(map[string]*item)}, } } + func (t *throttler) Add(key string, priority int32, creationTime time.Time) { t.lock.Lock() defer t.lock.Unlock() diff --git a/workflow/templateresolution/context.go b/workflow/templateresolution/context.go index aeacca270902..7fceb4cde19f 100644 --- a/workflow/templateresolution/context.go +++ b/workflow/templateresolution/context.go @@ -4,14 +4,12 @@ import ( "context" "fmt" - "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus" apierr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/argoproj/argo-workflows/v3/errors" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1" typed "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/workflow/common" ) @@ -24,7 +22,7 @@ type workflowTemplateInterfaceWrapper struct { clientset typed.WorkflowTemplateInterface } -func WrapWorkflowTemplateInterface(clientset v1alpha1.WorkflowTemplateInterface) WorkflowTemplateNamespacedGetter { +func WrapWorkflowTemplateInterface(clientset typed.WorkflowTemplateInterface) WorkflowTemplateNamespacedGetter { return &workflowTemplateInterfaceWrapper{clientset: clientset} } @@ -51,7 +49,7 @@ type ClusterWorkflowTemplateGetter interface { Get(name string) (*wfv1.ClusterWorkflowTemplate, error) } -func WrapClusterWorkflowTemplateInterface(clusterClientset v1alpha1.ClusterWorkflowTemplateInterface) ClusterWorkflowTemplateGetter { +func WrapClusterWorkflowTemplateInterface(clusterClientset typed.ClusterWorkflowTemplateInterface) ClusterWorkflowTemplateGetter { return &clusterWorkflowTemplateInterfaceWrapper{clientset: clusterClientset} } @@ -79,7 +77,7 @@ type Context struct { // workflow is the Workflow where templates will be stored workflow *wfv1.Workflow // log is a logrus entry. - log *logrus.Entry + log *log.Entry } // NewContext returns new Context. 
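For context on the import cleanup in workflow/templateresolution/context.go: the file had imported the same packages twice under different names (logrus and log for sirupsen/logrus; v1alpha1 and typed for the generated workflow clientset), so the hunks in this file converge on one alias per package and rewrite each call site to match. A minimal sketch of the resulting style, not taken from the patch, with a hypothetical field value:

package main

import (
	log "github.com/sirupsen/logrus" // the single surviving alias; the unaliased duplicate import is dropped
)

func main() {
	// Call sites previously written as logrus.Fields{...} now use the one alias.
	log.WithFields(log.Fields{"depth": 0}).Info("resolving template")
}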
@@ -89,7 +87,7 @@ func NewContext(wftmplGetter WorkflowTemplateNamespacedGetter, cwftmplGetter Clu cwftmplGetter: cwftmplGetter, tmplBase: tmplBase, workflow: workflow, - log: log.WithFields(logrus.Fields{}), + log: log.WithFields(log.Fields{}), } } @@ -100,7 +98,7 @@ func NewContextFromClientset(wftmplClientset typed.WorkflowTemplateInterface, cl cwftmplGetter: WrapClusterWorkflowTemplateInterface(clusterWftmplClient), tmplBase: tmplBase, workflow: workflow, - log: log.WithFields(logrus.Fields{}), + log: log.WithFields(log.Fields{}), } } @@ -183,7 +181,7 @@ func (ctx *Context) ResolveTemplate(tmplHolder wfv1.TemplateReferenceHolder) (*C // resolved template include intermediate parameter passing. // The other fields are just merged and shallower templates overwrite deeper. func (ctx *Context) resolveTemplateImpl(tmplHolder wfv1.TemplateReferenceHolder, depth int) (*Context, *wfv1.Template, bool, error) { - ctx.log = ctx.log.WithFields(logrus.Fields{ + ctx.log = ctx.log.WithFields(log.Fields{ "depth": depth, "base": common.GetTemplateGetterString(ctx.tmplBase), "tmpl": common.GetTemplateHolderString(tmplHolder), diff --git a/workflow/ttlcontroller/ttlcontroller_test.go b/workflow/ttlcontroller/ttlcontroller_test.go index ef8396205ba8..153239e072a8 100644 --- a/workflow/ttlcontroller/ttlcontroller_test.go +++ b/workflow/ttlcontroller/ttlcontroller_test.go @@ -226,6 +226,7 @@ status: ttlStrategy: secondsAfterCompletion: 10 ` + var wftRefWithTTLinWF = ` apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -420,7 +421,6 @@ func TestTTLStrategySucceded(t *testing.T) { enqueueWF(controller, un) controller.processNextWorkItem(ctx) assert.Equal(t, 1, controller.workqueue.Len()) - } func TestTTLStrategyFailed(t *testing.T) { @@ -446,8 +446,8 @@ func TestTTLStrategyFailed(t *testing.T) { assert.NoError(t, err) enqueueWF(controller, un) assert.Equal(t, 1, controller.workqueue.Len()) - } + func TestNoTTLStrategyFailed(t *testing.T) { var err error var un *unstructured.Unstructured @@ -466,7 +466,6 @@ func TestNoTTLStrategyFailed(t *testing.T) { assert.NoError(t, err) enqueueWF(controller, un) assert.Equal(t, 0, controller.workqueue.Len()) - } func TestTTLStrategyFromUnstructured(t *testing.T) { diff --git a/workflow/util/merge_test.go b/workflow/util/merge_test.go index a8a052d71029..6261fb78a5e4 100644 --- a/workflow/util/merge_test.go +++ b/workflow/util/merge_test.go @@ -23,6 +23,7 @@ spec: workflowTemplateRef: name: workflow-template-submittable ` + var patchWF = ` apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -112,6 +113,7 @@ spec: secret: secretName: test ` + var wft = ` apiVersion: argoproj.io/v1alpha1 kind: WorkflowTemplate @@ -221,7 +223,7 @@ func TestJoinWfSpecs(t *testing.T) { assert := assert.New(t) wfDefault := unmarshalWF(wfDefault) wf1 := unmarshalWF(wf) - //wf1 := unmarshalWF(wf1) + // wf1 := unmarshalWF(wf1) wft := unmarshalWFT(wft) result := unmarshalWF(resultSpec) diff --git a/workflow/util/retry/retry.go b/workflow/util/retry/retry.go index 19a56b50e9fa..5b0b98e81d14 100644 --- a/workflow/util/retry/retry.go +++ b/workflow/util/retry/retry.go @@ -8,7 +8,7 @@ import ( // GetFailHosts returns slice of all child nodes with fail or error status func GetFailHosts(nodes wfv1.Nodes, parent string) []string { - var hostNames = []string{} + hostNames := []string{} failNodes := nodes.Children(parent). Filter(func(x wfv1.NodeStatus) bool { return x.Phase == wfv1.NodeFailed || x.Phase == wfv1.NodeError }). 
Map(func(x wfv1.NodeStatus) interface{} { return x.HostNodeName })
diff --git a/workflow/util/retry/retry_test.go b/workflow/util/retry/retry_test.go
index ec7ce21b1bc5..a0cc04261698 100644
--- a/workflow/util/retry/retry_test.go
+++ b/workflow/util/retry/retry_test.go
@@ -20,7 +20,7 @@ func TestRemoveDuplicates(t *testing.T) {
 
 // func GetFailHosts(nodes wfv1.Nodes, parent string) []string {
 func TestGetFailHosts(t *testing.T) {
-	var nodes = wfv1.Nodes{
+	nodes := wfv1.Nodes{
 		"retry_node": wfv1.NodeStatus{
 			ID:    "retry_node",
 			Phase: wfv1.NodeFailed,
diff --git a/workflow/util/util.go b/workflow/util/util.go
index d71796f62424..e37913fee96f 100644
--- a/workflow/util/util.go
+++ b/workflow/util/util.go
@@ -169,7 +169,6 @@ func IsWorkflowCompleted(wf *wfv1.Workflow) bool {
 
 // SubmitWorkflow validates and submit a single workflow and override some of the fields of the workflow
 func SubmitWorkflow(ctx context.Context, wfIf v1alpha1.WorkflowInterface, wfClientset wfclientset.Interface, namespace string, wf *wfv1.Workflow, opts *wfv1.SubmitOpts) (*wfv1.Workflow, error) {
-
 	err := ApplySubmitOpts(wf, opts)
 	if err != nil {
 		return nil, err
@@ -813,13 +812,13 @@ func getNodeIDsToReset(restartSuccessful bool, nodeFieldSelector string, nodes w
 	} else {
 		for _, node := range nodes {
 			if SelectorMatchesNode(selector, node) {
-				//traverse all children of the node
+				// traverse all children of the node
 				var queue []string
 				queue = append(queue, node.ID)
 				for len(queue) > 0 {
 					childNode := queue[0]
-					//if the child isn't already in nodeIDsToReset then we add it and traverse its children
+					// if the child isn't already in nodeIDsToReset then we add it and traverse its children
 					if _, present := nodeIDsToReset[childNode]; !present {
 						nodeIDsToReset[childNode] = true
 						queue = append(queue, nodes[childNode].Children...)
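The getNodeIDsToReset hunk above only reflows its comments, but the traversal they describe is worth a gloss: starting from each node matched by the selector, it walks all descendants with a queue, using the nodeIDsToReset map both as the result set and as the visited set, so a node's children are only followed the first time that node is seen. A standalone sketch of that pattern (hypothetical names, not code from this patch):

package main

import "fmt"

// collectDescendants mirrors the queue-plus-visited-set walk used in
// getNodeIDsToReset: seed the queue with the matched node, and enqueue a
// node's children only on its first visit.
func collectDescendants(children map[string][]string, root string) map[string]bool {
	visited := map[string]bool{}
	queue := []string{root}
	for len(queue) > 0 {
		node := queue[0]
		queue = queue[1:]
		if visited[node] {
			continue
		}
		visited[node] = true
		queue = append(queue, children[node]...)
	}
	return visited
}

func main() {
	children := map[string][]string{"a": {"b", "c"}, "b": {"d"}}
	fmt.Println(collectDescendants(children, "a")) // map[a:true b:true c:true d:true]
}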
diff --git a/workflow/util/util_test.go b/workflow/util/util_test.go index c100c6f2612d..d6c963a75a9f 100644 --- a/workflow/util/util_test.go +++ b/workflow/util/util_test.go @@ -24,7 +24,6 @@ import ( // TestSubmitDryRun func TestSubmitDryRun(t *testing.T) { - workflowName := "test-dry-run" workflowYaml := ` apiVersion: argoproj.io/v1alpha1 @@ -298,11 +297,11 @@ func TestResumeWorkflowByNodeName(t *testing.T) { _, err := wfIf.Create(ctx, origWf, metav1.CreateOptions{}) assert.NoError(t, err) - //will return error as displayName does not match any nodes + // will return error as displayName does not match any nodes err = ResumeWorkflow(ctx, wfIf, hydratorfake.Noop, "suspend", "displayName=nonexistant") assert.Error(t, err) - //displayName didn't match suspend node so should still be running + // displayName didn't match suspend node so should still be running wf, err := wfIf.Get(ctx, "suspend", metav1.GetOptions{}) assert.NoError(t, err) assert.Equal(t, wfv1.NodeRunning, wf.Status.Nodes.FindByDisplayName("approve").Phase) @@ -310,7 +309,7 @@ func TestResumeWorkflowByNodeName(t *testing.T) { err = ResumeWorkflow(ctx, wfIf, hydratorfake.Noop, "suspend", "displayName=approve") assert.NoError(t, err) - //displayName matched node so has succeeded + // displayName matched node so has succeeded wf, err = wfIf.Get(ctx, "suspend", metav1.GetOptions{}) if assert.NoError(t, err) { assert.Equal(t, wfv1.NodeSucceeded, wf.Status.Nodes.FindByDisplayName("approve").Phase) @@ -325,11 +324,11 @@ func TestStopWorkflowByNodeName(t *testing.T) { _, err := wfIf.Create(ctx, origWf, metav1.CreateOptions{}) assert.NoError(t, err) - //will return error as displayName does not match any nodes + // will return error as displayName does not match any nodes err = StopWorkflow(ctx, wfIf, hydratorfake.Noop, "suspend", "displayName=nonexistant", "error occurred") assert.Error(t, err) - //displayName didn't match suspend node so should still be running + // displayName didn't match suspend node so should still be running wf, err := wfIf.Get(ctx, "suspend", metav1.GetOptions{}) assert.NoError(t, err) assert.Equal(t, wfv1.NodeRunning, wf.Status.Nodes.FindByDisplayName("approve").Phase) @@ -337,7 +336,7 @@ func TestStopWorkflowByNodeName(t *testing.T) { err = StopWorkflow(ctx, wfIf, hydratorfake.Noop, "suspend", "displayName=approve", "error occurred") assert.NoError(t, err) - //displayName matched node so has succeeded + // displayName matched node so has succeeded wf, err = wfIf.Get(ctx, "suspend", metav1.GetOptions{}) assert.NoError(t, err) assert.Equal(t, wfv1.NodeFailed, wf.Status.Nodes.FindByDisplayName("approve").Phase) diff --git a/workflow/validate/validate.go b/workflow/validate/validate.go index 653f5366f28e..ab500559dd33 100644 --- a/workflow/validate/validate.go +++ b/workflow/validate/validate.go @@ -84,9 +84,7 @@ const ( anyWorkflowOutputArtifactMagicValue = "workflow.outputs.artifacts.*" ) -var ( - placeholderGenerator = common.NewPlaceholderGenerator() -) +var placeholderGenerator = common.NewPlaceholderGenerator() type FakeArguments struct{} @@ -519,11 +517,10 @@ func resolveAllVariables(scope map[string]interface{}, tmplStr string) error { _, allowAllWorkflowOutputArtifactRefs := scope[anyWorkflowOutputArtifactMagicValue] fstTmpl, err := fasttemplate.NewTemplate(tmplStr, "{{", "}}") if err != nil { - return fmt.Errorf("unable to parse argo varaible: %w", err) + return fmt.Errorf("unable to parse argo variable: %w", err) } fstTmpl.ExecuteFuncString(func(w io.Writer, tag string) (int, error) { - // Skip the 
custom variable references if !checkValidWorkflowVariablePrefix(tag) { return 0, nil @@ -945,7 +942,6 @@ func (ctx *templateValidationCtx) validateBaseImageOutputs(tmpl *wfv1.Template) return errors.Errorf(errors.CodeBadRequest, "templates.%s.outputs.artifacts.%s: %s", tmpl.Name, out.Name, errMsg) } } - } if tmpl.Script != nil { for _, volMnt := range tmpl.Script.VolumeMounts { @@ -1063,7 +1059,7 @@ func validateWorkflowFieldNames(slice interface{}) error { type dagValidationContext struct { tasks map[string]wfv1.DAGTask - dependencies map[string]map[string]common.DependencyType //map of DAG tasks, each one containing a map of [task it's dependent on] -> [dependency type] + dependencies map[string]map[string]common.DependencyType // map of DAG tasks, each one containing a map of [task it's dependent on] -> [dependency type] } func (d *dagValidationContext) GetTask(taskName string) *wfv1.DAGTask { @@ -1167,7 +1163,6 @@ func (ctx *templateValidationCtx) validateDAG(scope map[string]interface{}, tmpl return errors.Errorf(errors.CodeBadRequest, "templates.%s.tasks.%s dependency '%s' not defined", tmpl.Name, task.Name, depName) - } else if depType == common.DependencyTypeItems && len(task.WithItems) == 0 && task.WithParam == "" && task.WithSequence == nil { return errors.Errorf(errors.CodeBadRequest, "templates.%s.tasks.%s dependency '%s' uses an items-based condition such as .AnySucceeded or .AllFailed but does not contain any items", diff --git a/workflow/validate/validate_test.go b/workflow/validate/validate_test.go index 6080014e86dc..4e9e23f3851d 100644 --- a/workflow/validate/validate_test.go +++ b/workflow/validate/validate_test.go @@ -16,9 +16,11 @@ import ( "github.com/argoproj/argo-workflows/v3/workflow/templateresolution" ) -var wfClientset = fakewfclientset.NewSimpleClientset() -var wftmplGetter = templateresolution.WrapWorkflowTemplateInterface(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault)) -var cwftmplGetter = templateresolution.WrapClusterWorkflowTemplateInterface(wfClientset.ArgoprojV1alpha1().ClusterWorkflowTemplates()) +var ( + wfClientset = fakewfclientset.NewSimpleClientset() + wftmplGetter = templateresolution.WrapWorkflowTemplateInterface(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(metav1.NamespaceDefault)) + cwftmplGetter = templateresolution.WrapClusterWorkflowTemplateInterface(wfClientset.ArgoprojV1alpha1().ClusterWorkflowTemplates()) +) func createWorkflowTemplate(yamlStr string) error { ctx := context.Background() @@ -805,6 +807,7 @@ spec: parameters: - name: outparam ` + var invalidOutputMultipleValueFrom = ` apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -1733,8 +1736,8 @@ func TestUnknownPodGCStrategy(t *testing.T) { assert.EqualError(t, err, "podGC.strategy unknown strategy 'Foo'") - for _, strat := range []wfv1.PodGCStrategy{wfv1.PodGCOnPodCompletion, wfv1.PodGCOnPodSuccess, wfv1.PodGCOnWorkflowCompletion, wfv1.PodGCOnWorkflowSuccess} { - wf.Spec.PodGC.Strategy = strat + for _, start := range []wfv1.PodGCStrategy{wfv1.PodGCOnPodCompletion, wfv1.PodGCOnPodSuccess, wfv1.PodGCOnWorkflowCompletion, wfv1.PodGCOnWorkflowSuccess} { + wf.Spec.PodGC.Strategy = start _, err = ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) assert.NoError(t, err) }
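Taken together, the hunks in this patch apply the same mechanical formatting rules across the tree, consistent with what gofumpt enforces: adjacent top-level declarations are grouped into a single var or type block, `var x = value` inside functions becomes `x := value`, `//comment` gains a space after the slashes, stray blank lines at the start and end of function bodies are removed, and multi-line struct literals get one field per line. A minimal sketch of the before-and-after shape of those rules, with hypothetical names:

package main

import "fmt"

// Previously two separate `var` statements; adjacent top-level declarations
// are grouped into one block.
var (
	defaultLimit = 10
	defaultName  = "semaphore"
)

func main() {
	// Previously `var hosts = []string{defaultName}`; the short form is preferred.
	hosts := []string{defaultName}
	fmt.Println(hosts, defaultLimit)
}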