diff --git a/hack/test_report.go b/hack/test_report.go
index 9ed240076470..4f310b7ec673 100755
--- a/hack/test_report.go
+++ b/hack/test_report.go
@@ -4,6 +4,7 @@ import (
 	"encoding/xml"
 	"fmt"
 	"io/ioutil"
+	"os"
 	"strconv"
 	"strings"
 )
@@ -40,6 +41,10 @@ func testReport() {
 	for _, c := range s.TestCases {
 		if c.Failure.Text != "" {
 			x := newFailureText(s.Name, c.Failure.Text)
+			if x.file == "" {
+				_, _ = fmt.Fprintln(os.Stderr, "could not parse "+c.Failure.Text)
+				continue
+			}
 			// https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-error-message
 			// Replace '\n' with '%0A' for multi-line output.
 			_, _ = fmt.Printf("::error file=%s,line=%v,col=0::%s\n", x.file, x.line, x.message)
@@ -61,12 +66,15 @@ func trimStdoutLines(text string) string {
 			return strings.Join(split[i:], "\n")
 		}
 	}
-	panic(text)
+	return text
 }
 
 func newFailureText(suite, text string) failureText {
 	text = trimStdoutLines(text)
 	parts := strings.SplitN(text, ":", 3)
+	if len(parts) != 3 {
+		return failureText{}
+	}
 	file := strings.TrimPrefix(suite, "github.com/argoproj/argo/") + "/" + parts[0]
 	line, _ := strconv.Atoi(parts[1])
 	message := strings.ReplaceAll(strings.TrimSpace(parts[2]), "\n", "%0A")
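
For context on the guard added above: once stdout lines are trimmed, `go test` failure text takes the shape `file_test.go:<line>: <message>`, and `newFailureText` splits it on the first two colons. A minimal standalone sketch of the parse-and-annotate flow (the sample failure text and the `main` wrapper are illustrative, not part of this change):

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	// a typical `go test` failure line: file, line number, then message
	text := "cli_test.go:42: expected phase Succeeded, got Failed"
	parts := strings.SplitN(text, ":", 3)
	if len(parts) != 3 {
		// mirrors the new guard: report unparseable text instead of panicking
		fmt.Fprintln(os.Stderr, "could not parse "+text)
		return
	}
	line, _ := strconv.Atoi(parts[1])
	// emit a GitHub Actions workflow command that annotates the offending line
	fmt.Printf("::error file=%s,line=%v,col=0::%s\n", parts[0], line, strings.TrimSpace(parts[2]))
}
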
diff --git a/pkg/apis/workflow/register.go b/pkg/apis/workflow/register.go
index ed2ec7a6083f..47a1b0f181a6 100644
--- a/pkg/apis/workflow/register.go
+++ b/pkg/apis/workflow/register.go
@@ -14,6 +14,7 @@ const (
 	WorkflowTemplatePlural     string = "workflowtemplates"
 	WorkflowTemplateShortName  string = "wftmpl"
 	WorkflowTemplateFullName   string = WorkflowTemplatePlural + "." + Group
+	WorkflowEventBindingPlural string = "workfloweventbindings"
 	CronWorkflowKind           string = "CronWorkflow"
 	CronWorkflowSingular       string = "cronworkflow"
 	CronWorkflowPlural         string = "cronworkflows"
diff --git a/test/e2e/cli_test.go b/test/e2e/cli_test.go
index 604034c54578..6b0f147d48f0 100644
--- a/test/e2e/cli_test.go
+++ b/test/e2e/cli_test.go
@@ -31,9 +31,9 @@ func (s *CLISuite) BeforeTest(suiteName, testName string) {
 }
 
 func (s *CLISuite) testNeedsOffloading() {
-	skip := s.Persistence.IsEnabled() && os.Getenv("ARGO_SERVER") == ""
-	if skip {
-		s.T().Skip("test needs offloading, but not Argo Server available")
+	// offloaded node statuses can only be read back via the Argo Server
+	if s.Persistence.IsEnabled() && os.Getenv("ARGO_SERVER") == "" {
+		s.T().Skip("test needs offloading, but the Argo Server is unavailable - if `testNeedsOffloading()` is the first line of your test, move the test to `CLIWithServerSuite`")
 	}
 }
 
@@ -278,7 +278,7 @@ func (s *CLISuite) TestRoot() {
 	})
 }
 
-func (s *CLISuite) TestWorkflowSuspendResume() {
+func (s *CLIWithServerSuite) TestWorkflowSuspendResume() {
 	s.testNeedsOffloading()
 	s.Given().
 		Workflow("@testdata/sleep-3s.yaml").
@@ -302,7 +302,7 @@ func (s *CLISuite) TestWorkflowSuspendResume() {
 	})
 }
 
-func (s *CLISuite) TestNodeSuspendResume() {
+func (s *CLIWithServerSuite) TestNodeSuspendResume() {
 	s.testNeedsOffloading()
 	s.Given().
 		Workflow("@testdata/node-suspend.yaml").
@@ -582,7 +582,7 @@ func (s *CLISuite) TestWorkflowLint() {
 	})
 }
 
-func (s *CLISuite) TestWorkflowRetry() {
+func (s *CLIWithServerSuite) TestWorkflowRetry() {
 	s.testNeedsOffloading()
 	var retryTime corev1.Time
 
@@ -635,7 +635,7 @@ func (s *CLISuite) TestWorkflowTerminate() {
 	})
 }
 
-func (s *CLISuite) TestWorkflowWait() {
+func (s *CLIWithServerSuite) TestWorkflowWait() {
 	s.testNeedsOffloading()
 	s.Given().
 		Workflow("@smoke/basic.yaml").
@@ -649,7 +649,7 @@ func (s *CLISuite) TestWorkflowWait() {
 	})
 }
 
-func (s *CLISuite) TestWorkflowWatch() {
+func (s *CLIWithServerSuite) TestWorkflowWatch() {
 	s.testNeedsOffloading()
 	s.Given().
 		Workflow("@smoke/basic.yaml").
@@ -948,7 +948,7 @@ func (s *CLISuite) TestWorkflowTemplateRefSubmit() {
 	})
 }
 
-func (s *CLISuite) TestWorkflowLevelSemaphore() {
+func (s *CLIWithServerSuite) TestWorkflowLevelSemaphore() {
 	semaphoreData := map[string]string{
 		"workflow": "1",
 	}
@@ -970,14 +970,14 @@ func (s *CLISuite) TestWorkflowLevelSemaphore() {
 			return wf.Status.Phase == ""
 		}, "Workflow is waiting for lock", 20*time.Second).
 		WaitForWorkflow(30 * time.Second).
-		DeleteConfigMap().
+		DeleteConfigMap("my-config").
 		Then().
 		ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) {
 			assert.Equal(t, wfv1.NodeSucceeded, status.Phase)
 		})
 }
 
-func (s *CLISuite) TestTemplateLevelSemaphore() {
+func (s *CLIWithServerSuite) TestTemplateLevelSemaphore() {
 	semaphoreData := map[string]string{
 		"template": "1",
 	}
@@ -994,8 +994,7 @@ func (s *CLISuite) TestTemplateLevelSemaphore() {
 		RunCli([]string{"get", "semaphore-tmpl-level"}, func(t *testing.T, output string, err error) {
 			assert.Contains(t, output, "Waiting for")
 		}).
-		WaitForWorkflow(30 * time.Second).
-		DeleteConfigMap()
+		WaitForWorkflow(30 * time.Second)
 }
 
 func (s *CLISuite) TestRetryOmit() {
@@ -1004,7 +1003,11 @@ func (s *CLISuite) TestRetryOmit() {
 		Workflow("@testdata/retry-omit.yaml").
 		When().
 		SubmitWorkflow().
-		WaitForWorkflow(20*time.Second).
+		WaitForWorkflowCondition(func(wf *wfv1.Workflow) bool {
+			return wf.Status.Nodes.Any(func(node wfv1.NodeStatus) bool {
+				return node.Phase == wfv1.NodeOmitted
+			})
+		}, "any node omitted", 20*time.Second).
 		Then().
 		ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) {
 			node := status.Nodes.FindByDisplayName("should-not-execute")
@@ -1019,7 +1022,7 @@ func (s *CLISuite) TestRetryOmit() {
 		WaitForWorkflow(20 * time.Second)
 }
 
-func (s *CLISuite) TestResourceTemplateStopAndTerminate() {
+func (s *CLIWithServerSuite) TestResourceTemplateStopAndTerminate() {
 	s.testNeedsOffloading()
 	s.Run("ResourceTemplateStop", func() {
 		s.Given().
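
The `DeleteConfigMap("my-config")` call above pairs with a `CreateConfigMap` earlier in the same test, outside the context lines of this hunk. Roughly how the two now fit together, sketched against the fixtures API in this diff (the fixture path is illustrative):

semaphoreData := map[string]string{"workflow": "1"}
s.Given().
	Workflow("@testdata/semaphore-wf-level.yaml"). // illustrative fixture path
	When().
	CreateConfigMap("my-config", semaphoreData). // name is now passed explicitly...
	SubmitWorkflow().
	WaitForWorkflow(30 * time.Second).
	DeleteConfigMap("my-config"). // ...and must match here, since When no longer caches the object
	Then().
	ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) {
		assert.Equal(t, wfv1.NodeSucceeded, status.Phase)
	})
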
diff --git a/test/e2e/fixtures/e2e_suite.go b/test/e2e/fixtures/e2e_suite.go
index a9c83a3dadfa..674aa791ec11 100644
--- a/test/e2e/fixtures/e2e_suite.go
+++ b/test/e2e/fixtures/e2e_suite.go
@@ -7,6 +7,10 @@ import (
 	"strings"
 	"time"
 
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/dynamic"
+
 	// load the azure plugin (required to authenticate against AKS clusters).
 	_ "k8s.io/client-go/plugin/pkg/client/auth/azure"
 	// load the gcp plugin (required to authenticate against GKE clusters).
@@ -17,13 +21,12 @@ import (
 	log "github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/suite"
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 	"sigs.k8s.io/yaml"
 
+	"github.com/argoproj/argo/pkg/apis/workflow"
 	"github.com/argoproj/argo/pkg/client/clientset/versioned"
 	"github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1"
 	"github.com/argoproj/argo/util/kubeconfig"
@@ -47,9 +50,6 @@ type E2ESuite struct {
 	cronClient v1alpha1.CronWorkflowInterface
 	KubeClient kubernetes.Interface
 	hydrator   hydrator.Interface
-	// Guard-rail.
-	// The number of archived workflows. If is changes between two tests, we have a problem.
-	numWorkflows int
 }
 
 func (s *E2ESuite) SetupSuite() {
@@ -82,153 +82,60 @@ func (s *E2ESuite) BeforeTest(suiteName, testName string) {
 	s.CheckError(err)
 	log.Infof("logging debug diagnostics to file://%s", name)
 	s.DeleteResources(Label)
-	numWorkflows := s.countWorkflows()
-	if s.numWorkflows > 0 && s.numWorkflows != numWorkflows {
-		s.T().Fatal("there should almost never be a change to the number of workflows between tests, this means the last test (not the current test) is bad and needs fixing - note this guard-rail does not work across test suites")
-	}
-	s.numWorkflows = numWorkflows
 }
 
-func (s *E2ESuite) countWorkflows() int {
-	workflows, err := s.wfClient.List(metav1.ListOptions{})
-	s.CheckError(err)
-	return len(workflows.Items)
-}
+var foreground = metav1.DeletePropagationForeground
+var foregroundDelete = &metav1.DeleteOptions{PropagationPolicy: &foreground}
 
 func (s *E2ESuite) DeleteResources(label string) {
-	// delete all cron workflows
-	cronList, err := s.cronClient.List(metav1.ListOptions{LabelSelector: label})
-	s.CheckError(err)
-	for _, cronWf := range cronList.Items {
-		log.WithFields(log.Fields{"cronWorkflow": cronWf.Name}).Debug("Deleting cron workflow")
-		err = s.cronClient.Delete(cronWf.Name, nil)
+	// delete archived workflows from the archive
+	if s.Persistence.IsEnabled() {
+		archive := s.Persistence.workflowArchive
+		parse, err := labels.ParseToRequirements(label)
 		s.CheckError(err)
-	}
-
-	// It is possible for a pod to become orphaned. This means that it's parent workflow
-	// (as set in the "workflows.argoproj.io/workflow" label) does not exist.
-	// We need to delete orphans as well as test pods.
-	// Get a list of all workflows.
-	// if absent from this this it has been delete - so any associated pods are orphaned
-	// if in the list it is either a test wf or not
-	isTestWf := make(map[string]bool)
-	{
-		list, err := s.wfClient.List(metav1.ListOptions{LabelSelector: label})
+		workflows, err := archive.ListWorkflows(Namespace, time.Time{}, time.Time{}, parse, 0, 0)
 		s.CheckError(err)
-		for _, wf := range list.Items {
-			isTestWf[wf.Name] = false
-			if s.Persistence.IsEnabled() && wf.Status.IsOffloadNodeStatus() {
-				err := s.Persistence.offloadNodeStatusRepo.Delete(string(wf.UID), wf.Status.OffloadNodeStatusVersion)
-				s.CheckError(err)
-			}
+		for _, w := range workflows {
+			err := archive.DeleteWorkflow(string(w.UID))
+			s.CheckError(err)
 		}
 	}
 
-	// delete from the archive
-	{
-		if s.Persistence.IsEnabled() {
-			archive := s.Persistence.workflowArchive
-			parse, err := labels.ParseToRequirements(Label)
-			s.CheckError(err)
-			workflows, err := archive.ListWorkflows(Namespace, time.Time{}, time.Time{}, parse, 0, 0)
-			s.CheckError(err)
-			for _, workflow := range workflows {
-				err := archive.DeleteWorkflow(string(workflow.UID))
-				s.CheckError(err)
-			}
-		}
+	hasTestLabel := metav1.ListOptions{LabelSelector: label}
+	resources := []schema.GroupVersionResource{
+		{Group: workflow.Group, Version: workflow.Version, Resource: workflow.CronWorkflowPlural},
+		{Group: workflow.Group, Version: workflow.Version, Resource: workflow.WorkflowEventBindingPlural},
+		{Group: workflow.Group, Version: workflow.Version, Resource: workflow.WorkflowPlural},
+		{Group: workflow.Group, Version: workflow.Version, Resource: workflow.WorkflowTemplatePlural},
+		{Group: workflow.Group, Version: workflow.Version, Resource: workflow.ClusterWorkflowTemplatePlural},
+		{Version: "v1", Resource: "resourcequotas"},
+		{Version: "v1", Resource: "configmaps"},
 	}
 
-	// delete all workflows
-	{
-		list, err := s.wfClient.List(metav1.ListOptions{LabelSelector: Label})
+	for _, r := range resources {
+		err := s.dynamicFor(r).DeleteCollection(foregroundDelete, hasTestLabel)
 		s.CheckError(err)
-		for _, wf := range list.Items {
-			logCtx := log.WithFields(log.Fields{"workflow": wf.Name})
-			logCtx.Debug("Deleting workflow")
-			err = s.wfClient.Delete(wf.Name, &metav1.DeleteOptions{})
-			if errors.IsNotFound(err) {
-				continue
-			}
-			s.CheckError(err)
-			isTestWf[wf.Name] = true
-			for {
-				_, err := s.wfClient.Get(wf.Name, metav1.GetOptions{})
-				if errors.IsNotFound(err) {
-					break
-				}
-				logCtx.Debug("Waiting for workflow to be deleted")
-				time.Sleep(1 * time.Second)
-			}
-		}
 	}
 
-	// delete workflow pods
-	{
-		podInterface := s.KubeClient.CoreV1().Pods(Namespace)
-		// it seems "argo delete" can leave pods behind
-		pods, err := podInterface.List(metav1.ListOptions{LabelSelector: "workflows.argoproj.io/workflow"})
-		s.CheckError(err)
-		for _, pod := range pods.Items {
-			workflow := pod.GetLabels()["workflows.argoproj.io/workflow"]
-			testPod, owned := isTestWf[workflow]
-			if testPod || !owned {
-				logCtx := log.WithFields(log.Fields{"workflow": workflow, "podName": pod.Name, "testPod": testPod, "owned": owned})
-				logCtx.Debug("Deleting pod")
-				err := podInterface.Delete(pod.Name, nil)
-				if !errors.IsNotFound(err) {
-					s.CheckError(err)
-				}
-				for {
-					_, err := podInterface.Get(pod.Name, metav1.GetOptions{})
-					if errors.IsNotFound(err) {
-						break
-					}
-					logCtx.Debug("Waiting for pod to be deleted")
-					time.Sleep(1 * time.Second)
-				}
+	for _, r := range resources {
+		for {
+			list, err := s.dynamicFor(r).List(hasTestLabel)
+			s.CheckError(err)
+			if len(list.Items) == 0 {
+				break
 			}
+			time.Sleep(time.Second)
 		}
 	}
+}
 
-	// delete all workflow events
-	events, err := s.wfebClient.List(metav1.ListOptions{LabelSelector: label})
-	s.CheckError(err)
-
-	for _, item := range events.Items {
-		log.WithField("template", item.Name).Debug("Deleting workflow event")
-		err = s.wfebClient.Delete(item.Name, nil)
-		s.CheckError(err)
-	}
-
-	// delete all workflow templates
-	wfTmpl, err := s.wfTemplateClient.List(metav1.ListOptions{LabelSelector: label})
-	s.CheckError(err)
-
-	for _, wfTmpl := range wfTmpl.Items {
-		log.WithField("template", wfTmpl.Name).Debug("Deleting workflow template")
-		err = s.wfTemplateClient.Delete(wfTmpl.Name, nil)
-		s.CheckError(err)
-	}
-
-	// delete all cluster workflow templates
-	cwfTmpl, err := s.cwfTemplateClient.List(metav1.ListOptions{LabelSelector: label})
-	s.CheckError(err)
-	for _, cwfTmpl := range cwfTmpl.Items {
-		log.WithField("template", cwfTmpl.Name).Debug("Deleting cluster workflow template")
-		err = s.cwfTemplateClient.Delete(cwfTmpl.Name, nil)
-		s.CheckError(err)
-	}
-
-	// Delete all resourcequotas
-	rqList, err := s.KubeClient.CoreV1().ResourceQuotas(Namespace).List(metav1.ListOptions{LabelSelector: label})
-	s.CheckError(err)
-	for _, rq := range rqList.Items {
-		log.WithField("resourcequota", rq.Name).Debug("Deleting resource quota")
-		err = s.KubeClient.CoreV1().ResourceQuotas(Namespace).Delete(rq.Name, nil)
-		s.CheckError(err)
+func (s *E2ESuite) dynamicFor(r schema.GroupVersionResource) dynamic.ResourceInterface {
+	resourceInterface := dynamic.NewForConfigOrDie(s.RestConfig).Resource(r)
+	if r.Resource == workflow.ClusterWorkflowTemplatePlural {
+		return resourceInterface
 	}
+	return resourceInterface.Namespace(Namespace)
 }
 
 func (s *E2ESuite) CheckError(err error) {
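
The `DeleteResources` rewrite above collapses several hand-rolled cleanup loops into one pattern: a foreground `DeleteCollection` filtered by the test label, then polling `List` until nothing comes back. A stripped-down sketch of that pattern for a single resource type, assuming a populated `*rest.Config` named `cfg` and the pre-context client-go signatures this codebase uses:

package fixtures

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
)

// deleteLabelledWorkflows removes every workflow carrying the test label,
// then waits until the API server reports none left.
func deleteLabelledWorkflows(cfg *rest.Config) error {
	foreground := metav1.DeletePropagationForeground
	deleteOpts := &metav1.DeleteOptions{PropagationPolicy: &foreground}
	hasTestLabel := metav1.ListOptions{LabelSelector: "argo-e2e"}

	gvr := schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "workflows"}
	client := dynamic.NewForConfigOrDie(cfg).Resource(gvr).Namespace("argo")

	// one call deletes the whole labelled collection...
	if err := client.DeleteCollection(deleteOpts, hasTestLabel); err != nil {
		return err
	}
	// ...but foreground propagation still completes asynchronously from the
	// caller's point of view, so poll until the collection is actually empty
	for {
		list, err := client.List(hasTestLabel)
		if err != nil {
			return err
		}
		if len(list.Items) == 0 {
			return nil
		}
		time.Sleep(time.Second)
	}
}

Foreground propagation also deletes owned pods before the workflow itself is finalized, which is why the old `isTestWf` orphan-tracking map is no longer needed.
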
diff --git a/test/e2e/fixtures/when.go b/test/e2e/fixtures/when.go
index 1d66bff40a3e..76b82515e4df 100644
--- a/test/e2e/fixtures/when.go
+++ b/test/e2e/fixtures/when.go
@@ -6,13 +6,12 @@ import (
 
 	log "github.com/sirupsen/logrus"
 	corev1 "k8s.io/api/core/v1"
-	apierr "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 
 	wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
 	"github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1"
-	"github.com/argoproj/argo/test/util"
 	"github.com/argoproj/argo/workflow/hydrator"
 )
 
@@ -33,9 +32,6 @@ type When struct {
 	wfTemplateNames  []string
 	cronWorkflowName string
 	kubeClient       kubernetes.Interface
-	resourceQuota    *corev1.ResourceQuota
-	storageQuota     *corev1.ResourceQuota
-	configMap        *corev1.ConfigMap
 }
 
 func (w *When) SubmitWorkflow() *When {
@@ -133,8 +129,7 @@ func (w *When) waitForWorkflow(workflowName string, test func(wf *wfv1.Workflow) bool, condition string, timeout time.Duration) *When {
 		fieldSelector = "metadata.name=" + workflowName
 	}
 
-	logCtx := log.WithFields(log.Fields{"fieldSelector": fieldSelector, "condition": condition, "timeout": timeout})
-	logCtx.Info("Waiting for condition")
+	log.WithFields(log.Fields{"fieldSelector": fieldSelector}).Infof("Waiting %v for workflow %s", timeout, condition)
 	opts := metav1.ListOptions{LabelSelector: Label, FieldSelector: fieldSelector}
 	watch, err := w.client.Watch(opts)
 	if err != nil {
@@ -151,10 +146,9 @@ func (w *When) waitForWorkflow(workflowName string, test func(wf *wfv1.Workflow) bool, condition string, timeout time.Duration) *When {
 		case event := <-watch.ResultChan():
 			wf, ok := event.Object.(*wfv1.Workflow)
 			if ok {
-				logCtx.WithFields(log.Fields{"workflow": wf.Name, "type": event.Type, "phase": wf.Status.Phase, "message": wf.Status.Message}).Info("...")
 				w.hydrateWorkflow(wf)
 				if test(wf) {
-					logCtx.Infof("Condition met after %v", time.Since(start).Truncate(time.Second))
+					log.Infof("Condition met after %v", time.Since(start).Truncate(time.Second))
 					w.workflowName = wf.Name
 					return w
 				}
@@ -234,70 +228,61 @@ func (w *When) RunCli(args []string, block func(t *testing.T, output string, err error)) *When {
 
 func (w *When) CreateConfigMap(name string, data map[string]string) *When {
 	w.t.Helper()
-	//Clean if same map is already exist
-	err := w.kubeClient.CoreV1().ConfigMaps("argo").Delete(name, &metav1.DeleteOptions{})
-	if err != nil {
-		if !apierr.IsNotFound(err) {
-			panic(err)
-		}
-	}
-	obj, err := util.CreateConfigMap(w.kubeClient, "argo", name, data)
+	_, err := w.kubeClient.CoreV1().ConfigMaps(Namespace).Create(&corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{Label: "true"}},
+		Data:       data,
+	})
 	if err != nil {
 		w.t.Fatal(err)
 	}
-	w.configMap = obj
 	return w
 }
 
-func (w *When) DeleteConfigMap() *When {
+func (w *When) DeleteConfigMap(name string) *When {
 	w.t.Helper()
-	err := util.DeleteConfigMap(w.kubeClient, w.configMap)
+	err := w.kubeClient.CoreV1().ConfigMaps(Namespace).Delete(name, nil)
 	if err != nil {
-		if !apierr.IsNotFound(err) {
-			w.t.Fatal(err)
-		}
+		w.t.Fatal(err)
 	}
-	w.configMap = nil
 	return w
 }
 
-func (w *When) MemoryQuota(quota string) *When {
+func (w *When) MemoryQuota(memoryLimit string) *When {
 	w.t.Helper()
-	obj, err := util.CreateHardMemoryQuota(w.kubeClient, "argo", "memory-quota", quota)
-	if err != nil {
-		w.t.Fatal(err)
-	}
-	w.resourceQuota = obj
-	return w
+	return w.createResourceQuota("memory-quota", corev1.ResourceList{corev1.ResourceLimitsMemory: resource.MustParse(memoryLimit)})
 }
 
-func (w *When) StorageQuota(quota string) *When {
+func (w *When) StorageQuota(storageLimit string) *When {
 	w.t.Helper()
-	obj, err := util.CreateHardStorageQuota(w.kubeClient, "argo", "storage-quota", quota)
-	if err != nil {
-		w.t.Fatal(err)
-	}
-	w.storageQuota = obj
-	return w
+	return w.createResourceQuota("storage-quota", corev1.ResourceList{"requests.storage": resource.MustParse(storageLimit)})
 }
 
-func (w *When) DeleteStorageQuota() *When {
+func (w *When) createResourceQuota(name string, rl corev1.ResourceList) *When {
 	w.t.Helper()
-	err := util.DeleteQuota(w.kubeClient, w.storageQuota)
+	_, err := w.kubeClient.CoreV1().ResourceQuotas(Namespace).Create(&corev1.ResourceQuota{
+		ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{Label: "true"}},
+		Spec:       corev1.ResourceQuotaSpec{Hard: rl},
+	})
 	if err != nil {
 		w.t.Fatal(err)
 	}
-	w.storageQuota = nil
 	return w
 }
 
-func (w *When) DeleteQuota() *When {
+func (w *When) DeleteStorageQuota() *When {
+	return w.deleteResourceQuota("storage-quota")
+}
+
+func (w *When) DeleteMemoryQuota() *When {
+	return w.deleteResourceQuota("memory-quota")
+}
+
+func (w *When) deleteResourceQuota(name string) *When {
 	w.t.Helper()
-	err := util.DeleteQuota(w.kubeClient, w.resourceQuota)
+	err := w.kubeClient.CoreV1().ResourceQuotas(Namespace).Delete(name, foregroundDelete)
 	if err != nil {
 		w.t.Fatal(err)
 	}
-	w.resourceQuota = nil
 	return w
 }
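
With the quota helpers now symmetric (created by name, deleted by the same fixed name, nothing cached on `When`), a memory-starvation test reads as below; a sketch against the fixtures API in this diff, where the fixture path, node names, and the `130M` limit are illustrative:

s.Given().
	Workflow("@testdata/two-pods.yaml"). // illustrative fixture path
	When().
	MemoryQuota("130M"). // creates ResourceQuota "memory-quota"
	SubmitWorkflow().
	WaitForWorkflowCondition(func(wf *wfv1.Workflow) bool {
		// while the quota is in force, both pods sit in Pending
		a := wf.Status.Nodes.FindByDisplayName("a")
		b := wf.Status.Nodes.FindByDisplayName("b")
		return a != nil && b != nil &&
			a.Phase == wfv1.NodePending && b.Phase == wfv1.NodePending
	}, "pods pending", 30*time.Second).
	DeleteMemoryQuota(). // deletes "memory-quota" by name, releasing the pods
	WaitForWorkflow(30 * time.Second)
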
diff --git a/test/e2e/functional_test.go b/test/e2e/functional_test.go
index 074df036325f..ef755dad48f1 100644
--- a/test/e2e/functional_test.go
+++ b/test/e2e/functional_test.go
@@ -330,7 +330,7 @@ spec:
 				wfv1.NodePending == b.Phase &&
 				regexp.MustCompile(`^Pending \d+\.\d+s$`).MatchString(b.Message)
 		}, "pods pending", 30*time.Second).
-		DeleteQuota().
+		DeleteMemoryQuota().
 		WaitForWorkflowCondition(func(wf *wfv1.Workflow) bool {
 			a := wf.Status.Nodes.FindByDisplayName("a")
 			b := wf.Status.Nodes.FindByDisplayName("b")
@@ -381,7 +381,7 @@ spec:
 				wfv1.NodePending == b.Phase &&
 				regexp.MustCompile(`^Pending \d+\.\d+s$`).MatchString(b.Message)
 		}, "pods pending", 30*time.Second).
-		DeleteQuota().
+		DeleteMemoryQuota().
 		WaitForWorkflowCondition(func(wf *wfv1.Workflow) bool {
 			a := wf.Status.Nodes.FindByDisplayName("a(0)")
 			b := wf.Status.Nodes.FindByDisplayName("b(0)")
@@ -715,7 +715,7 @@ spec:
 `).
 		When().
 		SubmitWorkflow().
-		WaitForWorkflow(10 * time.Second).
+		WaitForWorkflow(15 * time.Second).
 		Then().
 		ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) {
 			assert.Equal(t, wfv1.NodeFailed, status.Phase)
diff --git a/test/e2e/malformed_resources_test.go b/test/e2e/malformed_resources_test.go
index 9487cdd6d213..c422e38c6bf5 100644
--- a/test/e2e/malformed_resources_test.go
+++ b/test/e2e/malformed_resources_test.go
@@ -19,15 +19,6 @@ type MalformedResourcesSuite struct {
 	fixtures.E2ESuite
 }
 
-func (s *MalformedResourcesSuite) AfterTest(suiteName, testName string) {
-
-	// delete any malformed items first, as they'll break later clean-up
-	_, err := fixtures.Exec("kubectl", "-n", fixtures.Namespace, "delete", "workflows,workflowtemplates,clusterworkflowtemplates,cronworkflows", "-l", "argo-e2e=malformed")
-	s.CheckError(err)
-
-	s.E2ESuite.AfterTest(suiteName, testName)
-}
-
 func (s *MalformedResourcesSuite) TestMalformedWorkflow() {
 	s.Given().
 		Exec("kubectl", []string{"apply", "-f", "testdata/malformed/malformed-workflow.yaml"}, fixtures.NoError).
diff --git a/test/util/configmap.go b/test/util/configmap.go
deleted file mode 100644
index 07fd0b001230..000000000000
--- a/test/util/configmap.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package util
-
-import (
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/kubernetes"
-)
-
-func CreateConfigMap(clientset kubernetes.Interface, namespace, name string, data map[string]string) (*corev1.ConfigMap, error) {
-	return clientset.CoreV1().ConfigMaps(namespace).Create(&corev1.ConfigMap{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:   name,
-			Labels: map[string]string{"argo-e2e": "true"},
-		},
-		Data: data,
-	})
-}
-
-func DeleteConfigMap(clientset kubernetes.Interface, cm *corev1.ConfigMap) error {
-	if cm == nil {
-		return nil
-	}
-	return clientset.CoreV1().ConfigMaps(cm.Namespace).Delete(cm.Name, &metav1.DeleteOptions{})
-}
diff --git a/test/util/resourcequota.go b/test/util/resourcequota.go
deleted file mode 100644
index 63874b8c1bc5..000000000000
--- a/test/util/resourcequota.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package util
-
-import (
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/kubernetes"
-)
-
-func CreateHardMemoryQuota(clientset kubernetes.Interface, namespace, name, memoryLimit string) (*corev1.ResourceQuota, error) {
-	resourceList := corev1.ResourceList{
-		corev1.ResourceLimitsMemory: resource.MustParse(memoryLimit),
-	}
-	return CreateResourceQuota(clientset, namespace, name, resourceList)
-}
-
-func CreateHardStorageQuota(clientset kubernetes.Interface, namespace, name, storageLimit string) (*corev1.ResourceQuota, error) {
-	resourceList := corev1.ResourceList{
-		"requests.storage": resource.MustParse(storageLimit),
-	}
-	return CreateResourceQuota(clientset, namespace, name, resourceList)
-}
-
-func CreateResourceQuota(clientset kubernetes.Interface, namespace, name string, rl corev1.ResourceList) (*corev1.ResourceQuota, error) {
-	return clientset.CoreV1().ResourceQuotas(namespace).Create(&corev1.ResourceQuota{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:   name,
-			Labels: map[string]string{"argo-e2e": "true"},
-		},
-		Spec: corev1.ResourceQuotaSpec{
-			Hard: rl,
-		},
-	})
-}
-
-func DeleteQuota(clientset kubernetes.Interface, quota *corev1.ResourceQuota) error {
-	if quota == nil {
-		return nil
-	}
-	return clientset.CoreV1().ResourceQuotas(quota.Namespace).Delete(quota.Name, &metav1.DeleteOptions{})
-}