diff --git a/docs/using.md b/docs/using.md
index 0beaad18a2c..acc87a9cf45 100644
--- a/docs/using.md
+++ b/docs/using.md
@@ -122,9 +122,14 @@ specific contract.
 #### Entrypoint
 
 When containers are run in a `Task`, the `entrypoint` of the container will be
-overwritten with a custom binary that redirects the logs to a separate location
-for aggregating the log output. As such, it is always recommended to explicitly
-specify a command.
+overwritten with a custom binary. The plan is to use this custom binary for
+controlling the execution of step containers ([#224](https://github.com/knative/build-pipeline/issues/224)) and log streaming
+([#107](https://github.com/knative/build-pipeline/issues/107)), though currently
+it will write logs only to an [`emptyDir`](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) (which cannot be read from after
+the pod has finished executing, so logs must be obtained
+[via k8s logs](https://kubernetes.io/docs/concepts/cluster-administration/logging/),
+using a tool such as the one described in [test/logs/README.md](../test/logs/README.md),
+or by setting up an external system to consume logs).
 
-When `command` is not explicitly set, the controller will attempt to lookup the
-entrypoint from the remote registry.
+When `command` is not explicitly set, the controller will attempt to look up
+the entrypoint from the remote registry.
diff --git a/pkg/reconciler/v1alpha1/taskrun/taskrun.go b/pkg/reconciler/v1alpha1/taskrun/taskrun.go
index aef0bc77061..5868ada5643 100644
--- a/pkg/reconciler/v1alpha1/taskrun/taskrun.go
+++ b/pkg/reconciler/v1alpha1/taskrun/taskrun.go
@@ -38,11 +38,9 @@ import (
     corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/equality"
     "k8s.io/apimachinery/pkg/api/errors"
-    "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime/schema"
     coreinformers "k8s.io/client-go/informers/core/v1"
-    "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/tools/cache"
 )
 
@@ -67,8 +65,6 @@ const (
     taskRunAgentName = "taskrun-controller"
     // taskRunControllerName defines name for TaskRun Controller
     taskRunControllerName = "TaskRun"
-
-    pvcSizeBytes = 5 * 1024 * 1024 * 1024 // 5 GBs
 )
 
 var (
@@ -263,19 +259,8 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1alpha1.TaskRun) error
             return err
         }
     } else {
-        pvc, err := c.KubeClientSet.CoreV1().PersistentVolumeClaims(tr.Namespace).Get(tr.Name, metav1.GetOptions{})
-        if errors.IsNotFound(err) {
-            // Create a persistent volume claim to hold Build logs
-            pvc, err = createPVC(c.KubeClientSet, tr)
-            if err != nil {
-                return fmt.Errorf("Failed to create persistent volume claim %s for task %q: %v", tr.Name, err, tr.Name)
-            }
-        } else if err != nil {
-            c.Logger.Errorf("Failed to reconcile taskrun: %q, failed to get pvc %q: %v", tr.Name, tr.Name, err)
-            return err
-        }
         // Build pod is not present, create build pod.
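+        // Step logs are now written to an emptyDir volume that is created
+        // along with the pod itself (see #107), so there is no longer a
+        // PVC to look up or create before building the pod.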
-        pod, err = c.createBuildPod(ctx, tr, rtr.TaskSpec, rtr.TaskName, pvc.Name)
+        pod, err = c.createBuildPod(ctx, tr, rtr.TaskSpec, rtr.TaskName)
         if err != nil {
             // This Run has failed, so we need to mark it as failed and stop reconciling it
             var msg string
@@ -354,40 +339,9 @@ func (c *Reconciler) updateStatus(taskrun *v1alpha1.TaskRun) (*v1alpha1.TaskRun,
     return newtaskrun, nil
 }
 
-// createPVC will create a persistent volume mount for tr which
-// will be used to gather logs using the entrypoint wrapper
-func createPVC(kc kubernetes.Interface, tr *v1alpha1.TaskRun) (*corev1.PersistentVolumeClaim, error) {
-    v, err := kc.CoreV1().PersistentVolumeClaims(tr.Namespace).Create(
-        &corev1.PersistentVolumeClaim{
-            ObjectMeta: metav1.ObjectMeta{
-                Namespace: tr.Namespace,
-                // This pvc is specific to this TaskRun, so we'll use the same name
-                Name: tr.Name,
-                OwnerReferences: []metav1.OwnerReference{
-                    *metav1.NewControllerRef(tr, groupVersionKind),
-                },
-            },
-            Spec: corev1.PersistentVolumeClaimSpec{
-                AccessModes: []corev1.PersistentVolumeAccessMode{
-                    corev1.ReadWriteOnce,
-                },
-                Resources: corev1.ResourceRequirements{
-                    Requests: map[corev1.ResourceName]resource.Quantity{
-                        corev1.ResourceStorage: *resource.NewQuantity(pvcSizeBytes, resource.BinarySI),
-                    },
-                },
-            },
-        },
-    )
-    if err != nil {
-        return nil, fmt.Errorf("failed to claim Persistent Volume %q due to error: %s", tr.Name, err)
-    }
-    return v, nil
-}
-
-// createPod creates a Pod based on the Task's configuration, with pvcName as a
-// volumeMount
-func (c *Reconciler) createBuildPod(ctx context.Context, tr *v1alpha1.TaskRun, ts *v1alpha1.TaskSpec, taskName, pvcName string) (*corev1.Pod, error) {
+// createBuildPod creates a Pod based on the Task's configuration; step logs
+// are redirected to an emptyDir volume rather than to a PVC.
+func (c *Reconciler) createBuildPod(ctx context.Context, tr *v1alpha1.TaskRun, ts *v1alpha1.TaskSpec, taskName string) (*corev1.Pod, error) {
     // TODO: Preferably use Validate on task.spec to catch validation error
     bs := ts.GetBuildSpec()
     if bs == nil {
@@ -420,7 +374,7 @@ func (c *Reconciler) createBuildPod(ctx context.Context, tr *v1alpha1.TaskRun, t
         }
     }
 
-    build, err := createRedirectedBuild(ctx, bSpec, pvcName, tr)
+    build, err := createRedirectedBuild(ctx, bSpec, tr)
     if err != nil {
         return nil, fmt.Errorf("couldn't create redirected Build: %v", err)
     }
@@ -474,7 +428,7 @@ func (c *Reconciler) createBuildPod(ctx context.Context, tr *v1alpha1.TaskRun, t
 // an entrypoint cache creates a build where all entrypoints are switched to
 // be the entrypoint redirector binary. This function assumes that it receives
 // its own copy of the BuildSpec and modifies it freely
-func createRedirectedBuild(ctx context.Context, bs *buildv1alpha1.BuildSpec, pvcName string, tr *v1alpha1.TaskRun) (*buildv1alpha1.Build, error) {
+func createRedirectedBuild(ctx context.Context, bs *buildv1alpha1.BuildSpec, tr *v1alpha1.TaskRun) (*buildv1alpha1.Build, error) {
     // Pass service account name from taskrun to build
     bs.ServiceAccountName = tr.Spec.ServiceAccount
 
@@ -505,9 +459,9 @@ func createRedirectedBuild(ctx context.Context, bs *buildv1alpha1.BuildSpec, pvc
     b.Spec.Volumes = append(b.Spec.Volumes, corev1.Volume{
         Name: entrypoint.MountName,
         VolumeSource: corev1.VolumeSource{
-            PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
-                ClaimName: pvcName,
-            },
+            // TODO(#107) we need to actually stream these logs somewhere, probably via sidecar.
+            // Currently these logs will be lost when the pod is deleted from its node.
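+            // An emptyDir volume is created empty when the pod is assigned
+            // to a node and is removed along with the pod, so unlike the
+            // PVC it replaces it needs no sizing, lookup, or cleanup.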
+            EmptyDir: &corev1.EmptyDirVolumeSource{},
         },
     })
 
diff --git a/pkg/reconciler/v1alpha1/taskrun/taskrun_test.go b/pkg/reconciler/v1alpha1/taskrun/taskrun_test.go
index 2ea8096bb62..eae78b441ed 100644
--- a/pkg/reconciler/v1alpha1/taskrun/taskrun_test.go
+++ b/pkg/reconciler/v1alpha1/taskrun/taskrun_test.go
@@ -35,7 +35,6 @@ import (
     "go.uber.org/zap"
     "go.uber.org/zap/zaptest/observer"
     corev1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     fakekubeclientset "k8s.io/client-go/kubernetes/fake"
@@ -107,36 +106,11 @@ var (
     ))
 )
 
-func getExpectedPVC(tr *v1alpha1.TaskRun) *corev1.PersistentVolumeClaim {
-    return &corev1.PersistentVolumeClaim{
-        ObjectMeta: metav1.ObjectMeta{
-            Namespace: tr.Namespace,
-            // This pvc is specific to this TaskRun, so we'll use the same name
-            Name: tr.Name,
-            OwnerReferences: []metav1.OwnerReference{
-                *metav1.NewControllerRef(tr, groupVersionKind),
-            },
-        },
-        Spec: corev1.PersistentVolumeClaimSpec{
-            AccessModes: []corev1.PersistentVolumeAccessMode{
-                corev1.ReadWriteOnce,
-            },
-            Resources: corev1.ResourceRequirements{
-                Requests: map[corev1.ResourceName]resource.Quantity{
-                    corev1.ResourceStorage: *resource.NewQuantity(pvcSizeBytes, resource.BinarySI),
-                },
-            },
-        },
-    }
-}
-
 func getToolsVolume(claimName string) corev1.Volume {
     return corev1.Volume{
         Name: toolsMountName,
         VolumeSource: corev1.VolumeSource{
-            PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
-                ClaimName: claimName,
-            },
+            EmptyDir: &corev1.EmptyDirVolumeSource{},
         },
     }
 }
@@ -385,27 +359,6 @@ func TestReconcile(t *testing.T) {
             if len(clients.Kube.Actions()) == 0 {
                 t.Fatalf("Expected actions to be logged in the kubeclient, got none")
             }
-
-            pvc, err := clients.Kube.CoreV1().PersistentVolumeClaims(namespace).Get(name, metav1.GetOptions{})
-            if err != nil {
-                t.Errorf("Failed to fetch build: %v", err)
-            }
-
-            expectedVolume := getExpectedPVC(tr)
-            if d := cmp.Diff(pvc.Name, expectedVolume.Name); d != "" {
-                t.Errorf("pvc doesn't match, diff: %s", d)
-            }
-            if d := cmp.Diff(pvc.OwnerReferences, expectedVolume.OwnerReferences); d != "" {
-                t.Errorf("pvc doesn't match, diff: %s", d)
-            }
-            if d := cmp.Diff(pvc.Spec.AccessModes, expectedVolume.Spec.AccessModes); d != "" {
-                t.Errorf("pvc doesn't match, diff: %s", d)
-            }
-            if pvc.Spec.Resources.Requests["storage"] != expectedVolume.Spec.Resources.Requests["storage"] {
-                t.Errorf("pvc doesn't match, got: %v, expected: %v",
-                    pvc.Spec.Resources.Requests["storage"],
-                    expectedVolume.Spec.Resources.Requests["storage"])
-            }
         })
     }
 }
@@ -750,7 +703,7 @@ func TestCreateRedirectedBuild(t *testing.T) {
     expectedSteps := len(bs.Steps) + 1
     expectedVolumes := len(bs.Volumes) + 1
 
-    b, err := createRedirectedBuild(ctx, &bs, "pvc", tr)
+    b, err := createRedirectedBuild(ctx, &bs, tr)
     if err != nil {
         t.Errorf("expected createRedirectedBuild to pass: %v", err)
     }
diff --git a/test/cancel_test.go b/test/cancel_test.go
index b113ca4c843..dd558aa559c 100644
--- a/test/cancel_test.go
+++ b/test/cancel_test.go
@@ -44,7 +44,7 @@ func TestTaskRunPipelineRunCancel(t *testing.T) {
         tb.Step("foo", "ubuntu", tb.Command("/bin/bash"), tb.Args("-c", "sleep 500")),
     ))
     if _, err := c.TaskClient.Create(task); err != nil {
-        t.Fatalf("Failed to create Task `%s`: %s", hwTaskName, err)
+        t.Fatalf("Failed to create Task `banana`: %s", err)
     }
 
     pipeline := tb.Pipeline("tomatoes", namespace,
@@ -63,7 +63,7 @@ func TestTaskRunPipelineRunCancel(t *testing.T) {
         c := pr.Status.GetCondition(duckv1alpha1.ConditionSucceeded)
         if c != nil {
             if c.Status == corev1.ConditionTrue || c.Status == corev1.ConditionFalse {
-                return true, fmt.Errorf("pipelineRun %s already finished!", "pear")
+                return true, fmt.Errorf("pipelineRun %s already finished", "pear")
             } else if c.Status == corev1.ConditionUnknown && (c.Reason == "Running" || c.Reason == "Pending") {
                 return true, nil
             }
@@ -113,10 +113,10 @@ func TestTaskRunPipelineRunCancel(t *testing.T) {
         }
         return false, nil
     }, "PipelineRunCancelled"); err != nil {
-        t.Errorf("Error waiting for TaskRun %s to finish: %s", hwTaskRunName, err)
+        t.Errorf("Error waiting for PipelineRun `pear` to finish: %s", err)
     }
 
-    logger.Infof("Waiting for TaskRun %s in namespace %s to be cancelled", hwTaskRunName, namespace)
+    logger.Infof("Waiting for TaskRun `pear-foo` in namespace %s to be cancelled", namespace)
     if err := WaitForTaskRunState(c, "pear-foo", func(tr *v1alpha1.TaskRun) (bool, error) {
         c := tr.Status.GetCondition(duckv1alpha1.ConditionSucceeded)
         if c != nil {
diff --git a/test/crd.go b/test/crd.go
deleted file mode 100644
index 18d592e3db2..00000000000
--- a/test/crd.go
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
-Copyright 2018 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// crd contains defintions of resource instances which are useful across integration tests
-
-package test
-
-import (
-    "bufio"
-    "bytes"
-    "fmt"
-    "io"
-    "testing"
-
-    "github.com/knative/pkg/test/logging"
-    corev1 "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-    "github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1"
-    tb "github.com/knative/build-pipeline/test/builder"
-)
-
-const (
-    hwTaskName = "helloworld"
-    hwTaskRunName = "helloworld-run"
-    hwValidationPodName = "helloworld-validation-busybox"
-    hwPipelineName = "helloworld-pipeline"
-    hwPipelineRunName = "helloworld-pipelinerun"
-    hwPipelineTaskName1 = "helloworld-task-1"
-    hwPipelineTaskName2 = "helloworld-task-2"
-    hwSecret = "helloworld-secret"
-    hwSA = "helloworld-sa"
-
-    logPath = "/logs"
-    logFile = "process-log.txt"
-
-    hwContainerName = "helloworld-busybox"
-    taskOutput = "do you want to build a snowman"
-    buildOutput = "Build successful"
-)
-
-func getHelloWorldValidationPod(namespace, volumeClaimName string) *corev1.Pod {
-    return &corev1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-            Namespace: namespace,
-            Name: hwValidationPodName,
-        },
-        Spec: corev1.PodSpec{
-            Containers: []corev1.Container{{
-                Name: hwValidationPodName,
-                Image: "busybox",
-                Command: []string{
-                    "cat",
-                },
-                Args: []string{fmt.Sprintf("%s/%s", logPath, logFile)},
-                VolumeMounts: []corev1.VolumeMount{
-                    {
-                        Name: "scratch",
-                        MountPath: logPath,
-                    },
-                },
-            }},
-            Volumes: []corev1.Volume{{
-                Name: "scratch",
-                VolumeSource: corev1.VolumeSource{
-                    PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
-                        ClaimName: volumeClaimName,
-                    },
-                },
-            }},
-        },
-    }
-}
-
-func getHelloWorldTask(namespace string, args []string) *v1alpha1.Task {
-    return tb.Task(hwTaskName, namespace,
-        tb.TaskSpec(tb.Step(hwContainerName, "busybox", tb.Command(args...))),
-    )
-}
-
-func getHelloWorldTaskRun(namespace string) *v1alpha1.TaskRun {
-    return tb.TaskRun(hwTaskRunName, namespace, tb.TaskRunSpec(tb.TaskRunTaskRef(hwTaskName)))
-}
-
-func getBuildOutputFromVolume(t *testing.T, logger *logging.BaseLogger, c *clients, namespace, testStr string) string {
-    t.Helper()
-    // Create Validation Pod
-    pods := c.KubeClient.Kube.CoreV1().Pods(namespace)
-
-    // Volume created for Task should have the same name as the Task
-    if _, err := pods.Create(getHelloWorldValidationPod(namespace, hwTaskRunName)); err != nil {
-        t.Fatalf("failed to create Validation pod to mount volume `%s`: %s", hwTaskRunName, err)
-    }
-
-    logger.Infof("Waiting for pod with test volume %s to come up so we can read logs from it", hwTaskRunName)
-    if err := WaitForPodState(c, hwValidationPodName, namespace, func(p *corev1.Pod) (bool, error) {
-        // the "Running" status is used as "Succeeded" caused issues as the pod succeeds and restarts quickly
-        // there might be a race condition here and possibly a better way of handling this, perhaps using a Job or different state validation
-        if p.Status.Phase == corev1.PodRunning {
-            return true, nil
-        }
-        return false, nil
-    }, "ValidationPodCompleted"); err != nil {
-        t.Fatalf("error waiting for Pod %s to finish: %s", hwValidationPodName, err)
-    }
-
-    // Get validation pod logs and verify that the build executed a container w/ desired output
-    req := pods.GetLogs(hwValidationPodName, &corev1.PodLogOptions{})
-    readCloser, err := req.Stream()
-    if err != nil {
-        t.Fatalf("failed to open stream to read: %v", err)
-    }
-    defer readCloser.Close()
-    var buf bytes.Buffer
-    out := bufio.NewWriter(&buf)
-    _, err = io.Copy(out, readCloser)
-    return buf.String()
-}
diff --git a/test/logs/README.md b/test/logs/README.md
index a14e98de11f..482f3f4d692 100644
--- a/test/logs/README.md
+++ b/test/logs/README.md
@@ -7,7 +7,8 @@
-`Pipeline` definition. Each `TaskRun` object creates Kubernetes Pod and all the
-steps are translated into init containers.
+`Pipeline` definition. Each `TaskRun` object creates a Kubernetes Pod, and all
+the steps are translated into init containers.
 
-There is a "gotcha" about tailing logs for init container. Logs cannot be
-retrieved from pod that has been shut down for a while. In this case tailing
-logs will return error `Unable to retrieve container logs`.
+There is a "gotcha" about tailing logs for init containers (to be fixed in
+[#224](https://github.com/knative/build-pipeline/issues/224)). Logs cannot be
+retrieved from a pod that has been shut down for a while. In this case, tailing
+logs will return the error `Unable to retrieve container logs`.
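+
+While the pod still exists, though, the logs of an individual step can be read
+directly, since each step runs as a named init container. For example (the pod
+and step names here are hypothetical; step containers are typically named with
+a `build-step-` prefix):
+
+```shell
+kubectl logs mytaskrun-pod-1234 -c build-step-hello
+```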
diff --git a/test/pipelinerun_test.go b/test/pipelinerun_test.go
index 851ae976850..ed8f6b97f6c 100644
--- a/test/pipelinerun_test.go
+++ b/test/pipelinerun_test.go
@@ -36,6 +36,12 @@ import (
 )
 
 var (
+    pipelineName = "pipeline"
+    pipelineRunName = "pipelinerun"
+    secretName = "secret"
+    saName = "service-account"
+    taskName = "task"
+    task1Name = "task1"
     pipelineRunTimeout = 10 * time.Minute
 )
 
@@ -65,7 +71,7 @@ func TestPipelineRun(t *testing.T) {
             }
 
             if _, err := c.PipelineClient.Create(getFanInFanOutPipeline(index, namespace)); err != nil {
-                t.Fatalf("Failed to create Pipeline `%s`: %s", getName(hwPipelineName, index), err)
+                t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err)
             }
         },
         pipelineRunFunc: getFanInFanOutPipelineRun,
@@ -79,14 +85,14 @@ func TestPipelineRun(t *testing.T) {
         testSetup: func(c *clients, namespace string, index int) {
             t.Helper()
             if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(getPipelineRunSecret(index, namespace)); err != nil {
-                t.Fatalf("Failed to create secret `%s`: %s", getName(hwSecret, index), err)
+                t.Fatalf("Failed to create secret `%s`: %s", getName(secretName, index), err)
             }
 
             if _, err := c.KubeClient.Kube.CoreV1().ServiceAccounts(namespace).Create(getPipelineRunServiceAccount(index, namespace)); err != nil {
-                t.Fatalf("Failed to create SA `%s`: %s", getName(hwSA, index), err)
+                t.Fatalf("Failed to create SA `%s`: %s", getName(saName, index), err)
             }
 
-            task := tb.Task(getName(hwTaskName, index), namespace, tb.TaskSpec(
+            task := tb.Task(getName(taskName, index), namespace, tb.TaskSpec(
                 // Reference build: https://github.com/knative/build/tree/master/test/docker-basic
                 tb.Step("config-docker", "gcr.io/cloud-builders/docker",
                     tb.Command("docker"),
@@ -101,14 +107,14 @@ func TestPipelineRun(t *testing.T) {
                 })),
             ))
             if _, err := c.TaskClient.Create(task); err != nil {
-                t.Fatalf("Failed to create Task `%s`: %s", getName(hwTaskName, index), err)
+                t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err)
             }
 
             if _, err := c.PipelineClient.Create(getHelloWorldPipelineWithSingularTask(index, namespace)); err != nil {
-                t.Fatalf("Failed to create Pipeline `%s`: %s", getName(hwPipelineName, index), err)
+                t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err)
             }
         },
-        expectedTaskRuns: []string{hwPipelineTaskName1},
+        expectedTaskRuns: []string{task1Name},
         // 1 from PipelineRun and 1 from Tasks defined in pipelinerun
         expectedNumberOfEvents: 2,
         pipelineRunFunc: getHelloWorldPipelineRun,
@@ -130,7 +136,7 @@ func TestPipelineRun(t *testing.T) {
             logger.Infof("Setting up test resources for %q test in namespace %s", td.name, namespace)
             td.testSetup(c, namespace, i)
 
-            prName := fmt.Sprintf("%s%d", hwPipelineRunName, i)
+            prName := fmt.Sprintf("%s%d", pipelineRunName, i)
             if _, err := c.PipelineRunClient.Create(td.pipelineRunFunc(i, namespace)); err != nil {
                 t.Fatalf("Failed to create PipelineRun `%s`: %s", prName, err)
             }
@@ -154,8 +160,8 @@ func TestPipelineRun(t *testing.T) {
                 t.Fatalf("Expected TaskRun %s to have succeeded but Status is %v", taskRunName, r.Status)
             }
             for name, key := range map[string]string{
-                hwPipelineRunName: pipeline.PipelineRunLabelKey,
-                hwPipelineName: pipeline.PipelineLabelKey,
+                pipelineRunName: pipeline.PipelineRunLabelKey,
+                pipelineName: pipeline.PipelineLabelKey,
             } {
                 expectedName := getName(name, i)
                 lbl := pipeline.GroupName + key
@@ -182,8 +188,8 @@ func TestPipelineRun(t *testing.T) {
 }
 
 func getHelloWorldPipelineWithSingularTask(suffix int, namespace string) *v1alpha1.Pipeline {
-    return tb.Pipeline(getName(hwPipelineName, suffix), namespace, tb.PipelineSpec(
-        tb.PipelineTask(hwPipelineTaskName1, getName(hwTaskName, suffix)),
+    return tb.Pipeline(getName(pipelineName, suffix), namespace, tb.PipelineSpec(
+        tb.PipelineTask(task1Name, getName(taskName, suffix)),
     ))
 }
 
@@ -238,7 +244,7 @@ func getFanInFanOutTasks(namespace string) []*v1alpha1.Task {
 }
 
 func getFanInFanOutPipeline(suffix int, namespace string) *v1alpha1.Pipeline {
-    return tb.Pipeline(getName(hwPipelineName, suffix), namespace, tb.PipelineSpec(
+    return tb.Pipeline(getName(pipelineName, suffix), namespace, tb.PipelineSpec(
         tb.PipelineDeclaredResource("git-repo", "git"),
         tb.PipelineTask("create-file-kritis", "create-file",
             tb.PipelineTaskOutputResource("workspace", "git-repo"),
@@ -271,16 +277,16 @@ func getPipelineRunServiceAccount(suffix int, namespace string) *corev1.ServiceA
     return &corev1.ServiceAccount{
         ObjectMeta: metav1.ObjectMeta{
             Namespace: namespace,
-            Name: getName(hwSA, suffix),
+            Name: getName(saName, suffix),
         },
         Secrets: []corev1.ObjectReference{{
-            Name: getName(hwSecret, suffix),
+            Name: getName(secretName, suffix),
         }},
     }
 }
 
 func getFanInFanOutPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun {
-    return tb.PipelineRun(getName(hwPipelineRunName, suffix), namespace, tb.PipelineRunSpec(
-        getName(hwPipelineName, suffix),
+    return tb.PipelineRun(getName(pipelineRunName, suffix), namespace, tb.PipelineRunSpec(
+        getName(pipelineName, suffix),
         tb.PipelineRunResourceBinding("git-repo", tb.PipelineResourceBindingRef("kritis-resource-git")),
     ))
 }
@@ -298,7 +304,7 @@ func getPipelineRunSecret(suffix int, namespace string) *corev1.Secret {
     return &corev1.Secret{
         ObjectMeta: metav1.ObjectMeta{
             Namespace: namespace,
-            Name: getName(hwSecret, suffix),
+            Name: getName(secretName, suffix),
             Annotations: map[string]string{
                 "build.knative.dev/docker-0": "https://us.gcr.io",
                 "build.knative.dev/docker-1": "https://eu.gcr.io",
@@ -315,9 +321,9 @@ func getPipelineRunSecret(suffix int, namespace string) *corev1.Secret {
 }
 
 func getHelloWorldPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun {
-    return tb.PipelineRun(getName(hwPipelineRunName, suffix), namespace, tb.PipelineRunSpec(
-        getName(hwPipelineName, suffix),
-        tb.PipelineRunServiceAccount(fmt.Sprintf("%s%d", hwSA, suffix)),
+    return tb.PipelineRun(getName(pipelineRunName, suffix), namespace, tb.PipelineRunSpec(
+        getName(pipelineName, suffix),
+        tb.PipelineRunServiceAccount(fmt.Sprintf("%s%d", saName, suffix)),
     ))
 }
diff --git a/test/status_test.go b/test/status_test.go
index 5fc9554a52f..6c6dc5d9340 100644
--- a/test/status_test.go
+++ b/test/status_test.go
@@ -39,18 +39,18 @@ func TestTaskRunPipelineRunStatus(t *testing.T) {
         tb.Step("foo", "busybox", tb.Command("ls", "-la")),
     ))
     if _, err := c.TaskClient.Create(task); err != nil {
-        t.Fatalf("Failed to create Task `%s`: %s", hwTaskName, err)
+        t.Fatalf("Failed to create Task: %s", err)
     }
     taskRun := tb.TaskRun("apple", namespace, tb.TaskRunSpec(
         tb.TaskRunTaskRef("banana"), tb.TaskRunServiceAccount("inexistent"),
     ))
     if _, err := c.TaskRunClient.Create(taskRun); err != nil {
-        t.Fatalf("Failed to create TaskRun `%s`: %s", hwTaskRunName, err)
+        t.Fatalf("Failed to create TaskRun: %s", err)
     }
 
-    logger.Infof("Waiting for TaskRun %s in namespace %s to fail", hwTaskRunName, namespace)
+    logger.Infof("Waiting for TaskRun in namespace %s to fail", namespace)
     if err := WaitForTaskRunState(c, "apple", TaskRunFailed("apple"), "BuildValidationFailed"); err != nil {
-        t.Errorf("Error waiting for TaskRun %s to finish: %s", hwTaskRunName, err)
+        t.Errorf("Error waiting for TaskRun to finish: %s", err)
     }
 
     pipeline := tb.Pipeline("tomatoes", namespace,
@@ -66,8 +66,8 @@ func TestTaskRunPipelineRunStatus(t *testing.T) {
         t.Fatalf("Failed to create PipelineRun `%s`: %s", "pear", err)
     }
 
-    logger.Infof("Waiting for PipelineRun %s in namespace %s to fail", hwTaskRunName, namespace)
+    logger.Infof("Waiting for PipelineRun in namespace %s to fail", namespace)
     if err := WaitForPipelineRunState(c, "pear", pipelineRunTimeout, PipelineRunFailed("pear"), "BuildValidationFailed"); err != nil {
-        t.Errorf("Error waiting for TaskRun %s to finish: %s", hwTaskRunName, err)
+        t.Errorf("Error waiting for PipelineRun to finish: %s", err)
     }
 }
diff --git a/test/taskrun_test.go b/test/taskrun_test.go
deleted file mode 100644
index 5d1801202dd..00000000000
--- a/test/taskrun_test.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// +build e2e
-
-/*
-Copyright 2018 Knative Authors LLC
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package test
-
-import (
-    "fmt"
-    "strings"
-    "testing"
-
-    knativetest "github.com/knative/pkg/test"
-    "github.com/knative/pkg/test/logging"
-)
-
-// TestTaskRun is an integration test that will verify a very simple "hello world" TaskRun can be
-// executed.
-func TestTaskRun(t *testing.T) {
-    logger := logging.GetContextLogger(t.Name())
-    c, namespace := setup(t, logger)
-
-    knativetest.CleanupOnInterrupt(func() { tearDown(t, logger, c, namespace) }, logger)
-    defer tearDown(t, logger, c, namespace)
-
-    logger.Infof("Creating Task and TaskRun in namespace %s", namespace)
-    if _, err := c.TaskClient.Create(getHelloWorldTask(namespace, []string{"/bin/sh", "-c", fmt.Sprintf("echo %s", taskOutput)})); err != nil {
-        t.Fatalf("Failed to create Task `%s`: %s", hwTaskName, err)
-    }
-    if _, err := c.TaskRunClient.Create(getHelloWorldTaskRun(namespace)); err != nil {
-        t.Fatalf("Failed to create TaskRun `%s`: %s", hwTaskRunName, err)
-    }
-
-    logger.Infof("Waiting for TaskRun %s in namespace %s to complete", hwTaskRunName, namespace)
-    if err := WaitForTaskRunState(c, hwTaskRunName, TaskRunSucceed(hwTaskRunName), "TaskRunSuccess"); err != nil {
-        t.Errorf("Error waiting for TaskRun %s to finish: %s", hwTaskRunName, err)
-    }
-
-    // The volume created with the results will have the same name as the TaskRun
-    logger.Infof("Verifying TaskRun %s output in volume %s", hwTaskRunName, hwTaskRunName)
-    output := getBuildOutputFromVolume(t, logger, c, namespace, taskOutput)
-    if !strings.Contains(output, taskOutput) {
-        t.Fatalf("Expected output %s from pod %s but got %s", buildOutput, hwValidationPodName, output)
-    }
-}