From 13c2142cdd60aaa40337ec5a7296ef2413ef72da Mon Sep 17 00:00:00 2001 From: Vincent Demeester Date: Thu, 19 Mar 2020 14:11:30 +0100 Subject: [PATCH] =?UTF-8?q?Introduce=20v1beta1=20e2e=20go=20tests=20?= =?UTF-8?q?=F0=9F=93=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit … and move v1alpha1 e2e go tests in their own package. Signed-off-by: Vincent Demeester --- test/artifact_bucket_test.go | 201 +++++-- test/clients.go | 17 +- test/controller.go | 42 +- test/pipelinerun_test.go | 374 +++++++++---- test/sidecar_test.go | 44 +- test/v1alpha1/adoc.go | 44 ++ test/v1alpha1/artifact_bucket_test.go | 266 +++++++++ test/v1alpha1/build_logs.go | 70 +++ test/{ => v1alpha1}/cancel_test.go | 0 test/v1alpha1/clients.go | 96 ++++ test/{ => v1alpha1}/cluster_resource_test.go | 0 test/v1alpha1/controller.go | 178 ++++++ test/{ => v1alpha1}/dag_test.go | 0 test/{ => v1alpha1}/duplicate_test.go | 0 test/{ => v1alpha1}/embed_test.go | 0 test/{ => v1alpha1}/entrypoint_test.go | 0 test/{ => v1alpha1}/git_checkout_test.go | 0 test/{ => v1alpha1}/helm_task_test.go | 0 test/v1alpha1/init_test.go | 213 +++++++ test/{ => v1alpha1}/kaniko_task_test.go | 0 test/v1alpha1/ko_test.go | 53 ++ test/v1alpha1/pipelinerun_test.go | 558 +++++++++++++++++++ test/v1alpha1/registry_test.go | 93 ++++ test/{ => v1alpha1}/retry_test.go | 0 test/v1alpha1/secret.go | 63 +++ test/v1alpha1/sidecar_test.go | 163 ++++++ test/{ => v1alpha1}/start_time_test.go | 0 test/{ => v1alpha1}/status_test.go | 0 test/{ => v1alpha1}/taskrun_test.go | 0 test/{ => v1alpha1}/timeout_test.go | 0 test/v1alpha1/wait.go | 223 ++++++++ test/v1alpha1/wait_example_test.go | 62 +++ test/{ => v1alpha1}/wait_test.go | 0 test/{ => v1alpha1}/workingdir_test.go | 0 test/v1alpha1/workspace_test.go | 186 +++++++ test/wait.go | 14 +- test/wait_example_test.go | 4 +- test/workspace_test.go | 153 +++-- 38 files changed, 2836 insertions(+), 281 deletions(-) create mode 100644 test/v1alpha1/adoc.go create mode 100644 test/v1alpha1/artifact_bucket_test.go create mode 100644 test/v1alpha1/build_logs.go rename test/{ => v1alpha1}/cancel_test.go (100%) create mode 100644 test/v1alpha1/clients.go rename test/{ => v1alpha1}/cluster_resource_test.go (100%) create mode 100644 test/v1alpha1/controller.go rename test/{ => v1alpha1}/dag_test.go (100%) rename test/{ => v1alpha1}/duplicate_test.go (100%) rename test/{ => v1alpha1}/embed_test.go (100%) rename test/{ => v1alpha1}/entrypoint_test.go (100%) rename test/{ => v1alpha1}/git_checkout_test.go (100%) rename test/{ => v1alpha1}/helm_task_test.go (100%) create mode 100644 test/v1alpha1/init_test.go rename test/{ => v1alpha1}/kaniko_task_test.go (100%) create mode 100644 test/v1alpha1/ko_test.go create mode 100644 test/v1alpha1/pipelinerun_test.go create mode 100644 test/v1alpha1/registry_test.go rename test/{ => v1alpha1}/retry_test.go (100%) create mode 100644 test/v1alpha1/secret.go create mode 100644 test/v1alpha1/sidecar_test.go rename test/{ => v1alpha1}/start_time_test.go (100%) rename test/{ => v1alpha1}/status_test.go (100%) rename test/{ => v1alpha1}/taskrun_test.go (100%) rename test/{ => v1alpha1}/timeout_test.go (100%) create mode 100644 test/v1alpha1/wait.go create mode 100644 test/v1alpha1/wait_example_test.go rename test/{ => v1alpha1}/wait_test.go (100%) rename test/{ => v1alpha1}/workingdir_test.go (100%) create mode 100644 test/v1alpha1/workspace_test.go diff --git a/test/artifact_bucket_test.go b/test/artifact_bucket_test.go index 0fc1a3338b1..3ffaad85d3b 100644 
--- a/test/artifact_bucket_test.go +++ b/test/artifact_bucket_test.go @@ -26,6 +26,8 @@ import ( "time" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" "github.com/tektoncd/pipeline/pkg/artifacts" tb "github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" @@ -66,28 +68,44 @@ func TestStorageBucketPipelineRun(t *testing.T) { defer deleteBucketSecret(c, t, namespace) t.Logf("Creating GCS bucket %s", bucketName) - createbuckettask := tb.Task("createbuckettask", namespace, tb.TaskSpec( - tb.TaskVolume("bucket-secret-volume", tb.VolumeSource(corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: bucketSecretName, - }, - })), - tb.Step("google/cloud-sdk:alpine", tb.StepName("step1"), - tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", fmt.Sprintf("gcloud auth activate-service-account --key-file /var/secret/bucket-secret/bucket-secret-key && gsutil mb gs://%s", bucketName)), - tb.StepVolumeMount("bucket-secret-volume", fmt.Sprintf("/var/secret/%s", bucketSecretName)), - tb.StepEnvVar("CREDENTIALS", fmt.Sprintf("/var/secret/%s/%s", bucketSecretName, bucketSecretKey)), - ), - ), - ) + createbuckettask := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: "createbuckettask", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "step1", + Image: "google/cloud-sdk:alpine", + Command: []string{"/bin/bash"}, + Args: []string{"-c", fmt.Sprintf("gcloud auth activate-service-account --key-file /var/secret/bucket-secret/bucket-secret-key && gsutil mb gs://%s", bucketName)}, + VolumeMounts: []corev1.VolumeMount{{ + Name: "bucket-secret-volume", + MountPath: fmt.Sprintf("/var/secret/%s", bucketSecretName), + }}, + Env: []corev1.EnvVar{{ + Name: "CREDENTIALS", Value: fmt.Sprintf("/var/secret/%s/%s", bucketSecretName, bucketSecretKey), + }}, + }}}, + Volumes: []corev1.Volume{{ + Name: "bucket-secret-volume", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: bucketSecretName, + }, + }, + }}, + }, + } t.Logf("Creating Task %s", "createbuckettask") if _, err := c.TaskClient.Create(createbuckettask); err != nil { t.Fatalf("Failed to create Task `%s`: %s", "createbuckettask", err) } - createbuckettaskrun := tb.TaskRun("createbuckettaskrun", namespace, - tb.TaskRunSpec(tb.TaskRunTaskRef("createbuckettask"))) + createbuckettaskrun := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: "createbuckettaskrun", Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: "createbuckettask"}, + }, + } t.Logf("Creating TaskRun %s", "createbuckettaskrun") if _, err := c.TaskRunClient.Create(createbuckettaskrun); err != nil { @@ -129,48 +147,97 @@ func TestStorageBucketPipelineRun(t *testing.T) { } t.Logf("Creating Task %s", addFileTaskName) - addFileTask := tb.Task(addFileTaskName, namespace, tb.TaskSpec( - tb.TaskInputs(tb.InputsResource(helloworldResourceName, v1alpha1.PipelineResourceTypeGit)), - tb.TaskOutputs(tb.OutputsResource(helloworldResourceName, v1alpha1.PipelineResourceTypeGit)), - tb.Step("ubuntu", tb.StepName("addfile"), tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "'#!/bin/bash\necho hello' > /workspace/helloworldgit/newfile"), - ), - tb.Step("ubuntu", tb.StepName("make-executable"), tb.StepCommand("chmod"), - tb.StepArgs("+x", "/workspace/helloworldgit/newfile")), - )) + addFileTask := &v1beta1.Task{ + 
ObjectMeta: metav1.ObjectMeta{Name: addFileTaskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{ + Container: corev1.Container{ + Name: "addfile", Image: "ubuntu", + }, + Script: "echo '#!/bin/bash\necho hello' > /workspace/helloworldgit/newfile", + }, { + Container: corev1.Container{ + Name: "make-executable", Image: "ubuntu", + }, + Script: "chmod +x /workspace/helloworldgit/newfile", + }}, + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: helloworldResourceName, Type: resourcev1alpha1.PipelineResourceTypeGit, + }}}, + Outputs: []v1beta1.TaskResource{{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: helloworldResourceName, Type: resourcev1alpha1.PipelineResourceTypeGit, + }}}, + }, + }, + } if _, err := c.TaskClient.Create(addFileTask); err != nil { t.Fatalf("Failed to create Task `%s`: %s", addFileTaskName, err) } t.Logf("Creating Task %s", runFileTaskName) - readFileTask := tb.Task(runFileTaskName, namespace, tb.TaskSpec( - tb.TaskInputs(tb.InputsResource(helloworldResourceName, v1alpha1.PipelineResourceTypeGit)), - tb.Step("ubuntu", tb.StepName("runfile"), tb.StepCommand("/workspace/helloworld/newfile")), - )) + readFileTask := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: runFileTaskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "runfile", Image: "ubuntu", + Command: []string{"/workspace/helloworld/newfile"}, + }}}, + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: helloworldResourceName, Type: resourcev1alpha1.PipelineResourceTypeGit, + }}}, + }, + }, + } if _, err := c.TaskClient.Create(readFileTask); err != nil { t.Fatalf("Failed to create Task `%s`: %s", runFileTaskName, err) } t.Logf("Creating Pipeline %s", bucketTestPipelineName) - bucketTestPipeline := tb.Pipeline(bucketTestPipelineName, namespace, tb.PipelineSpec( - tb.PipelineDeclaredResource("source-repo", "git"), - tb.PipelineTask("addfile", addFileTaskName, - tb.PipelineTaskInputResource("helloworldgit", "source-repo"), - tb.PipelineTaskOutputResource("helloworldgit", "source-repo"), - ), - tb.PipelineTask("runfile", runFileTaskName, - tb.PipelineTaskInputResource("helloworldgit", "source-repo", tb.From("addfile")), - ), - )) + bucketTestPipeline := &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: bucketTestPipelineName, Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Resources: []v1beta1.PipelineDeclaredResource{{ + Name: "source-repo", Type: resourcev1alpha1.PipelineResourceTypeGit, + }}, + Tasks: []v1beta1.PipelineTask{{ + Name: "addfile", + TaskRef: &v1beta1.TaskRef{Name: addFileTaskName}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "helloworldgit", Resource: "source-repo", + }}, + Outputs: []v1beta1.PipelineTaskOutputResource{{ + Name: "helloworldgit", Resource: "source-repo", + }}, + }, + }, { + Name: "runfile", + TaskRef: &v1beta1.TaskRef{Name: runFileTaskName}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "helloworldgit", Resource: "source-repo", + }}, + }, + }}, + }, + } if _, err := c.PipelineClient.Create(bucketTestPipeline); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", bucketTestPipelineName, err) } t.Logf("Creating PipelineRun %s", bucketTestPipelineRunName) - bucketTestPipelineRun := 
tb.PipelineRun(bucketTestPipelineRunName, namespace, tb.PipelineRunSpec( - bucketTestPipelineName, - tb.PipelineRunResourceBinding("source-repo", tb.PipelineResourceBindingRef(helloworldResourceName)), - )) + bucketTestPipelineRun := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: bucketTestPipelineRunName, Namespace: namespace}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: bucketTestPipelineName}, + Resources: []v1beta1.PipelineResourceBinding{{ + Name: "source-repo", + ResourceRef: &v1beta1.PipelineResourceRef{Name: helloworldResourceName}, + }}, + }, + } if _, err := c.PipelineRunClient.Create(bucketTestPipelineRun); err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", bucketTestPipelineRunName, err) } @@ -232,28 +299,44 @@ func resetConfigMap(t *testing.T, c *clients, namespace, configName string, valu } func runTaskToDeleteBucket(c *clients, t *testing.T, namespace, bucketName, bucketSecretName, bucketSecretKey string) { - deletelbuckettask := tb.Task("deletelbuckettask", namespace, tb.TaskSpec( - tb.TaskVolume("bucket-secret-volume", tb.VolumeSource(corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: bucketSecretName, - }, - })), - tb.Step("google/cloud-sdk:alpine", tb.StepName("step1"), - tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", fmt.Sprintf("gcloud auth activate-service-account --key-file /var/secret/bucket-secret/bucket-secret-key && gsutil rm -r gs://%s", bucketName)), - tb.StepVolumeMount("bucket-secret-volume", fmt.Sprintf("/var/secret/%s", bucketSecretName)), - tb.StepEnvVar("CREDENTIALS", fmt.Sprintf("/var/secret/%s/%s", bucketSecretName, bucketSecretKey)), - ), - ), - ) + deletelbuckettask := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: "deletelbuckettask", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "step1", + Image: "google/cloud-sdk:alpine", + Command: []string{"/bin/bash"}, + Args: []string{"-c", fmt.Sprintf("gcloud auth activate-service-account --key-file /var/secret/bucket-secret/bucket-secret-key && gsutil rm -r gs://%s", bucketName)}, + VolumeMounts: []corev1.VolumeMount{{ + Name: "bucket-secret-volume", + MountPath: fmt.Sprintf("/var/secret/%s", bucketSecretName), + }}, + Env: []corev1.EnvVar{{ + Name: "CREDENTIALS", Value: fmt.Sprintf("/var/secret/%s/%s", bucketSecretName, bucketSecretKey), + }}, + }}}, + Volumes: []corev1.Volume{{ + Name: "bucket-secret-volume", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: bucketSecretName, + }, + }, + }}, + }, + } t.Logf("Creating Task %s", "deletelbuckettask") if _, err := c.TaskClient.Create(deletelbuckettask); err != nil { t.Fatalf("Failed to create Task `%s`: %s", "deletelbuckettask", err) } - deletelbuckettaskrun := tb.TaskRun("deletelbuckettaskrun", namespace, - tb.TaskRunSpec(tb.TaskRunTaskRef("deletelbuckettask"))) + deletelbuckettaskrun := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: "deletelbuckettaskrun", Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: "deletelbuckettask"}, + }, + } t.Logf("Creating TaskRun %s", "deletelbuckettaskrun") if _, err := c.TaskRunClient.Create(deletelbuckettaskrun); err != nil { diff --git a/test/clients.go b/test/clients.go index 3d4b9efafea..56822302a80 100644 --- a/test/clients.go +++ b/test/clients.go @@ -43,6 +43,7 @@ import ( "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" 
"github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1" resourceversioned "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned" resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/typed/resource/v1alpha1" knativetest "knative.dev/pkg/test" @@ -52,10 +53,10 @@ import ( type clients struct { KubeClient *knativetest.KubeClient - PipelineClient v1alpha1.PipelineInterface - TaskClient v1alpha1.TaskInterface - TaskRunClient v1alpha1.TaskRunInterface - PipelineRunClient v1alpha1.PipelineRunInterface + PipelineClient v1beta1.PipelineInterface + TaskClient v1beta1.TaskInterface + TaskRunClient v1beta1.TaskRunInterface + PipelineRunClient v1beta1.PipelineRunInterface PipelineResourceClient resourcev1alpha1.PipelineResourceInterface ConditionClient v1alpha1.ConditionInterface } @@ -86,10 +87,10 @@ func newClients(t *testing.T, configPath, clusterName, namespace string) *client if err != nil { t.Fatalf("failed to create pipeline clientset from config file at %s: %s", configPath, err) } - c.PipelineClient = cs.TektonV1alpha1().Pipelines(namespace) - c.TaskClient = cs.TektonV1alpha1().Tasks(namespace) - c.TaskRunClient = cs.TektonV1alpha1().TaskRuns(namespace) - c.PipelineRunClient = cs.TektonV1alpha1().PipelineRuns(namespace) + c.PipelineClient = cs.TektonV1beta1().Pipelines(namespace) + c.TaskClient = cs.TektonV1beta1().Tasks(namespace) + c.TaskRunClient = cs.TektonV1beta1().TaskRuns(namespace) + c.PipelineRunClient = cs.TektonV1beta1().PipelineRuns(namespace) c.PipelineResourceClient = rcs.TektonV1alpha1().PipelineResources(namespace) c.ConditionClient = cs.TektonV1alpha1().Conditions(namespace) return c diff --git a/test/controller.go b/test/controller.go index e02c848b049..a229b5704b3 100644 --- a/test/controller.go +++ b/test/controller.go @@ -22,15 +22,17 @@ import ( // Link in the fakes so they get injected into injection.Fake "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" fakepipelineclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" informersv1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1" + informersv1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1" fakepipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client/fake" - fakeclustertaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/clustertask/fake" fakeconditioninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/condition/fake" - fakepipelineinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipeline/fake" - fakepipelineruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelinerun/fake" - faketaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/task/fake" - faketaskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/taskrun/fake" + fakeclustertaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/clustertask/fake" + fakepipelineinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipeline/fake" + fakepipelineruninformer 
"github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun/fake" + faketaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/task/fake" + faketaskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun/fake" fakeresourceclientset "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/fake" resourceinformersv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/resource/v1alpha1" fakeresourceclient "github.com/tektoncd/pipeline/pkg/client/resource/injection/client/fake" @@ -46,11 +48,11 @@ import ( // Data represents the desired state of the system (i.e. existing resources) to seed controllers // with. type Data struct { - PipelineRuns []*v1alpha1.PipelineRun - Pipelines []*v1alpha1.Pipeline - TaskRuns []*v1alpha1.TaskRun - Tasks []*v1alpha1.Task - ClusterTasks []*v1alpha1.ClusterTask + PipelineRuns []*v1beta1.PipelineRun + Pipelines []*v1beta1.Pipeline + TaskRuns []*v1beta1.TaskRun + Tasks []*v1beta1.Task + ClusterTasks []*v1beta1.ClusterTask PipelineResources []*v1alpha1.PipelineResource Conditions []*v1alpha1.Condition Pods []*corev1.Pod @@ -66,11 +68,11 @@ type Clients struct { // Informers holds references to informers which are useful for reconciler tests. type Informers struct { - PipelineRun informersv1alpha1.PipelineRunInformer - Pipeline informersv1alpha1.PipelineInformer - TaskRun informersv1alpha1.TaskRunInformer - Task informersv1alpha1.TaskInformer - ClusterTask informersv1alpha1.ClusterTaskInformer + PipelineRun informersv1beta1.PipelineRunInformer + Pipeline informersv1beta1.PipelineInformer + TaskRun informersv1beta1.TaskRunInformer + Task informersv1beta1.TaskInformer + ClusterTask informersv1beta1.ClusterTaskInformer PipelineResource resourceinformersv1alpha1.PipelineResourceInformer Condition informersv1alpha1.ConditionInformer Pod coreinformers.PodInformer @@ -107,7 +109,7 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers if err := i.PipelineRun.Informer().GetIndexer().Add(pr); err != nil { t.Fatal(err) } - if _, err := c.Pipeline.TektonV1alpha1().PipelineRuns(pr.Namespace).Create(pr); err != nil { + if _, err := c.Pipeline.TektonV1beta1().PipelineRuns(pr.Namespace).Create(pr); err != nil { t.Fatal(err) } } @@ -115,7 +117,7 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers if err := i.Pipeline.Informer().GetIndexer().Add(p); err != nil { t.Fatal(err) } - if _, err := c.Pipeline.TektonV1alpha1().Pipelines(p.Namespace).Create(p); err != nil { + if _, err := c.Pipeline.TektonV1beta1().Pipelines(p.Namespace).Create(p); err != nil { t.Fatal(err) } } @@ -123,7 +125,7 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers if err := i.TaskRun.Informer().GetIndexer().Add(tr); err != nil { t.Fatal(err) } - if _, err := c.Pipeline.TektonV1alpha1().TaskRuns(tr.Namespace).Create(tr); err != nil { + if _, err := c.Pipeline.TektonV1beta1().TaskRuns(tr.Namespace).Create(tr); err != nil { t.Fatal(err) } } @@ -131,7 +133,7 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers if err := i.Task.Informer().GetIndexer().Add(ta); err != nil { t.Fatal(err) } - if _, err := c.Pipeline.TektonV1alpha1().Tasks(ta.Namespace).Create(ta); err != nil { + if _, err := c.Pipeline.TektonV1beta1().Tasks(ta.Namespace).Create(ta); err != nil { t.Fatal(err) } } @@ -139,7 +141,7 @@ func SeedTestData(t *testing.T, ctx 
context.Context, d Data) (Clients, Informers if err := i.ClusterTask.Informer().GetIndexer().Add(ct); err != nil { t.Fatal(err) } - if _, err := c.Pipeline.TektonV1alpha1().ClusterTasks().Create(ct); err != nil { + if _, err := c.Pipeline.TektonV1beta1().ClusterTasks().Create(ct); err != nil { t.Fatal(err) } } diff --git a/test/pipelinerun_test.go b/test/pipelinerun_test.go index 575c67996ae..d3990670b52 100644 --- a/test/pipelinerun_test.go +++ b/test/pipelinerun_test.go @@ -27,6 +27,8 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/pipeline" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + resource "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" "github.com/tektoncd/pipeline/pkg/artifacts" tb "github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" @@ -46,6 +48,8 @@ var ( task1Name = "task1" cond1Name = "cond-1" pipelineRunTimeout = 10 * time.Minute + + kanikoGitResourceName = "go-example-git" ) func TestPipelineRun(t *testing.T) { @@ -55,7 +59,7 @@ func TestPipelineRun(t *testing.T) { testSetup func(t *testing.T, c *clients, namespace string, index int) expectedTaskRuns []string expectedNumberOfEvents int - pipelineRunFunc func(int, string) *v1alpha1.PipelineRun + pipelineRunFunc func(int, string) *v1beta1.PipelineRun } tds := []tests{{ @@ -94,15 +98,24 @@ func TestPipelineRun(t *testing.T) { t.Fatalf("Failed to create SA `%s`: %s", getName(saName, index), err) } - task := tb.Task(getName(taskName, index), namespace, tb.TaskSpec( - tb.TaskInputs(tb.InputsParamSpec("path", v1alpha1.ParamTypeString), - tb.InputsParamSpec("dest", v1alpha1.ParamTypeString)), - // Reference build: https://github.com/knative/build/tree/master/test/docker-basic - tb.Step("quay.io/rhpipeline/skopeo:alpine", tb.StepName("config-docker"), - tb.StepCommand("skopeo"), - tb.StepArgs("copy", "$(inputs.params.path)", "$(inputs.params.dest)"), - ), - )) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: getName(taskName, index), Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Params: []v1beta1.ParamSpec{{ + Name: "path", Type: v1beta1.ParamTypeString, + }, { + Name: "dest", Type: v1beta1.ParamTypeString, + }}, + Steps: []v1beta1.Step{{ + Container: corev1.Container{ + Name: "config-docker", + Image: "quay.io/rhpipeline/skopeo:alpine", + Command: []string{"skopeo"}, + Args: []string{"copy", "$(params.path)", "$(params.dest)"}, + }}, + }, + }, + } if _, err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err) } @@ -124,12 +137,16 @@ func TestPipelineRun(t *testing.T) { t.Fatalf("Failed to create Condition `%s`: %s", cond1Name, err) } - task := tb.Task(getName(taskName, index), namespace, tb.TaskSpec( - tb.Step("ubuntu", - tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "echo hello, world"), - ), - )) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: getName(taskName, index), Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "echo hello, world"}, + }}}, + }, + } if _, err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err) } @@ -228,87 +245,168 @@ func TestPipelineRun(t *testing.T) { } } -func getHelloWorldPipelineWithSingularTask(suffix int, namespace string) *v1alpha1.Pipeline { - return tb.Pipeline(getName(pipelineName, suffix), namespace, 
tb.PipelineSpec( - tb.PipelineParamSpec("path", v1alpha1.ParamTypeString), - tb.PipelineParamSpec("dest", v1alpha1.ParamTypeString), - tb.PipelineTask(task1Name, getName(taskName, suffix), - tb.PipelineTaskParam("path", "$(params.path)"), - tb.PipelineTaskParam("dest", "$(params.dest)")), - )) +func getHelloWorldPipelineWithSingularTask(suffix int, namespace string) *v1beta1.Pipeline { + return &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: getName(pipelineName, suffix), Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{{ + Name: "path", Type: v1beta1.ParamTypeString, + }, { + Name: "dest", Type: v1beta1.ParamTypeString, + }}, + Tasks: []v1beta1.PipelineTask{{ + Name: task1Name, + TaskRef: &v1beta1.TaskRef{Name: getName(taskName, suffix)}, + Params: []v1beta1.Param{{ + Name: "path", Value: v1beta1.NewArrayOrString("$(params.path)"), + }, { + Name: "dest", Value: v1beta1.NewArrayOrString("$(params.dest)"), + }}, + }}, + }, + } } -func getFanInFanOutTasks(namespace string) []*v1alpha1.Task { - inWorkspaceResource := tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit) - outWorkspaceResource := tb.OutputsResource("workspace", v1alpha1.PipelineResourceTypeGit) - return []*v1alpha1.Task{ - tb.Task("create-file", namespace, tb.TaskSpec( - tb.TaskInputs(tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit, - tb.ResourceTargetPath("brandnewspace"), - )), - tb.TaskOutputs(outWorkspaceResource), - tb.Step("ubuntu", tb.StepName("write-data-task-0-step-0"), tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "echo stuff > $(outputs.resources.workspace.path)/stuff"), - ), - tb.Step("ubuntu", tb.StepName("write-data-task-0-step-1"), tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "echo other > $(outputs.resources.workspace.path)/other"), - ), - )), - tb.Task("check-create-files-exists", namespace, tb.TaskSpec( - tb.TaskInputs(inWorkspaceResource), - tb.TaskOutputs(outWorkspaceResource), - tb.Step("ubuntu", tb.StepName("read-from-task-0"), tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "[[ stuff == $(cat $(inputs.resources.workspace.path)/stuff) ]]"), - ), - tb.Step("ubuntu", tb.StepName("write-data-task-1"), tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "echo something > $(outputs.resources.workspace.path)/something"), - ), - )), - tb.Task("check-create-files-exists-2", namespace, tb.TaskSpec( - tb.TaskInputs(inWorkspaceResource), - tb.TaskOutputs(outWorkspaceResource), - tb.Step("ubuntu", tb.StepName("read-from-task-0"), tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "[[ other == $(cat $(inputs.resources.workspace.path)/other) ]]"), - ), - tb.Step("ubuntu", tb.StepName("write-data-task-1"), tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "echo else > $(outputs.resources.workspace.path)/else"), - ), - )), - tb.Task("read-files", namespace, tb.TaskSpec( - tb.TaskInputs(tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit, - tb.ResourceTargetPath("readingspace"), - )), - tb.Step("ubuntu", tb.StepName("read-from-task-0"), tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "[[ something == $(cat $(inputs.resources.workspace.path)/something) ]]"), - ), - tb.Step("ubuntu", tb.StepName("read-from-task-1"), tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "[[ else == $(cat $(inputs.resources.workspace.path)/else) ]]"), - ), - )), - } +func getFanInFanOutTasks(namespace string) []*v1beta1.Task { + workspaceResource := v1beta1.TaskResource{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "workspace", + Type: 
resource.PipelineResourceTypeGit, + }} + return []*v1beta1.Task{{ + ObjectMeta: metav1.ObjectMeta{Name: "create-file", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "workspace", + Type: resource.PipelineResourceTypeGit, + TargetPath: "brandnewspace", + }}}, + Outputs: []v1beta1.TaskResource{workspaceResource}, + }, + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "write-data-task-0-step-0", + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "echo stuff > $(resources.outputs.workspace.path)/stuff"}, + }}, {Container: corev1.Container{ + Name: "write-data-task-0-step-1", + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "echo other > $(resources.outputs.workspace.path)/other"}, + }}}, + }, + }, { + ObjectMeta: metav1.ObjectMeta{Name: "check-create-files-exists", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{workspaceResource}, + Outputs: []v1beta1.TaskResource{workspaceResource}, + }, + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "read-from-task-0", + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "[[ stuff == $(cat $(inputs.resources.workspace.path)/stuff) ]]"}, + }}, {Container: corev1.Container{ + Name: "write-data-task-1", + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "echo something > $(outputs.resources.workspace.path)/something"}, + }}}, + }, + }, { + ObjectMeta: metav1.ObjectMeta{Name: "check-create-files-exists-2", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{workspaceResource}, + Outputs: []v1beta1.TaskResource{workspaceResource}, + }, + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "read-from-task-0", + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "[[ other == $(cat $(inputs.resources.workspace.path)/other) ]]"}, + }}, {Container: corev1.Container{ + Name: "write-data-task-1", + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "echo else > $(outputs.resources.workspace.path)/else"}, + }}}, + }, + }, { + ObjectMeta: metav1.ObjectMeta{Name: "read-files", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "workspace", + Type: resource.PipelineResourceTypeGit, + TargetPath: "readingspace", + }}}, + }, + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "read-from-task-0", + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "[[ something == $(cat $(inputs.resources.workspace.path)/something) ]]"}, + }}, {Container: corev1.Container{ + Name: "read-from-task-1", + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "[[ else == $(cat $(inputs.resources.workspace.path)/else) ]]"}, + }}}, + }, + }} } -func getFanInFanOutPipeline(suffix int, namespace string) *v1alpha1.Pipeline { - outGitResource := tb.PipelineTaskOutputResource("workspace", "git-repo") - - return tb.Pipeline(getName(pipelineName, suffix), namespace, tb.PipelineSpec( - tb.PipelineDeclaredResource("git-repo", "git"), - tb.PipelineTask("create-file-kritis", "create-file", - tb.PipelineTaskInputResource("workspace", "git-repo"), - outGitResource, - ), - 
tb.PipelineTask("create-fan-out-1", "check-create-files-exists", - tb.PipelineTaskInputResource("workspace", "git-repo", tb.From("create-file-kritis")), - outGitResource, - ), - tb.PipelineTask("create-fan-out-2", "check-create-files-exists-2", - tb.PipelineTaskInputResource("workspace", "git-repo", tb.From("create-file-kritis")), - outGitResource, - ), - tb.PipelineTask("check-fan-in", "read-files", - tb.PipelineTaskInputResource("workspace", "git-repo", tb.From("create-fan-out-2", "create-fan-out-1")), - ), - )) +func getFanInFanOutPipeline(suffix int, namespace string) *v1beta1.Pipeline { + outGitResource := v1beta1.PipelineTaskOutputResource{ + Name: "workspace", + Resource: "git-repo", + } + return &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: getName(pipelineName, suffix), Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Resources: []v1beta1.PipelineDeclaredResource{{ + Name: "git-repo", Type: resource.PipelineResourceTypeGit, + }}, + Tasks: []v1beta1.PipelineTask{{ + Name: "create-file-kritis", + TaskRef: &v1beta1.TaskRef{Name: "create-file"}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "workspace", Resource: "git-repo", + }}, + Outputs: []v1beta1.PipelineTaskOutputResource{outGitResource}, + }, + }, { + Name: "create-fan-out-1", + TaskRef: &v1beta1.TaskRef{Name: "check-create-files-exists"}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "workspace", Resource: "git-repo", From: []string{"create-file-kritis"}, + }}, + Outputs: []v1beta1.PipelineTaskOutputResource{outGitResource}, + }, + }, { + Name: "create-fan-out-2", + TaskRef: &v1beta1.TaskRef{Name: "check-create-files-exists-2"}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "workspace", Resource: "git-repo", From: []string{"create-file-kritis"}, + }}, + Outputs: []v1beta1.PipelineTaskOutputResource{outGitResource}, + }, + }, { + Name: "check-fan-in", + TaskRef: &v1beta1.TaskRef{Name: "read-files"}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "workspace", Resource: "git-repo", From: []string{"create-fan-out-2", "create-fan-out-1"}, + }}, + }, + }}, + }, + } } func getFanInFanOutGitResources(namespace string) []*v1alpha1.PipelineResource { @@ -332,11 +430,17 @@ func getPipelineRunServiceAccount(suffix int, namespace string) *corev1.ServiceA }}, } } -func getFanInFanOutPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun { - return tb.PipelineRun(getName(pipelineRunName, suffix), namespace, - tb.PipelineRunSpec(getName(pipelineName, suffix), - tb.PipelineRunResourceBinding("git-repo", tb.PipelineResourceBindingRef("kritis-resource-git")), - )) +func getFanInFanOutPipelineRun(suffix int, namespace string) *v1beta1.PipelineRun { + return &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: getName(pipelineRunName, suffix), Namespace: namespace}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: getName(pipelineName, suffix)}, + Resources: []v1beta1.PipelineResourceBinding{{ + Name: "git-repo", + ResourceRef: &v1beta1.PipelineResourceRef{Name: "kritis-resource-git"}, + }}, + }, + } } func getPipelineRunSecret(suffix int, namespace string) *corev1.Secret { @@ -368,15 +472,26 @@ func getPipelineRunSecret(suffix int, namespace string) *corev1.Secret { } } -func getHelloWorldPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun { - return 
tb.PipelineRun(getName(pipelineRunName, suffix), namespace, - tb.PipelineRunLabel("hello-world-key", "hello-world-value"), - tb.PipelineRunSpec(getName(pipelineName, suffix), - tb.PipelineRunParam("path", "docker://gcr.io/build-crd-testing/secret-sauce"), - tb.PipelineRunParam("dest", "dir:///tmp/"), - tb.PipelineRunServiceAccountName(fmt.Sprintf("%s%d", saName, suffix)), - ), - ) +func getHelloWorldPipelineRun(suffix int, namespace string) *v1beta1.PipelineRun { + return &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: getName(pipelineRunName, suffix), Namespace: namespace, + Labels: map[string]string{ + "hello-world-key": "hello-world-value", + }, + }, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: getName(pipelineName, suffix)}, + Params: []v1beta1.Param{{ + Name: "path", + Value: v1beta1.NewArrayOrString("docker://gcr.io/build-crd-testing/secret-sauce"), + }, { + Name: "dest", + Value: v1beta1.NewArrayOrString("dir:///tmp/"), + }}, + ServiceAccountName: fmt.Sprintf("%s%d", saName, suffix), + }, + } } func getName(namespace string, suffix int) string { @@ -418,7 +533,7 @@ func collectMatchingEvents(kubeClient *knativetest.KubeClient, namespace string, // checkLabelPropagation checks that labels are correctly propagating from // Pipelines, PipelineRuns, and Tasks to TaskRuns and Pods. -func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1alpha1.TaskRun) { +func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1beta1.TaskRun) { // Our controllers add 4 labels automatically. If custom labels are set on // the Pipeline, PipelineRun, or Task then the map will have to be resized. labels := make(map[string]string, 4) @@ -471,7 +586,7 @@ func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineR // checkAnnotationPropagation checks that annotations are correctly propagating from // Pipelines, PipelineRuns, and Tasks to TaskRuns and Pods. -func checkAnnotationPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1alpha1.TaskRun) { +func checkAnnotationPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1beta1.TaskRun) { annotations := make(map[string]string) // Check annotation propagation to PipelineRuns. @@ -508,7 +623,7 @@ func checkAnnotationPropagation(t *testing.T, c *clients, namespace string, pipe assertAnnotationsMatch(t, annotations, pod.ObjectMeta.Annotations) } -func getPodForTaskRun(t *testing.T, kubeClient *knativetest.KubeClient, namespace string, tr *v1alpha1.TaskRun) *corev1.Pod { +func getPodForTaskRun(t *testing.T, kubeClient *knativetest.KubeClient, namespace string, tr *v1beta1.TaskRun) *corev1.Pod { // The Pod name has a random suffix, so we filter by label to find the one we care about. 
pods, err := kubeClient.Kube.CoreV1().Pods(namespace).List(metav1.ListOptions{ LabelSelector: pipeline.GroupName + pipeline.TaskRunLabelKey + " = " + tr.Name, @@ -538,11 +653,23 @@ func assertAnnotationsMatch(t *testing.T, expectedAnnotations, actualAnnotations } } -func getPipelineWithFailingCondition(suffix int, namespace string) *v1alpha1.Pipeline { - return tb.Pipeline(getName(pipelineName, suffix), namespace, tb.PipelineSpec( - tb.PipelineTask(task1Name, getName(taskName, suffix), tb.PipelineTaskCondition(cond1Name)), - tb.PipelineTask("task2", getName(taskName, suffix), tb.RunAfter(task1Name)), - )) +func getPipelineWithFailingCondition(suffix int, namespace string) *v1beta1.Pipeline { + return &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: getName(pipelineName, suffix), Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Tasks: []v1beta1.PipelineTask{{ + Name: task1Name, + TaskRef: &v1beta1.TaskRef{Name: getName(taskName, suffix)}, + Conditions: []v1beta1.PipelineTaskCondition{{ + ConditionRef: cond1Name, + }}, + }, { + Name: "task2", + TaskRef: &v1beta1.TaskRef{Name: getName(taskName, suffix)}, + RunAfter: []string{task1Name}, + }}, + }, + } } func getFailingCondition(namespace string) *v1alpha1.Condition { @@ -550,9 +677,16 @@ func getFailingCondition(namespace string) *v1alpha1.Condition { tb.Command("/bin/bash"), tb.Args("exit 1")))) } -func getConditionalPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun { - return tb.PipelineRun(getName(pipelineRunName, suffix), namespace, - tb.PipelineRunLabel("hello-world-key", "hello-world-value"), - tb.PipelineRunSpec(getName(pipelineName, suffix)), - ) +func getConditionalPipelineRun(suffix int, namespace string) *v1beta1.PipelineRun { + return &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: getName(pipelineRunName, suffix), Namespace: namespace, + Labels: map[string]string{ + "hello-world-key": "hello-world-value", + }, + }, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: getName(pipelineName, suffix)}, + }, + } } diff --git a/test/sidecar_test.go b/test/sidecar_test.go index b2186e48b23..90b0168a27d 100644 --- a/test/sidecar_test.go +++ b/test/sidecar_test.go @@ -23,8 +23,7 @@ import ( "testing" "time" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - tb "github.com/tektoncd/pipeline/test/builder" + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -60,26 +59,25 @@ func TestSidecarTaskSupport(t *testing.T) { t.Run(test.desc, func(t *testing.T) { sidecarTaskName := fmt.Sprintf("%s-%d", sidecarTaskName, i) sidecarTaskRunName := fmt.Sprintf("%s-%d", sidecarTaskRunName, i) - task := tb.Task(sidecarTaskName, namespace, - tb.TaskSpec( - tb.Step( - "busybox:1.31.0-musl", - tb.StepName(primaryContainerName), - tb.StepCommand(test.stepCommand...), - ), - tb.Sidecar( - sidecarContainerName, - "busybox:1.31.0-musl", - tb.Command(test.sidecarCommand...), - ), - ), - ) - - taskRun := tb.TaskRun(sidecarTaskRunName, namespace, - tb.TaskRunSpec(tb.TaskRunTaskRef(sidecarTaskName), - tb.TaskRunTimeout(1*time.Minute), - ), - ) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: sidecarTaskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{ + Container: corev1.Container{Name: primaryContainerName, Image: "busybox:1.31.0-musl", Command: test.stepCommand}, + }}, + Sidecars: []v1beta1.Sidecar{{ + Container: corev1.Container{Name: 
sidecarContainerName, Image: "busybox:1.31.0-musl", Command: test.sidecarCommand}, + }}, + }, + } + + taskRun := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: sidecarTaskRunName, Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: sidecarTaskName}, + Timeout: &metav1.Duration{Duration: 1 * time.Minute}, + }, + } t.Logf("Creating Task %q", sidecarTaskName) if _, err := clients.TaskClient.Create(task); err != nil { @@ -92,7 +90,7 @@ func TestSidecarTaskSupport(t *testing.T) { } var podName string - if err := WaitForTaskRunState(clients, sidecarTaskRunName, func(tr *v1alpha1.TaskRun) (bool, error) { + if err := WaitForTaskRunState(clients, sidecarTaskRunName, func(tr *v1beta1.TaskRun) (bool, error) { podName = tr.Status.PodName return TaskRunSucceed(sidecarTaskRunName)(tr) }, "TaskRunSucceed"); err != nil { diff --git a/test/v1alpha1/adoc.go b/test/v1alpha1/adoc.go new file mode 100644 index 00000000000..3deae97650b --- /dev/null +++ b/test/v1alpha1/adoc.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package test holds the project's test helpers and end-to-end tests (e2e). + +Create Pipeline resources + +To create Tekton objects (e.g. Task, Pipeline, …), you +can use the builder (./builder) package to reduce noise: + + func MyTest(t *testing.T){ + // Pipeline + pipeline := tb.Pipeline("tomatoes", "namespace", + tb.PipelineSpec(tb.PipelineTask("foo", "banana")), + ) + // … and PipelineRun + pipelineRun := tb.PipelineRun("pear", "namespace", + tb.PipelineRunSpec("tomatoes", tb.PipelineRunServiceAccount("inexistent")), + ) + // And do something with them + // […] + if _, err := c.PipelineClient.Create(pipeline); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", "tomatoes", err) + } + if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + t.Fatalf("Failed to create PipelineRun `%s`: %s", "pear", err) + } + } +*/ +package test diff --git a/test/v1alpha1/artifact_bucket_test.go b/test/v1alpha1/artifact_bucket_test.go new file mode 100644 index 00000000000..0fc1a3338b1 --- /dev/null +++ b/test/v1alpha1/artifact_bucket_test.go @@ -0,0 +1,266 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/artifacts" + tb "github.com/tektoncd/pipeline/test/builder" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knativetest "knative.dev/pkg/test" +) + +const ( + helloworldResourceName = "helloworldgit" + addFileTaskName = "add-file-to-resource-task" + runFileTaskName = "run-new-file-task" + bucketTestPipelineName = "bucket-test-pipeline" + bucketTestPipelineRunName = "bucket-test-pipeline-run" + systemNamespace = "tekton-pipelines" + bucketSecretName = "bucket-secret" + bucketSecretKey = "bucket-secret-key" +) + +// TestStorageBucketPipelineRun is an integration test that will verify a pipeline +// can use a bucket for temporary storage of artifacts shared between tasks +func TestStorageBucketPipelineRun(t *testing.T) { + configFilePath := os.Getenv("GCP_SERVICE_ACCOUNT_KEY_PATH") + if configFilePath == "" { + t.Skip("GCP_SERVICE_ACCOUNT_KEY_PATH variable is not set.") + } + c, namespace := setup(t) + // Bucket tests can't run in parallel without causing issues with other tests. + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + bucketName := fmt.Sprintf("build-pipeline-test-%s-%d", namespace, time.Now().Unix()) + + t.Logf("Creating Secret %s", bucketSecretName) + if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(getBucketSecret(t, configFilePath, namespace)); err != nil { + t.Fatalf("Failed to create Secret %q: %v", bucketSecretName, err) + } + defer deleteBucketSecret(c, t, namespace) + + t.Logf("Creating GCS bucket %s", bucketName) + createbuckettask := tb.Task("createbuckettask", namespace, tb.TaskSpec( + tb.TaskVolume("bucket-secret-volume", tb.VolumeSource(corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: bucketSecretName, + }, + })), + tb.Step("google/cloud-sdk:alpine", tb.StepName("step1"), + tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", fmt.Sprintf("gcloud auth activate-service-account --key-file /var/secret/bucket-secret/bucket-secret-key && gsutil mb gs://%s", bucketName)), + tb.StepVolumeMount("bucket-secret-volume", fmt.Sprintf("/var/secret/%s", bucketSecretName)), + tb.StepEnvVar("CREDENTIALS", fmt.Sprintf("/var/secret/%s/%s", bucketSecretName, bucketSecretKey)), + ), + ), + ) + + t.Logf("Creating Task %s", "createbuckettask") + if _, err := c.TaskClient.Create(createbuckettask); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", "createbuckettask", err) + } + + createbuckettaskrun := tb.TaskRun("createbuckettaskrun", namespace, + tb.TaskRunSpec(tb.TaskRunTaskRef("createbuckettask"))) + + t.Logf("Creating TaskRun %s", "createbuckettaskrun") + if _, err := c.TaskRunClient.Create(createbuckettaskrun); err != nil { + t.Fatalf("Failed to create TaskRun `%s`: %s", "createbuckettaskrun", err) + } + + if err := WaitForTaskRunState(c, "createbuckettaskrun", TaskRunSucceed("createbuckettaskrun"), "TaskRunSuccess"); err != nil { + t.Errorf("Error waiting for TaskRun %s to finish: %s", "createbuckettaskrun", err) + } + + defer runTaskToDeleteBucket(c, t, namespace, bucketName, bucketSecretName, bucketSecretKey) + + originalConfigMap, err := c.KubeClient.Kube.CoreV1().ConfigMaps(systemNamespace).Get(artifacts.GetBucketConfigName(), metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get ConfigMap `%s`: %s", artifacts.GetBucketConfigName(), err) + } + 
originalConfigMapData := originalConfigMap.Data + + t.Logf("Creating ConfigMap %s", artifacts.GetBucketConfigName()) + configMapData := map[string]string{ + artifacts.BucketLocationKey: fmt.Sprintf("gs://%s", bucketName), + artifacts.BucketServiceAccountSecretName: bucketSecretName, + artifacts.BucketServiceAccountSecretKey: bucketSecretKey, + } + if err := updateConfigMap(c.KubeClient, systemNamespace, artifacts.GetBucketConfigName(), configMapData); err != nil { + t.Fatal(err) + } + defer resetConfigMap(t, c, systemNamespace, artifacts.GetBucketConfigName(), originalConfigMapData) + + t.Logf("Creating Git PipelineResource %s", helloworldResourceName) + helloworldResource := tb.PipelineResource(helloworldResourceName, namespace, tb.PipelineResourceSpec( + v1alpha1.PipelineResourceTypeGit, + tb.PipelineResourceSpecParam("Url", "https://github.com/pivotal-nader-ziada/gohelloworld"), + tb.PipelineResourceSpecParam("Revision", "master"), + ), + ) + if _, err := c.PipelineResourceClient.Create(helloworldResource); err != nil { + t.Fatalf("Failed to create Pipeline Resource `%s`: %s", helloworldResourceName, err) + } + + t.Logf("Creating Task %s", addFileTaskName) + addFileTask := tb.Task(addFileTaskName, namespace, tb.TaskSpec( + tb.TaskInputs(tb.InputsResource(helloworldResourceName, v1alpha1.PipelineResourceTypeGit)), + tb.TaskOutputs(tb.OutputsResource(helloworldResourceName, v1alpha1.PipelineResourceTypeGit)), + tb.Step("ubuntu", tb.StepName("addfile"), tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "'#!/bin/bash\necho hello' > /workspace/helloworldgit/newfile"), + ), + tb.Step("ubuntu", tb.StepName("make-executable"), tb.StepCommand("chmod"), + tb.StepArgs("+x", "/workspace/helloworldgit/newfile")), + )) + if _, err := c.TaskClient.Create(addFileTask); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", addFileTaskName, err) + } + + t.Logf("Creating Task %s", runFileTaskName) + readFileTask := tb.Task(runFileTaskName, namespace, tb.TaskSpec( + tb.TaskInputs(tb.InputsResource(helloworldResourceName, v1alpha1.PipelineResourceTypeGit)), + tb.Step("ubuntu", tb.StepName("runfile"), tb.StepCommand("/workspace/helloworld/newfile")), + )) + if _, err := c.TaskClient.Create(readFileTask); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", runFileTaskName, err) + } + + t.Logf("Creating Pipeline %s", bucketTestPipelineName) + bucketTestPipeline := tb.Pipeline(bucketTestPipelineName, namespace, tb.PipelineSpec( + tb.PipelineDeclaredResource("source-repo", "git"), + tb.PipelineTask("addfile", addFileTaskName, + tb.PipelineTaskInputResource("helloworldgit", "source-repo"), + tb.PipelineTaskOutputResource("helloworldgit", "source-repo"), + ), + tb.PipelineTask("runfile", runFileTaskName, + tb.PipelineTaskInputResource("helloworldgit", "source-repo", tb.From("addfile")), + ), + )) + if _, err := c.PipelineClient.Create(bucketTestPipeline); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", bucketTestPipelineName, err) + } + + t.Logf("Creating PipelineRun %s", bucketTestPipelineRunName) + bucketTestPipelineRun := tb.PipelineRun(bucketTestPipelineRunName, namespace, tb.PipelineRunSpec( + bucketTestPipelineName, + tb.PipelineRunResourceBinding("source-repo", tb.PipelineResourceBindingRef(helloworldResourceName)), + )) + if _, err := c.PipelineRunClient.Create(bucketTestPipelineRun); err != nil { + t.Fatalf("Failed to create PipelineRun `%s`: %s", bucketTestPipelineRunName, err) + } + + // Verify status of PipelineRun (wait for it) + if err := WaitForPipelineRunState(c, 
bucketTestPipelineRunName, timeout, PipelineRunSucceed(bucketTestPipelineRunName), "PipelineRunCompleted"); err != nil { + t.Errorf("Error waiting for PipelineRun %s to finish: %s", bucketTestPipelineRunName, err) + t.Fatalf("PipelineRun execution failed") + } +} + +// updateConfigMap updates the config map for specified @name with values. We can't use the one from knativetest because +// it assumes that Data is already a non-nil map, and by default, it isn't! +func updateConfigMap(client *knativetest.KubeClient, name string, configName string, values map[string]string) error { + configMap, err := client.GetConfigMap(name).Get(configName, metav1.GetOptions{}) + if err != nil { + return err + } + + if configMap.Data == nil { + configMap.Data = make(map[string]string) + } + + for key, value := range values { + configMap.Data[key] = value + } + + _, err = client.GetConfigMap(name).Update(configMap) + return err +} + +func getBucketSecret(t *testing.T, configFilePath, namespace string) *corev1.Secret { + t.Helper() + f, err := ioutil.ReadFile(configFilePath) + if err != nil { + t.Fatalf("Failed to read json key file %s at path %s", err, configFilePath) + } + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: bucketSecretName, + }, + StringData: map[string]string{ + bucketSecretKey: string(f), + }, + } +} + +func deleteBucketSecret(c *clients, t *testing.T, namespace string) { + if err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Delete(bucketSecretName, &metav1.DeleteOptions{}); err != nil { + t.Fatalf("Failed to delete Secret `%s`: %s", bucketSecretName, err) + } +} + +func resetConfigMap(t *testing.T, c *clients, namespace, configName string, values map[string]string) { + if err := updateConfigMap(c.KubeClient, namespace, configName, values); err != nil { + t.Log(err) + } +} + +func runTaskToDeleteBucket(c *clients, t *testing.T, namespace, bucketName, bucketSecretName, bucketSecretKey string) { + deletelbuckettask := tb.Task("deletelbuckettask", namespace, tb.TaskSpec( + tb.TaskVolume("bucket-secret-volume", tb.VolumeSource(corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: bucketSecretName, + }, + })), + tb.Step("google/cloud-sdk:alpine", tb.StepName("step1"), + tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", fmt.Sprintf("gcloud auth activate-service-account --key-file /var/secret/bucket-secret/bucket-secret-key && gsutil rm -r gs://%s", bucketName)), + tb.StepVolumeMount("bucket-secret-volume", fmt.Sprintf("/var/secret/%s", bucketSecretName)), + tb.StepEnvVar("CREDENTIALS", fmt.Sprintf("/var/secret/%s/%s", bucketSecretName, bucketSecretKey)), + ), + ), + ) + + t.Logf("Creating Task %s", "deletelbuckettask") + if _, err := c.TaskClient.Create(deletelbuckettask); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", "deletelbuckettask", err) + } + + deletelbuckettaskrun := tb.TaskRun("deletelbuckettaskrun", namespace, + tb.TaskRunSpec(tb.TaskRunTaskRef("deletelbuckettask"))) + + t.Logf("Creating TaskRun %s", "deletelbuckettaskrun") + if _, err := c.TaskRunClient.Create(deletelbuckettaskrun); err != nil { + t.Fatalf("Failed to create TaskRun `%s`: %s", "deletelbuckettaskrun", err) + } + + if err := WaitForTaskRunState(c, "deletelbuckettaskrun", TaskRunSucceed("deletelbuckettaskrun"), "TaskRunSuccess"); err != nil { + t.Errorf("Error waiting for TaskRun %s to finish: %s", "deletelbuckettaskrun", err) + } +} diff --git a/test/v1alpha1/build_logs.go b/test/v1alpha1/build_logs.go new file mode 100644 index 
00000000000..9b7eac9b0b0 --- /dev/null +++ b/test/v1alpha1/build_logs.go @@ -0,0 +1,70 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "fmt" + "io/ioutil" + "strings" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "knative.dev/pkg/test/logging" +) + +// CollectPodLogs will get the logs for all containers in a Pod +func CollectPodLogs(c *clients, podName, namespace string, logf logging.FormatLogger) { + logs, err := getContainersLogsFromPod(c.KubeClient.Kube, podName, namespace) + if err != nil { + logf("Could not get logs for pod %s: %s", podName, err) + } + logf("build logs %s", logs) +} + +func getContainersLogsFromPod(c kubernetes.Interface, pod, namespace string) (string, error) { + p, err := c.CoreV1().Pods(namespace).Get(pod, metav1.GetOptions{}) + if err != nil { + return "", err + } + + sb := strings.Builder{} + for _, container := range p.Spec.Containers { + sb.WriteString(fmt.Sprintf("\n>>> Container %s:\n", container.Name)) + logs, err := getContainerLogsFromPod(c, pod, container.Name, namespace) + if err != nil { + return "", err + } + sb.WriteString(logs) + } + return sb.String(), nil +} + +func getContainerLogsFromPod(c kubernetes.Interface, pod, container, namespace string) (string, error) { + sb := strings.Builder{} + req := c.CoreV1().Pods(namespace).GetLogs(pod, &corev1.PodLogOptions{Follow: true, Container: container}) + rc, err := req.Stream() + if err != nil { + return "", err + } + bs, err := ioutil.ReadAll(rc) + if err != nil { + return "", err + } + sb.Write(bs) + return sb.String(), nil +} diff --git a/test/cancel_test.go b/test/v1alpha1/cancel_test.go similarity index 100% rename from test/cancel_test.go rename to test/v1alpha1/cancel_test.go diff --git a/test/v1alpha1/clients.go b/test/v1alpha1/clients.go new file mode 100644 index 00000000000..3d4b9efafea --- /dev/null +++ b/test/v1alpha1/clients.go @@ -0,0 +1,96 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Get access to client objects + +To initialize client objects you can use the setup function. 
It returns a clients struct +that contains initialized clients for accessing: + + - Kubernetes objects + - Pipelines (https://github.com/tektoncd/pipeline#pipeline) + +For example, to create a Pipeline + + _, err = clients.PipelineClient.Pipelines.Create(test.Pipeline(namespaceName, pipelineName)) + +And you can use the client to clean up resources created by your test + + func tearDown(clients *test.Clients) { + if clients != nil { + clients.Delete([]string{routeName}, []string{configName}) + } + } + +*/ +package test + +import ( + "testing" + + "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1" + resourceversioned "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned" + resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/typed/resource/v1alpha1" + knativetest "knative.dev/pkg/test" +) + +// clients holds instances of interfaces for making requests to the Pipeline controllers. +type clients struct { + KubeClient *knativetest.KubeClient + + PipelineClient v1alpha1.PipelineInterface + TaskClient v1alpha1.TaskInterface + TaskRunClient v1alpha1.TaskRunInterface + PipelineRunClient v1alpha1.PipelineRunInterface + PipelineResourceClient resourcev1alpha1.PipelineResourceInterface + ConditionClient v1alpha1.ConditionInterface +} + +// newClients instantiates and returns several clientsets required for making requests to the +// Pipeline cluster specified by the combination of clusterName and configPath. Clients can +// make requests within namespace. +func newClients(t *testing.T, configPath, clusterName, namespace string) *clients { + t.Helper() + var err error + c := &clients{} + + c.KubeClient, err = knativetest.NewKubeClient(configPath, clusterName) + if err != nil { + t.Fatalf("failed to create kubeclient from config file at %s: %s", configPath, err) + } + + cfg, err := knativetest.BuildClientConfig(configPath, clusterName) + if err != nil { + t.Fatalf("failed to create configuration obj from %s for cluster %s: %s", configPath, clusterName, err) + } + + cs, err := versioned.NewForConfig(cfg) + if err != nil { + t.Fatalf("failed to create pipeline clientset from config file at %s: %s", configPath, err) + } + rcs, err := resourceversioned.NewForConfig(cfg) + if err != nil { + t.Fatalf("failed to create pipeline clientset from config file at %s: %s", configPath, err) + } + c.PipelineClient = cs.TektonV1alpha1().Pipelines(namespace) + c.TaskClient = cs.TektonV1alpha1().Tasks(namespace) + c.TaskRunClient = cs.TektonV1alpha1().TaskRuns(namespace) + c.PipelineRunClient = cs.TektonV1alpha1().PipelineRuns(namespace) + c.PipelineResourceClient = rcs.TektonV1alpha1().PipelineResources(namespace) + c.ConditionClient = cs.TektonV1alpha1().Conditions(namespace) + return c +} diff --git a/test/cluster_resource_test.go b/test/v1alpha1/cluster_resource_test.go similarity index 100% rename from test/cluster_resource_test.go rename to test/v1alpha1/cluster_resource_test.go diff --git a/test/v1alpha1/controller.go b/test/v1alpha1/controller.go new file mode 100644 index 00000000000..e02c848b049 --- /dev/null +++ b/test/v1alpha1/controller.go @@ -0,0 +1,178 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "context" + "testing" + + // Link in the fakes so they get injected into injection.Fake + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + fakepipelineclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" + informersv1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1" + fakepipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client/fake" + fakeclustertaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/clustertask/fake" + fakeconditioninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/condition/fake" + fakepipelineinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipeline/fake" + fakepipelineruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelinerun/fake" + faketaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/task/fake" + faketaskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/taskrun/fake" + fakeresourceclientset "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/fake" + resourceinformersv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/resource/v1alpha1" + fakeresourceclient "github.com/tektoncd/pipeline/pkg/client/resource/injection/client/fake" + fakeresourceinformer "github.com/tektoncd/pipeline/pkg/client/resource/injection/informers/resource/v1alpha1/pipelineresource/fake" + corev1 "k8s.io/api/core/v1" + coreinformers "k8s.io/client-go/informers/core/v1" + fakekubeclientset "k8s.io/client-go/kubernetes/fake" + fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake" + fakepodinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/pod/fake" + "knative.dev/pkg/controller" +) + +// Data represents the desired state of the system (i.e. existing resources) to seed controllers +// with. +type Data struct { + PipelineRuns []*v1alpha1.PipelineRun + Pipelines []*v1alpha1.Pipeline + TaskRuns []*v1alpha1.TaskRun + Tasks []*v1alpha1.Task + ClusterTasks []*v1alpha1.ClusterTask + PipelineResources []*v1alpha1.PipelineResource + Conditions []*v1alpha1.Condition + Pods []*corev1.Pod + Namespaces []*corev1.Namespace +} + +// Clients holds references to clients which are useful for reconciler tests. +type Clients struct { + Pipeline *fakepipelineclientset.Clientset + Resource *fakeresourceclientset.Clientset + Kube *fakekubeclientset.Clientset +} + +// Informers holds references to informers which are useful for reconciler tests. 
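// For orientation, a rough sketch of how a reconciler test might seed state through
// SeedTestData and read it back via these informers. Illustrative only: the
// SetupFakeContext helper (knative.dev/pkg/reconciler/testing) and the object names
// are assumptions, not something this package prescribes.
//
//	ctx, _ := rtesting.SetupFakeContext(t)
//	clients, informers := SeedTestData(t, ctx, Data{
//		Tasks: []*v1alpha1.Task{{ObjectMeta: metav1.ObjectMeta{Name: "example-task", Namespace: "example-ns"}}},
//	})
//	// The seeded Task is now visible through the fake clientset and through the informer's lister.
//	_, _ = clients.Pipeline.TektonV1alpha1().Tasks("example-ns").Get("example-task", metav1.GetOptions{})
//	_, _ = informers.Task.Lister().Tasks("example-ns").Get("example-task")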
+type Informers struct { + PipelineRun informersv1alpha1.PipelineRunInformer + Pipeline informersv1alpha1.PipelineInformer + TaskRun informersv1alpha1.TaskRunInformer + Task informersv1alpha1.TaskInformer + ClusterTask informersv1alpha1.ClusterTaskInformer + PipelineResource resourceinformersv1alpha1.PipelineResourceInformer + Condition informersv1alpha1.ConditionInformer + Pod coreinformers.PodInformer +} + +// Assets holds references to the controller, logs, clients, and informers. +type Assets struct { + Controller *controller.Impl + Clients Clients +} + +// SeedTestData returns Clients and Informers populated with the +// given Data. +// nolint: golint +func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers) { + c := Clients{ + Kube: fakekubeclient.Get(ctx), + Pipeline: fakepipelineclient.Get(ctx), + Resource: fakeresourceclient.Get(ctx), + } + + i := Informers{ + PipelineRun: fakepipelineruninformer.Get(ctx), + Pipeline: fakepipelineinformer.Get(ctx), + TaskRun: faketaskruninformer.Get(ctx), + Task: faketaskinformer.Get(ctx), + ClusterTask: fakeclustertaskinformer.Get(ctx), + PipelineResource: fakeresourceinformer.Get(ctx), + Condition: fakeconditioninformer.Get(ctx), + Pod: fakepodinformer.Get(ctx), + } + + for _, pr := range d.PipelineRuns { + if err := i.PipelineRun.Informer().GetIndexer().Add(pr); err != nil { + t.Fatal(err) + } + if _, err := c.Pipeline.TektonV1alpha1().PipelineRuns(pr.Namespace).Create(pr); err != nil { + t.Fatal(err) + } + } + for _, p := range d.Pipelines { + if err := i.Pipeline.Informer().GetIndexer().Add(p); err != nil { + t.Fatal(err) + } + if _, err := c.Pipeline.TektonV1alpha1().Pipelines(p.Namespace).Create(p); err != nil { + t.Fatal(err) + } + } + for _, tr := range d.TaskRuns { + if err := i.TaskRun.Informer().GetIndexer().Add(tr); err != nil { + t.Fatal(err) + } + if _, err := c.Pipeline.TektonV1alpha1().TaskRuns(tr.Namespace).Create(tr); err != nil { + t.Fatal(err) + } + } + for _, ta := range d.Tasks { + if err := i.Task.Informer().GetIndexer().Add(ta); err != nil { + t.Fatal(err) + } + if _, err := c.Pipeline.TektonV1alpha1().Tasks(ta.Namespace).Create(ta); err != nil { + t.Fatal(err) + } + } + for _, ct := range d.ClusterTasks { + if err := i.ClusterTask.Informer().GetIndexer().Add(ct); err != nil { + t.Fatal(err) + } + if _, err := c.Pipeline.TektonV1alpha1().ClusterTasks().Create(ct); err != nil { + t.Fatal(err) + } + } + for _, r := range d.PipelineResources { + if err := i.PipelineResource.Informer().GetIndexer().Add(r); err != nil { + t.Fatal(err) + } + if _, err := c.Resource.TektonV1alpha1().PipelineResources(r.Namespace).Create(r); err != nil { + t.Fatal(err) + } + } + for _, cond := range d.Conditions { + if err := i.Condition.Informer().GetIndexer().Add(cond); err != nil { + t.Fatal(err) + } + if _, err := c.Pipeline.TektonV1alpha1().Conditions(cond.Namespace).Create(cond); err != nil { + t.Fatal(err) + } + } + for _, p := range d.Pods { + if err := i.Pod.Informer().GetIndexer().Add(p); err != nil { + t.Fatal(err) + } + if _, err := c.Kube.CoreV1().Pods(p.Namespace).Create(p); err != nil { + t.Fatal(err) + } + } + for _, n := range d.Namespaces { + if _, err := c.Kube.CoreV1().Namespaces().Create(n); err != nil { + t.Fatal(err) + } + } + c.Pipeline.ClearActions() + c.Kube.ClearActions() + return c, i +} diff --git a/test/dag_test.go b/test/v1alpha1/dag_test.go similarity index 100% rename from test/dag_test.go rename to test/v1alpha1/dag_test.go diff --git a/test/duplicate_test.go 
b/test/v1alpha1/duplicate_test.go similarity index 100% rename from test/duplicate_test.go rename to test/v1alpha1/duplicate_test.go diff --git a/test/embed_test.go b/test/v1alpha1/embed_test.go similarity index 100% rename from test/embed_test.go rename to test/v1alpha1/embed_test.go diff --git a/test/entrypoint_test.go b/test/v1alpha1/entrypoint_test.go similarity index 100% rename from test/entrypoint_test.go rename to test/v1alpha1/entrypoint_test.go diff --git a/test/git_checkout_test.go b/test/v1alpha1/git_checkout_test.go similarity index 100% rename from test/git_checkout_test.go rename to test/v1alpha1/git_checkout_test.go diff --git a/test/helm_task_test.go b/test/v1alpha1/helm_task_test.go similarity index 100% rename from test/helm_task_test.go rename to test/v1alpha1/helm_task_test.go diff --git a/test/v1alpha1/init_test.go b/test/v1alpha1/init_test.go new file mode 100644 index 00000000000..efac58ae8ee --- /dev/null +++ b/test/v1alpha1/init_test.go @@ -0,0 +1,213 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains initialization logic for the tests, such as special magical global state that needs to be initialized. + +package test + +import ( + "flag" + "fmt" + "os" + "strings" + "sync" + "testing" + + "github.com/ghodss/yaml" + "github.com/tektoncd/pipeline/pkg/names" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + knativetest "knative.dev/pkg/test" + "knative.dev/pkg/test/logging" + + // Mysteriously by k8s libs, or they fail to create `KubeClient`s from config. Apparently just importing it is enough. @_@ side effects @_@. https://github.com/kubernetes/client-go/issues/242 + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + // Mysteriously by k8s libs, or they fail to create `KubeClient`s when using oidc authentication. Apparently just importing it is enough. @_@ side effects @_@. 
https://github.com/kubernetes/client-go/issues/345 + _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" +) + +var initMetrics sync.Once + +func setup(t *testing.T, fn ...func(*testing.T, *clients, string)) (*clients, string) { + t.Helper() + namespace := names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("arendelle") + + initializeLogsAndMetrics(t) + + c := newClients(t, knativetest.Flags.Kubeconfig, knativetest.Flags.Cluster, namespace) + createNamespace(t, namespace, c.KubeClient) + verifyServiceAccountExistence(t, namespace, c.KubeClient) + + for _, f := range fn { + f(t, c, namespace) + } + + return c, namespace +} + +func header(logf logging.FormatLogger, text string) { + left := "### " + right := " ###" + txt := left + text + right + bar := strings.Repeat("#", len(txt)) + logf(bar) + logf(txt) + logf(bar) +} + +func tearDown(t *testing.T, cs *clients, namespace string) { + t.Helper() + if cs.KubeClient == nil { + return + } + if t.Failed() { + header(t.Logf, fmt.Sprintf("Dumping objects from %s", namespace)) + bs, err := getCRDYaml(cs, namespace) + if err != nil { + t.Error(err) + } else { + t.Log(string(bs)) + } + header(t.Logf, fmt.Sprintf("Dumping logs from Pods in the %s", namespace)) + taskruns, err := cs.TaskRunClient.List(metav1.ListOptions{}) + if err != nil { + t.Errorf("Error getting TaskRun list %s", err) + } + for _, tr := range taskruns.Items { + if tr.Status.PodName != "" { + CollectPodLogs(cs, tr.Status.PodName, namespace, t.Logf) + } + } + } + + if os.Getenv("TEST_KEEP_NAMESPACES") == "" { + t.Logf("Deleting namespace %s", namespace) + if err := cs.KubeClient.Kube.CoreV1().Namespaces().Delete(namespace, &metav1.DeleteOptions{}); err != nil { + t.Errorf("Failed to delete namespace %s: %s", namespace, err) + } + } +} + +func initializeLogsAndMetrics(t *testing.T) { + initMetrics.Do(func() { + flag.Parse() + flag.Set("alsologtostderr", "true") + logging.InitializeLogger() + + //if knativetest.Flags.EmitMetrics { + logging.InitializeMetricExporter(t.Name()) + //} + }) +} + +func createNamespace(t *testing.T, namespace string, kubeClient *knativetest.KubeClient) { + t.Logf("Create namespace %s to deploy to", namespace) + if _, err := kubeClient.Kube.CoreV1().Namespaces().Create(&corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + }); err != nil { + t.Fatalf("Failed to create namespace %s for tests: %s", namespace, err) + } +} + +func verifyServiceAccountExistence(t *testing.T, namespace string, kubeClient *knativetest.KubeClient) { + defaultSA := "default" + t.Logf("Verify SA %q is created in namespace %q", defaultSA, namespace) + + if err := wait.PollImmediate(interval, timeout, func() (bool, error) { + _, err := kubeClient.Kube.CoreV1().ServiceAccounts(namespace).Get(defaultSA, metav1.GetOptions{}) + if err != nil && errors.IsNotFound(err) { + return false, nil + } + return true, err + }); err != nil { + t.Fatalf("Failed to get SA %q in namespace %q for tests: %s", defaultSA, namespace, err) + } +} + +// TestMain initializes anything global needed by the tests. 
Right now this is just log and metric +// setup since the log and metric libs we're using use global state :( +func TestMain(m *testing.M) { + c := m.Run() + fmt.Fprintf(os.Stderr, "Using kubeconfig at `%s` with cluster `%s`\n", knativetest.Flags.Kubeconfig, knativetest.Flags.Cluster) + os.Exit(c) +} + +func getCRDYaml(cs *clients, ns string) ([]byte, error) { + var output []byte + printOrAdd := func(i interface{}) { + bs, err := yaml.Marshal(i) + if err != nil { + return + } + output = append(output, []byte("\n---\n")...) + output = append(output, bs...) + } + + ps, err := cs.PipelineClient.List(metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get pipeline: %w", err) + } + for _, i := range ps.Items { + printOrAdd(i) + } + + prs, err := cs.PipelineResourceClient.List(metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get pipelinerun resource: %w", err) + } + for _, i := range prs.Items { + printOrAdd(i) + } + + prrs, err := cs.PipelineRunClient.List(metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get pipelinerun: %w", err) + } + for _, i := range prrs.Items { + printOrAdd(i) + } + + ts, err := cs.TaskClient.List(metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get tasks: %w", err) + } + for _, i := range ts.Items { + printOrAdd(i) + } + trs, err := cs.TaskRunClient.List(metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get taskrun: %w", err) + } + for _, i := range trs.Items { + printOrAdd(i) + } + + pods, err := cs.KubeClient.Kube.CoreV1().Pods(ns).List(metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get pods: %w", err) + } + for _, i := range pods.Items { + printOrAdd(i) + } + + return output, nil +} diff --git a/test/kaniko_task_test.go b/test/v1alpha1/kaniko_task_test.go similarity index 100% rename from test/kaniko_task_test.go rename to test/v1alpha1/kaniko_task_test.go diff --git a/test/v1alpha1/ko_test.go b/test/v1alpha1/ko_test.go new file mode 100644 index 00000000000..8fd393fd600 --- /dev/null +++ b/test/v1alpha1/ko_test.go @@ -0,0 +1,53 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "errors" + "fmt" + "os" + "testing" +) + +var ( + // Whether missing KO_DOCKER_REPO environment variable should be fatal or not + missingKoFatal = "true" +) + +func ensureDockerRepo(t *testing.T) string { + repo, err := getDockerRepo() + if err != nil { + if missingKoFatal == "false" { + t.Skip("KO_DOCKER_REPO env variable is required") + } + t.Fatal("KO_DOCKER_REPO env variable is required") + } + return repo +} + +func getDockerRepo() (string, error) { + // according to knative/test-infra readme (https://github.com/knative/test-infra/blob/13055d769cc5e1756e605fcb3bcc1c25376699f1/scripts/README.md) + // the KO_DOCKER_REPO will be set according to the project where the cluster is created + // it is used here to dynamically get the docker registry to push the image to + dockerRepo := os.Getenv("KO_DOCKER_REPO") + if dockerRepo == "" { + return "", errors.New("KO_DOCKER_REPO env variable is required") + } + return fmt.Sprintf("%s/kanikotasktest", dockerRepo), nil +} diff --git a/test/v1alpha1/pipelinerun_test.go b/test/v1alpha1/pipelinerun_test.go new file mode 100644 index 00000000000..575c67996ae --- /dev/null +++ b/test/v1alpha1/pipelinerun_test.go @@ -0,0 +1,558 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "encoding/base64" + "fmt" + "strings" + "testing" + "time" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/artifacts" + tb "github.com/tektoncd/pipeline/test/builder" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "knative.dev/pkg/apis" + knativetest "knative.dev/pkg/test" +) + +var ( + pipelineName = "pipeline" + pipelineRunName = "pipelinerun" + secretName = "secret" + saName = "service-account" + taskName = "task" + task1Name = "task1" + cond1Name = "cond-1" + pipelineRunTimeout = 10 * time.Minute +) + +func TestPipelineRun(t *testing.T) { + t.Parallel() + type tests struct { + name string + testSetup func(t *testing.T, c *clients, namespace string, index int) + expectedTaskRuns []string + expectedNumberOfEvents int + pipelineRunFunc func(int, string) *v1alpha1.PipelineRun + } + + tds := []tests{{ + name: "fan-in and fan-out", + testSetup: func(t *testing.T, c *clients, namespace string, index int) { + t.Helper() + for _, task := range getFanInFanOutTasks(namespace) { + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", task.Name, err) + } + } + + for _, res := range getFanInFanOutGitResources(namespace) { + if _, err := c.PipelineResourceClient.Create(res); err != nil { + t.Fatalf("Failed to create Pipeline Resource `%s`: %s", kanikoGitResourceName, err) + } + } + + if _, err := c.PipelineClient.Create(getFanInFanOutPipeline(index, namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) + } + }, + pipelineRunFunc: getFanInFanOutPipelineRun, + expectedTaskRuns: []string{"create-file-kritis", "create-fan-out-1", "create-fan-out-2", "check-fan-in"}, + // 1 from PipelineRun and 4 from Tasks defined in pipelinerun + expectedNumberOfEvents: 5, + }, { + name: "service account propagation and pipeline param", + testSetup: func(t *testing.T, c *clients, namespace string, index int) { + t.Helper() + if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(getPipelineRunSecret(index, namespace)); err != nil { + t.Fatalf("Failed to create secret `%s`: %s", getName(secretName, index), err) + } + + if _, err := c.KubeClient.Kube.CoreV1().ServiceAccounts(namespace).Create(getPipelineRunServiceAccount(index, namespace)); err != nil { + t.Fatalf("Failed to create SA `%s`: %s", getName(saName, index), err) + } + + task := tb.Task(getName(taskName, index), namespace, tb.TaskSpec( + tb.TaskInputs(tb.InputsParamSpec("path", v1alpha1.ParamTypeString), + tb.InputsParamSpec("dest", v1alpha1.ParamTypeString)), + // Reference build: https://github.com/knative/build/tree/master/test/docker-basic + tb.Step("quay.io/rhpipeline/skopeo:alpine", tb.StepName("config-docker"), + tb.StepCommand("skopeo"), + tb.StepArgs("copy", "$(inputs.params.path)", "$(inputs.params.dest)"), + ), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err) + } + + if _, err := c.PipelineClient.Create(getHelloWorldPipelineWithSingularTask(index, namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) + } + }, + expectedTaskRuns: []string{task1Name}, + // 1 from PipelineRun and 1 from Tasks defined in pipelinerun + expectedNumberOfEvents: 2, + pipelineRunFunc: 
getHelloWorldPipelineRun, + }, { + name: "pipeline succeeds when task skipped due to failed condition", + testSetup: func(t *testing.T, c *clients, namespace string, index int) { + t.Helper() + cond := getFailingCondition(namespace) + if _, err := c.ConditionClient.Create(cond); err != nil { + t.Fatalf("Failed to create Condition `%s`: %s", cond1Name, err) + } + + task := tb.Task(getName(taskName, index), namespace, tb.TaskSpec( + tb.Step("ubuntu", + tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "echo hello, world"), + ), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err) + } + if _, err := c.PipelineClient.Create(getPipelineWithFailingCondition(index, namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) + } + }, + expectedTaskRuns: []string{}, + // 1 from PipelineRun; 0 from taskrun since it should not be executed due to condition failing + expectedNumberOfEvents: 1, + pipelineRunFunc: getConditionalPipelineRun, + }} + + for i, td := range tds { + t.Run(td.name, func(t *testing.T) { + td := td + t.Parallel() + c, namespace := setup(t) + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + t.Logf("Setting up test resources for %q test in namespace %s", td.name, namespace) + td.testSetup(t, c, namespace, i) + + prName := fmt.Sprintf("%s%d", pipelineRunName, i) + pipelineRun, err := c.PipelineRunClient.Create(td.pipelineRunFunc(i, namespace)) + if err != nil { + t.Fatalf("Failed to create PipelineRun `%s`: %s", prName, err) + } + + t.Logf("Waiting for PipelineRun %s in namespace %s to complete", prName, namespace) + if err := WaitForPipelineRunState(c, prName, pipelineRunTimeout, PipelineRunSucceed(prName), "PipelineRunSuccess"); err != nil { + t.Fatalf("Error waiting for PipelineRun %s to finish: %s", prName, err) + } + + t.Logf("Making sure the expected TaskRuns %s were created", td.expectedTaskRuns) + actualTaskrunList, err := c.TaskRunClient.List(metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", prName)}) + if err != nil { + t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", prName, err) + } + expectedTaskRunNames := []string{} + for _, runName := range td.expectedTaskRuns { + taskRunName := strings.Join([]string{prName, runName}, "-") + // check the actual task name starting with prName+runName with a random suffix + for _, actualTaskRunItem := range actualTaskrunList.Items { + if strings.HasPrefix(actualTaskRunItem.Name, taskRunName) { + taskRunName = actualTaskRunItem.Name + } + } + expectedTaskRunNames = append(expectedTaskRunNames, taskRunName) + r, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) + } + if !r.Status.GetCondition(apis.ConditionSucceeded).IsTrue() { + t.Fatalf("Expected TaskRun %s to have succeeded but Status is %v", taskRunName, r.Status) + } + + t.Logf("Checking that labels were propagated correctly for TaskRun %s", r.Name) + checkLabelPropagation(t, c, namespace, prName, r) + t.Logf("Checking that annotations were propagated correctly for TaskRun %s", r.Name) + checkAnnotationPropagation(t, c, namespace, prName, r) + } + + matchKinds := map[string][]string{"PipelineRun": {prName}, "TaskRun": expectedTaskRunNames} + + t.Logf("Making sure %d events were created from taskrun and pipelinerun with kinds %v", td.expectedNumberOfEvents, 
matchKinds) + + events, err := collectMatchingEvents(c.KubeClient, namespace, matchKinds, "Succeeded") + if err != nil { + t.Fatalf("Failed to collect matching events: %q", err) + } + if len(events) != td.expectedNumberOfEvents { + t.Fatalf("Expected %d number of successful events from pipelinerun and taskrun but got %d; list of receieved events : %#v", td.expectedNumberOfEvents, len(events), events) + } + + // Wait for up to 10 minutes and restart every second to check if + // the PersistentVolumeClaims has the DeletionTimestamp + if err := wait.PollImmediate(interval, timeout, func() (bool, error) { + // Check to make sure the PipelineRun's artifact storage PVC has been "deleted" at the end of the run. + pvc, errWait := c.KubeClient.Kube.CoreV1().PersistentVolumeClaims(namespace).Get(artifacts.GetPVCName(pipelineRun), metav1.GetOptions{}) + if errWait != nil && !errors.IsNotFound(errWait) { + return true, fmt.Errorf("error looking up PVC %s for PipelineRun %s: %s", artifacts.GetPVCName(pipelineRun), prName, errWait) + } + // If we are not found then we are okay since it got cleaned up + if errors.IsNotFound(errWait) { + return true, nil + } + return pvc.DeletionTimestamp != nil, nil + }); err != nil { + t.Fatalf("Error while waiting for the PVC to be set as deleted: %s: %s: %s", artifacts.GetPVCName(pipelineRun), err, prName) + } + t.Logf("Successfully finished test %q", td.name) + }) + } +} + +func getHelloWorldPipelineWithSingularTask(suffix int, namespace string) *v1alpha1.Pipeline { + return tb.Pipeline(getName(pipelineName, suffix), namespace, tb.PipelineSpec( + tb.PipelineParamSpec("path", v1alpha1.ParamTypeString), + tb.PipelineParamSpec("dest", v1alpha1.ParamTypeString), + tb.PipelineTask(task1Name, getName(taskName, suffix), + tb.PipelineTaskParam("path", "$(params.path)"), + tb.PipelineTaskParam("dest", "$(params.dest)")), + )) +} + +func getFanInFanOutTasks(namespace string) []*v1alpha1.Task { + inWorkspaceResource := tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit) + outWorkspaceResource := tb.OutputsResource("workspace", v1alpha1.PipelineResourceTypeGit) + return []*v1alpha1.Task{ + tb.Task("create-file", namespace, tb.TaskSpec( + tb.TaskInputs(tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit, + tb.ResourceTargetPath("brandnewspace"), + )), + tb.TaskOutputs(outWorkspaceResource), + tb.Step("ubuntu", tb.StepName("write-data-task-0-step-0"), tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "echo stuff > $(outputs.resources.workspace.path)/stuff"), + ), + tb.Step("ubuntu", tb.StepName("write-data-task-0-step-1"), tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "echo other > $(outputs.resources.workspace.path)/other"), + ), + )), + tb.Task("check-create-files-exists", namespace, tb.TaskSpec( + tb.TaskInputs(inWorkspaceResource), + tb.TaskOutputs(outWorkspaceResource), + tb.Step("ubuntu", tb.StepName("read-from-task-0"), tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "[[ stuff == $(cat $(inputs.resources.workspace.path)/stuff) ]]"), + ), + tb.Step("ubuntu", tb.StepName("write-data-task-1"), tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "echo something > $(outputs.resources.workspace.path)/something"), + ), + )), + tb.Task("check-create-files-exists-2", namespace, tb.TaskSpec( + tb.TaskInputs(inWorkspaceResource), + tb.TaskOutputs(outWorkspaceResource), + tb.Step("ubuntu", tb.StepName("read-from-task-0"), tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "[[ other == $(cat $(inputs.resources.workspace.path)/other) ]]"), + ), + tb.Step("ubuntu", 
tb.StepName("write-data-task-1"), tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "echo else > $(outputs.resources.workspace.path)/else"), + ), + )), + tb.Task("read-files", namespace, tb.TaskSpec( + tb.TaskInputs(tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit, + tb.ResourceTargetPath("readingspace"), + )), + tb.Step("ubuntu", tb.StepName("read-from-task-0"), tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "[[ something == $(cat $(inputs.resources.workspace.path)/something) ]]"), + ), + tb.Step("ubuntu", tb.StepName("read-from-task-1"), tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "[[ else == $(cat $(inputs.resources.workspace.path)/else) ]]"), + ), + )), + } +} + +func getFanInFanOutPipeline(suffix int, namespace string) *v1alpha1.Pipeline { + outGitResource := tb.PipelineTaskOutputResource("workspace", "git-repo") + + return tb.Pipeline(getName(pipelineName, suffix), namespace, tb.PipelineSpec( + tb.PipelineDeclaredResource("git-repo", "git"), + tb.PipelineTask("create-file-kritis", "create-file", + tb.PipelineTaskInputResource("workspace", "git-repo"), + outGitResource, + ), + tb.PipelineTask("create-fan-out-1", "check-create-files-exists", + tb.PipelineTaskInputResource("workspace", "git-repo", tb.From("create-file-kritis")), + outGitResource, + ), + tb.PipelineTask("create-fan-out-2", "check-create-files-exists-2", + tb.PipelineTaskInputResource("workspace", "git-repo", tb.From("create-file-kritis")), + outGitResource, + ), + tb.PipelineTask("check-fan-in", "read-files", + tb.PipelineTaskInputResource("workspace", "git-repo", tb.From("create-fan-out-2", "create-fan-out-1")), + ), + )) +} + +func getFanInFanOutGitResources(namespace string) []*v1alpha1.PipelineResource { + return []*v1alpha1.PipelineResource{ + tb.PipelineResource("kritis-resource-git", namespace, tb.PipelineResourceSpec( + v1alpha1.PipelineResourceTypeGit, + tb.PipelineResourceSpecParam("Url", "https://github.com/grafeas/kritis"), + tb.PipelineResourceSpecParam("Revision", "master"), + )), + } +} + +func getPipelineRunServiceAccount(suffix int, namespace string) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: getName(saName, suffix), + }, + Secrets: []corev1.ObjectReference{{ + Name: getName(secretName, suffix), + }}, + } +} +func getFanInFanOutPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun { + return tb.PipelineRun(getName(pipelineRunName, suffix), namespace, + tb.PipelineRunSpec(getName(pipelineName, suffix), + tb.PipelineRunResourceBinding("git-repo", tb.PipelineResourceBindingRef("kritis-resource-git")), + )) +} + +func getPipelineRunSecret(suffix int, namespace string) *corev1.Secret { + // Generated by: + // cat /tmp/key.json | base64 -w 0 + // This service account is JUST a storage reader on gcr.io/build-crd-testing + encoedDockercred := 
"ewogICJ0eXBlIjogInNlcnZpY2VfYWNjb3VudCIsCiAgInByb2plY3RfaWQiOiAiYnVpbGQtY3JkLXRlc3RpbmciLAogICJwcml2YXRlX2tleV9pZCI6ICIwNTAyYTQxYTgxMmZiNjRjZTU2YTY4ZWM1ODMyYWIwYmExMWMxMWU2IiwKICAicHJpdmF0ZV9rZXkiOiAiLS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tXG5NSUlFdlFJQkFEQU5CZ2txaGtpRzl3MEJBUUVGQUFTQ0JLY3dnZ1NqQWdFQUFvSUJBUUM5WDRFWU9BUmJ4UU04XG5EMnhYY2FaVGsrZ1k4ZWp1OTh0THFDUXFUckdNVzlSZVQyeE9ZNUF5Z2FsUFArcDd5WEVja3dCRC9IaE0wZ2xJXG43TVRMZGVlS1dyK3JBMUx3SFp5V0ZXN0gwT25mN3duWUhFSExXVW1jM0JDT1JFRHRIUlo3WnJQQmYxSFRBQS8zXG5Nblc1bFpIU045b2p6U1NGdzZBVnU2ajZheGJCSUlKNzU0THJnS2VBWXVyd2ZJUTJSTFR1MjAxazJJcUxZYmhiXG4zbVNWRzVSK3RiS3oxQ3ZNNTNuSENiN0NmdVZlV3NyQThrazd4SHJyTFFLTW1JOXYyc2dSdWd5TUF6d3ovNnpOXG5oNS9pTXh4Z2VxNVc4eGtWeDNKMm5ZOEpKZEhhZi9UNkFHc09ORW80M3B4ZWlRVmpuUmYvS24xMFRDYzJFc0lZXG5TNDlVc1o3QkFnTUJBQUVDZ2dFQUF1cGxkdWtDUVF1RDVVL2dhbUh0N0dnVzNBTVYxOGVxbkhuQ2EyamxhaCtTXG5BZVVHbmhnSmpOdkUrcE1GbFN2NXVmMnAySzRlZC9veEQ2K0NwOVpYRFJqZ3ZmdEl5cWpsemJ3dkZjZ3p3TnVEXG55Z1VrdXA3SGVjRHNEOFR0ZUFvYlQvVnB3cTZ6S01yQndDdk5rdnk2YlZsb0VqNXgzYlhzYXhlOTVETy95cHU2XG53MFc5N3p4d3dESlk2S1FjSVdNamhyR3h2d1g3bmlVQ2VNNGxlV0JEeUd0dzF6ZUpuNGhFYzZOM2FqUWFjWEtjXG4rNFFseGNpYW1ZcVFXYlBudHhXUWhoUXpjSFdMaTJsOWNGYlpENyt1SkxGNGlONnk4bVZOVTNLM0sxYlJZclNEXG5SVXAzYVVWQlhtRmcrWi8ycHVWTCttVTNqM0xMV1l5Qk9rZXZ1T21kZ1FLQmdRRGUzR0lRa3lXSVMxNFRkTU9TXG5CaUtCQ0R5OGg5NmVoTDBIa0RieU9rU3RQS2RGOXB1RXhaeGh5N29qSENJTTVGVnJwUk4yNXA0c0V6d0ZhYyt2XG5KSUZnRXZxN21YZm1YaVhJTmllUG9FUWFDbm54RHhXZ21yMEhVS0VtUzlvTWRnTGNHVStrQ1ZHTnN6N0FPdW0wXG5LcVkzczIyUTlsUTY3Rk95cWl1OFdGUTdRUUtCZ1FEWmlGaFRFWmtQRWNxWmpud0pwVEI1NlpXUDlLVHNsWlA3XG53VTRiemk2eSttZXlmM01KKzRMMlN5SGMzY3BTTWJqdE5PWkN0NDdiOTA4RlVtTFhVR05oY3d1WmpFUXhGZXkwXG5tNDFjUzVlNFA0OWI5bjZ5TEJqQnJCb3FzMldCYWwyZWdkaE5KU3NDV29pWlA4L1pUOGVnWHZoN2I5MWp6b0syXG5xMlBVbUE0RGdRS0JnQVdMMklqdkVJME95eDJTMTFjbi9lM1dKYVRQZ05QVEc5MDNVcGErcW56aE9JeCtNYXFoXG5QRjRXc3VBeTBBb2dHSndnTkpiTjhIdktVc0VUdkE1d3l5TjM5WE43dzBjaGFyRkwzN29zVStXT0F6RGpuamNzXG5BcTVPN0dQR21YdWI2RUJRQlBKaEpQMXd5NHYvSzFmSGcvRjQ3cTRmNDBMQUpPa2FZUkpENUh6QkFvR0JBTlVoXG5uSUJQSnFxNElNdlE2Y0M5ZzhCKzF4WURlYTkvWWsxdytTbVBHdndyRVh5M0dLeDRLN2xLcGJQejdtNFgzM3N4XG5zRVUvK1kyVlFtd1JhMXhRbS81M3JLN1YybDVKZi9ENDAwalJtNlpmU0FPdmdEVHJ0Wm5VR0pNcno5RTd1Tnc3XG5sZ1VIM0pyaXZ5Ri9meE1JOHFzelFid1hQMCt4bnlxQXhFQWdkdUtCQW9HQUlNK1BTTllXQ1pYeERwU0hJMThkXG5qS2tvQWJ3Mk1veXdRSWxrZXVBbjFkWEZhZDF6c1hRR2RUcm1YeXY3TlBQKzhHWEJrbkJMaTNjdnhUaWxKSVN5XG51Y05yQ01pcU5BU24vZHE3Y1dERlVBQmdqWDE2SkgyRE5GWi9sL1VWRjNOREFKalhDczFYN3lJSnlYQjZveC96XG5hU2xxbElNVjM1REJEN3F4Unl1S3Nnaz1cbi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS1cbiIsCiAgImNsaWVudF9lbWFpbCI6ICJwdWxsLXNlY3JldC10ZXN0aW5nQGJ1aWxkLWNyZC10ZXN0aW5nLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwKICAiY2xpZW50X2lkIjogIjEwNzkzNTg2MjAzMzAyNTI1MTM1MiIsCiAgImF1dGhfdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi9hdXRoIiwKICAidG9rZW5fdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi90b2tlbiIsCiAgImF1dGhfcHJvdmlkZXJfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9vYXV0aDIvdjEvY2VydHMiLAogICJjbGllbnRfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9yb2JvdC92MS9tZXRhZGF0YS94NTA5L3B1bGwtc2VjcmV0LXRlc3RpbmclNDBidWlsZC1jcmQtdGVzdGluZy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIKfQo=" + + decoded, err := base64.StdEncoding.DecodeString(encoedDockercred) + if err != nil { + return nil + } + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: getName(secretName, suffix), + Annotations: map[string]string{ + "tekton.dev/docker-0": "https://us.gcr.io", + "tekton.dev/docker-1": "https://eu.gcr.io", + "tekton.dev/docker-2": "https://asia.gcr.io", + 
"tekton.dev/docker-3": "https://gcr.io", + }, + }, + Type: "kubernetes.io/basic-auth", + Data: map[string][]byte{ + "username": []byte("_json_key"), + "password": decoded, + }, + } +} + +func getHelloWorldPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun { + return tb.PipelineRun(getName(pipelineRunName, suffix), namespace, + tb.PipelineRunLabel("hello-world-key", "hello-world-value"), + tb.PipelineRunSpec(getName(pipelineName, suffix), + tb.PipelineRunParam("path", "docker://gcr.io/build-crd-testing/secret-sauce"), + tb.PipelineRunParam("dest", "dir:///tmp/"), + tb.PipelineRunServiceAccountName(fmt.Sprintf("%s%d", saName, suffix)), + ), + ) +} + +func getName(namespace string, suffix int) string { + return fmt.Sprintf("%s%d", namespace, suffix) +} + +// collectMatchingEvents collects list of events under 5 seconds that match +// 1. matchKinds which is a map of Kind of Object with name of objects +// 2. reason which is the expected reason of event +func collectMatchingEvents(kubeClient *knativetest.KubeClient, namespace string, kinds map[string][]string, reason string) ([]*corev1.Event, error) { + var events []*corev1.Event + + watchEvents, err := kubeClient.Kube.CoreV1().Events(namespace).Watch(metav1.ListOptions{}) + // close watchEvents channel + defer watchEvents.Stop() + if err != nil { + return events, err + } + + // create timer to not wait for events longer than 5 seconds + timer := time.NewTimer(5 * time.Second) + + for { + select { + case wevent := <-watchEvents.ResultChan(): + event := wevent.Object.(*corev1.Event) + if val, ok := kinds[event.InvolvedObject.Kind]; ok { + for _, expectedName := range val { + if event.InvolvedObject.Name == expectedName && event.Reason == reason { + events = append(events, event) + } + } + } + case <-timer.C: + return events, nil + } + } +} + +// checkLabelPropagation checks that labels are correctly propagating from +// Pipelines, PipelineRuns, and Tasks to TaskRuns and Pods. +func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1alpha1.TaskRun) { + // Our controllers add 4 labels automatically. If custom labels are set on + // the Pipeline, PipelineRun, or Task then the map will have to be resized. + labels := make(map[string]string, 4) + + // Check label propagation to PipelineRuns. + pr, err := c.PipelineRunClient.Get(pipelineRunName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected PipelineRun for %s: %s", tr.Name, err) + } + p, err := c.PipelineClient.Get(pr.Spec.PipelineRef.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected Pipeline for %s: %s", pr.Name, err) + } + for key, val := range p.ObjectMeta.Labels { + labels[key] = val + } + // This label is added to every PipelineRun by the PipelineRun controller + labels[pipeline.GroupName+pipeline.PipelineLabelKey] = p.Name + assertLabelsMatch(t, labels, pr.ObjectMeta.Labels) + + // Check label propagation to TaskRuns. 
+ for key, val := range pr.ObjectMeta.Labels { + labels[key] = val + } + // This label is added to every TaskRun by the PipelineRun controller + labels[pipeline.GroupName+pipeline.PipelineRunLabelKey] = pr.Name + if tr.Spec.TaskRef != nil { + task, err := c.TaskClient.Get(tr.Spec.TaskRef.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected Task for %s: %s", tr.Name, err) + } + for key, val := range task.ObjectMeta.Labels { + labels[key] = val + } + // This label is added to TaskRuns that reference a Task by the TaskRun controller + labels[pipeline.GroupName+pipeline.TaskLabelKey] = task.Name + } + assertLabelsMatch(t, labels, tr.ObjectMeta.Labels) + + // PodName is "" iff a retry happened and pod is deleted + // This label is added to every Pod by the TaskRun controller + if tr.Status.PodName != "" { + // Check label propagation to Pods. + pod := getPodForTaskRun(t, c.KubeClient, namespace, tr) + // This label is added to every Pod by the TaskRun controller + labels[pipeline.GroupName+pipeline.TaskRunLabelKey] = tr.Name + assertLabelsMatch(t, labels, pod.ObjectMeta.Labels) + } +} + +// checkAnnotationPropagation checks that annotations are correctly propagating from +// Pipelines, PipelineRuns, and Tasks to TaskRuns and Pods. +func checkAnnotationPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1alpha1.TaskRun) { + annotations := make(map[string]string) + + // Check annotation propagation to PipelineRuns. + pr, err := c.PipelineRunClient.Get(pipelineRunName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected PipelineRun for %s: %s", tr.Name, err) + } + p, err := c.PipelineClient.Get(pr.Spec.PipelineRef.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected Pipeline for %s: %s", pr.Name, err) + } + for key, val := range p.ObjectMeta.Annotations { + annotations[key] = val + } + assertAnnotationsMatch(t, annotations, pr.ObjectMeta.Annotations) + + // Check annotation propagation to TaskRuns. + for key, val := range pr.ObjectMeta.Annotations { + annotations[key] = val + } + if tr.Spec.TaskRef != nil { + task, err := c.TaskClient.Get(tr.Spec.TaskRef.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected Task for %s: %s", tr.Name, err) + } + for key, val := range task.ObjectMeta.Annotations { + annotations[key] = val + } + } + assertAnnotationsMatch(t, annotations, tr.ObjectMeta.Annotations) + + // Check annotation propagation to Pods. + pod := getPodForTaskRun(t, c.KubeClient, namespace, tr) + assertAnnotationsMatch(t, annotations, pod.ObjectMeta.Annotations) +} + +func getPodForTaskRun(t *testing.T, kubeClient *knativetest.KubeClient, namespace string, tr *v1alpha1.TaskRun) *corev1.Pod { + // The Pod name has a random suffix, so we filter by label to find the one we care about. 
+ pods, err := kubeClient.Kube.CoreV1().Pods(namespace).List(metav1.ListOptions{ + LabelSelector: pipeline.GroupName + pipeline.TaskRunLabelKey + " = " + tr.Name, + }) + if err != nil { + t.Fatalf("Couldn't get expected Pod for %s: %s", tr.Name, err) + } + if numPods := len(pods.Items); numPods != 1 { + t.Fatalf("Expected 1 Pod for %s, but got %d Pods", tr.Name, numPods) + } + return &pods.Items[0] +} + +func assertLabelsMatch(t *testing.T, expectedLabels, actualLabels map[string]string) { + for key, expectedVal := range expectedLabels { + if actualVal := actualLabels[key]; actualVal != expectedVal { + t.Errorf("Expected labels containing %s=%s but labels were %v", key, expectedVal, actualLabels) + } + } +} + +func assertAnnotationsMatch(t *testing.T, expectedAnnotations, actualAnnotations map[string]string) { + for key, expectedVal := range expectedAnnotations { + if actualVal := actualAnnotations[key]; actualVal != expectedVal { + t.Errorf("Expected annotations containing %s=%s but annotations were %v", key, expectedVal, actualAnnotations) + } + } +} + +func getPipelineWithFailingCondition(suffix int, namespace string) *v1alpha1.Pipeline { + return tb.Pipeline(getName(pipelineName, suffix), namespace, tb.PipelineSpec( + tb.PipelineTask(task1Name, getName(taskName, suffix), tb.PipelineTaskCondition(cond1Name)), + tb.PipelineTask("task2", getName(taskName, suffix), tb.RunAfter(task1Name)), + )) +} + +func getFailingCondition(namespace string) *v1alpha1.Condition { + return tb.Condition(cond1Name, namespace, tb.ConditionSpec(tb.ConditionSpecCheck("", "ubuntu", + tb.Command("/bin/bash"), tb.Args("exit 1")))) +} + +func getConditionalPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun { + return tb.PipelineRun(getName(pipelineRunName, suffix), namespace, + tb.PipelineRunLabel("hello-world-key", "hello-world-value"), + tb.PipelineRunSpec(getName(pipelineName, suffix)), + ) +} diff --git a/test/v1alpha1/registry_test.go b/test/v1alpha1/registry_test.go new file mode 100644 index 00000000000..32654b62c4f --- /dev/null +++ b/test/v1alpha1/registry_test.go @@ -0,0 +1,93 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package test + +import ( + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func withRegistry(t *testing.T, c *clients, namespace string) { + deployment := getRegistryDeployment(namespace) + if _, err := c.KubeClient.Kube.AppsV1().Deployments(namespace).Create(deployment); err != nil { + t.Fatalf("Failed to create the local registry deployment: %v", err) + } + if err := WaitForDeploymentState(c, deployment.Name, namespace, func(d *appsv1.Deployment) (bool, error) { + var replicas int32 = 1 + if d.Spec.Replicas != nil { + replicas = *d.Spec.Replicas + } + return d.Status.ReadyReplicas == replicas, nil + }, "DeploymentPodRunning"); err != nil { + t.Fatalf("Error waiting for Deployment %q to be ready: %v", deployment.Name, err) + } + + service := getRegistryService(namespace) + if _, err := c.KubeClient.Kube.CoreV1().Services(namespace).Create(service); err != nil { + t.Fatalf("Failed to create the local registry service: %v", err) + } +} + +func getRegistryDeployment(namespace string) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "registry", + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "registry", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "registry", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "registry", + Image: "registry", + }}, + }, + }, + }, + } +} + +func getRegistryService(namespace string) *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "registry", + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Port: 5000, + }}, + Selector: map[string]string{ + "app": "registry", + }, + }, + } +} diff --git a/test/retry_test.go b/test/v1alpha1/retry_test.go similarity index 100% rename from test/retry_test.go rename to test/v1alpha1/retry_test.go diff --git a/test/v1alpha1/secret.go b/test/v1alpha1/secret.go new file mode 100644 index 00000000000..37ea00c080a --- /dev/null +++ b/test/v1alpha1/secret.go @@ -0,0 +1,63 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knativetest "knative.dev/pkg/test" +) + +// CreateGCPServiceAccountSecret will create a kube secret called secretName in namespace +// from the value in the GCP_SERVICE_ACCOUNT_KEY_PATH environment variable. If the env var +// doesn't exist, no secret will be created. Returns true if the secret was created, false +// otherwise. 
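// An illustrative usage sketch (the secret name is hypothetical and the error handling is
// abbreviated); a caller might skip credential-dependent steps when no secret was created:
//
//	created, err := CreateGCPServiceAccountSecret(t, c.KubeClient, namespace, "bucket-secret")
//	if err != nil {
//		t.Fatalf("could not create GCP service account secret: %v", err)
//	}
//	if !created {
//		t.Skip("no GCP service account key configured, skipping")
//	}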
+func CreateGCPServiceAccountSecret(t *testing.T, c *knativetest.KubeClient, namespace string, secretName string) (bool, error) { + t.Helper() + file := os.Getenv("GCP_SERVICE_ACCOUNT_KEY_PATH") + if file == "" { + t.Logf("Not creating service account secret, relying on default credentials in namespace %s.", namespace) + return false, nil + } + + sec := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: namespace, + }, + } + + bs, err := ioutil.ReadFile(file) + if err != nil { + return false, fmt.Errorf("couldn't read secret json from %s: %w", file, err) + } + + sec.Data = map[string][]byte{ + "config.json": bs, + } + _, err = c.Kube.CoreV1().Secrets(namespace).Create(sec) + + t.Log("Creating service account secret") + return true, err +} diff --git a/test/v1alpha1/sidecar_test.go b/test/v1alpha1/sidecar_test.go new file mode 100644 index 00000000000..b2186e48b23 --- /dev/null +++ b/test/v1alpha1/sidecar_test.go @@ -0,0 +1,163 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "fmt" + "testing" + "time" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + tb "github.com/tektoncd/pipeline/test/builder" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + sidecarTaskName = "sidecar-test-task" + sidecarTaskRunName = "sidecar-test-task-run" + sidecarContainerName = "sidecar-container" + primaryContainerName = "primary" +) + +// TestSidecarTaskSupport checks whether support for sidecars is working +// as expected by running a Task with a Sidecar defined and confirming +// that both the primary and sidecar containers terminate. 
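// In the TaskRun's Pod, step containers are named "step-<stepName>" and sidecar containers
// "sidecar-<sidecarName>"; the assertions below rely on those prefixes.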
+func TestSidecarTaskSupport(t *testing.T) { + tests := []struct { + desc string + stepCommand []string + sidecarCommand []string + }{{ + desc: "A sidecar that runs forever is terminated when Steps complete", + stepCommand: []string{"echo", "\"hello world\""}, + sidecarCommand: []string{"sh", "-c", "while [[ true ]] ; do echo \"hello from sidecar\" ; done"}, + }, { + desc: "A sidecar that terminates early does not cause problems running Steps", + stepCommand: []string{"echo", "\"hello world\""}, + sidecarCommand: []string{"echo", "\"hello from sidecar\""}, + }} + + clients, namespace := setup(t) + + for i, test := range tests { + t.Run(test.desc, func(t *testing.T) { + sidecarTaskName := fmt.Sprintf("%s-%d", sidecarTaskName, i) + sidecarTaskRunName := fmt.Sprintf("%s-%d", sidecarTaskRunName, i) + task := tb.Task(sidecarTaskName, namespace, + tb.TaskSpec( + tb.Step( + "busybox:1.31.0-musl", + tb.StepName(primaryContainerName), + tb.StepCommand(test.stepCommand...), + ), + tb.Sidecar( + sidecarContainerName, + "busybox:1.31.0-musl", + tb.Command(test.sidecarCommand...), + ), + ), + ) + + taskRun := tb.TaskRun(sidecarTaskRunName, namespace, + tb.TaskRunSpec(tb.TaskRunTaskRef(sidecarTaskName), + tb.TaskRunTimeout(1*time.Minute), + ), + ) + + t.Logf("Creating Task %q", sidecarTaskName) + if _, err := clients.TaskClient.Create(task); err != nil { + t.Errorf("Failed to create Task %q: %v", sidecarTaskName, err) + } + + t.Logf("Creating TaskRun %q", sidecarTaskRunName) + if _, err := clients.TaskRunClient.Create(taskRun); err != nil { + t.Errorf("Failed to create TaskRun %q: %v", sidecarTaskRunName, err) + } + + var podName string + if err := WaitForTaskRunState(clients, sidecarTaskRunName, func(tr *v1alpha1.TaskRun) (bool, error) { + podName = tr.Status.PodName + return TaskRunSucceed(sidecarTaskRunName)(tr) + }, "TaskRunSucceed"); err != nil { + t.Errorf("Error waiting for TaskRun %q to finish: %v", sidecarTaskRunName, err) + } + + if err := WaitForPodState(clients, podName, namespace, func(pod *corev1.Pod) (bool, error) { + terminatedCount := 0 + for _, c := range pod.Status.ContainerStatuses { + if c.State.Terminated != nil { + terminatedCount++ + } + } + return terminatedCount == 2, nil + }, "PodContainersTerminated"); err != nil { + t.Errorf("Error waiting for Pod %q to terminate both the primary and sidecar containers: %v", podName, err) + } + + pod, err := clients.KubeClient.Kube.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error getting TaskRun pod: %v", err) + } + + primaryTerminated := false + sidecarTerminated := false + + for _, c := range pod.Status.ContainerStatuses { + if c.Name == fmt.Sprintf("step-%s", primaryContainerName) { + if c.State.Terminated == nil || c.State.Terminated.Reason != "Completed" { + t.Errorf("Primary container has nil Terminated state or did not complete successfully. 
Actual Terminated state: %v", c.State.Terminated) + } else { + primaryTerminated = true + } + } + if c.Name == fmt.Sprintf("sidecar-%s", sidecarContainerName) { + if c.State.Terminated == nil { + t.Errorf("Sidecar container has a nil Terminated status but non-nil is expected.") + } else { + sidecarTerminated = true + } + } + } + + if !primaryTerminated || !sidecarTerminated { + t.Errorf("Either the primary or sidecar containers did not terminate") + } + + trCheckSidecarStatus, err := clients.TaskRunClient.Get(sidecarTaskRunName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error getting TaskRun: %v", err) + } + + sidecarFromStatus := trCheckSidecarStatus.Status.Sidecars[0] + + // Check if Sidecar ContainerName is present for SidecarStatus + if sidecarFromStatus.ContainerName != fmt.Sprintf("sidecar-%s", sidecarContainerName) { + t.Errorf("Sidecar ContainerName should be: %s", sidecarContainerName) + } + + // Check if Terminated status is present for SidecarStatus + if trCheckSidecarStatus.Name == "sidecar-test-task-run-1" && sidecarFromStatus.Terminated == nil { + t.Errorf("TaskRunStatus: Sidecar container has a nil Terminated status but non-nil is expected.") + } else if trCheckSidecarStatus.Name == "sidecar-test-task-run-1" && sidecarFromStatus.Terminated.Reason != "Completed" { + t.Errorf("TaskRunStatus: Sidecar container has a nil Terminated reason of %s but should be Completed", sidecarFromStatus.Terminated.Reason) + } + }) + } +} diff --git a/test/start_time_test.go b/test/v1alpha1/start_time_test.go similarity index 100% rename from test/start_time_test.go rename to test/v1alpha1/start_time_test.go diff --git a/test/status_test.go b/test/v1alpha1/status_test.go similarity index 100% rename from test/status_test.go rename to test/v1alpha1/status_test.go diff --git a/test/taskrun_test.go b/test/v1alpha1/taskrun_test.go similarity index 100% rename from test/taskrun_test.go rename to test/v1alpha1/taskrun_test.go diff --git a/test/timeout_test.go b/test/v1alpha1/timeout_test.go similarity index 100% rename from test/timeout_test.go rename to test/v1alpha1/timeout_test.go diff --git a/test/v1alpha1/wait.go b/test/v1alpha1/wait.go new file mode 100644 index 00000000000..baddeec3021 --- /dev/null +++ b/test/v1alpha1/wait.go @@ -0,0 +1,223 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Poll Pipeline resources + +After creating Pipeline resources or making changes to them, you will need to +wait for the system to realize those changes. You can use polling methods to +check the resources reach the desired state. + +The WaitFor* functions use the kubernetes +wait package (https://godoc.org/k8s.io/apimachinery/pkg/util/wait). 
+To poll they use
+PollImmediate (https://godoc.org/k8s.io/apimachinery/pkg/util/wait#PollImmediate)
+and the return values of the function you provide behave the same as
+ConditionFunc (https://godoc.org/k8s.io/apimachinery/pkg/util/wait#ConditionFunc):
+a boolean to indicate if the function should stop or continue polling, and an
+error to indicate if there has been an error.
+
+For example, you can poll a TaskRun object to wait for it to have a Status.Condition:
+
+	err = WaitForTaskRunState(c, hwTaskRunName, func(tr *v1alpha1.TaskRun) (bool, error) {
+		if len(tr.Status.Conditions) > 0 {
+			return true, nil
+		}
+		return false, nil
+	}, "TaskRunHasCondition")
+
+*/
+package test
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
+	"go.opencensus.io/trace"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"knative.dev/pkg/apis"
+)
+
+const (
+	interval = 1 * time.Second
+	timeout  = 10 * time.Minute
+)
+
+// TaskRunStateFn is a condition function on TaskRun used by polling functions
+type TaskRunStateFn func(r *v1alpha1.TaskRun) (bool, error)
+
+// PipelineRunStateFn is a condition function on PipelineRun used by polling functions
+type PipelineRunStateFn func(pr *v1alpha1.PipelineRun) (bool, error)
+
+// WaitForTaskRunState polls the status of the TaskRun called name from client every
+// interval until inState returns `true` indicating it is done, returns an
+// error or timeout. desc will be used to name the metric that is emitted to
+// track how long it took for name to get into the state checked by inState.
+func WaitForTaskRunState(c *clients, name string, inState TaskRunStateFn, desc string) error {
+	metricName := fmt.Sprintf("WaitForTaskRunState/%s/%s", name, desc)
+	_, span := trace.StartSpan(context.Background(), metricName)
+	defer span.End()
+
+	return wait.PollImmediate(interval, timeout, func() (bool, error) {
+		r, err := c.TaskRunClient.Get(name, metav1.GetOptions{})
+		if err != nil {
+			return true, err
+		}
+		return inState(r)
+	})
+}
+
+// WaitForDeploymentState polls the status of the Deployment called name
+// from client every interval until inState returns `true` indicating it is done,
+// returns an error or timeout. desc will be used to name the metric that is emitted to
+// track how long it took for name to get into the state checked by inState.
+func WaitForDeploymentState(c *clients, name string, namespace string, inState func(d *appsv1.Deployment) (bool, error), desc string) error {
+	metricName := fmt.Sprintf("WaitForDeploymentState/%s/%s", name, desc)
+	_, span := trace.StartSpan(context.Background(), metricName)
+	defer span.End()
+
+	return wait.PollImmediate(interval, timeout, func() (bool, error) {
+		d, err := c.KubeClient.Kube.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{})
+		if err != nil {
+			return true, err
+		}
+		return inState(d)
+	})
+}
+
+// WaitForPodState polls the status of the Pod called name from client every
+// interval until inState returns `true` indicating it is done, returns an
+// error or timeout. desc will be used to name the metric that is emitted to
+// track how long it took for name to get into the state checked by inState.
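+//
+// A minimal usage sketch (the pod name and condition below are illustrative,
+// not part of this change): block until every container of a TaskRun's pod
+// has terminated.
+//
+//	err := WaitForPodState(c, "my-taskrun-pod", namespace, func(p *corev1.Pod) (bool, error) {
+//		for _, cs := range p.Status.ContainerStatuses {
+//			if cs.State.Terminated == nil {
+//				return false, nil
+//			}
+//		}
+//		return true, nil
+//	}, "AllContainersTerminated")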
+func WaitForPodState(c *clients, name string, namespace string, inState func(r *corev1.Pod) (bool, error), desc string) error {
+	metricName := fmt.Sprintf("WaitForPodState/%s/%s", name, desc)
+	_, span := trace.StartSpan(context.Background(), metricName)
+	defer span.End()
+
+	return wait.PollImmediate(interval, timeout, func() (bool, error) {
+		r, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
+		if err != nil {
+			return true, err
+		}
+		return inState(r)
+	})
+}
+
+// WaitForPipelineRunState polls the status of the PipelineRun called name from client every
+// interval until inState returns `true` indicating it is done, returns an
+// error or timeout. desc will be used to name the metric that is emitted to
+// track how long it took for name to get into the state checked by inState.
+func WaitForPipelineRunState(c *clients, name string, polltimeout time.Duration, inState PipelineRunStateFn, desc string) error {
+	metricName := fmt.Sprintf("WaitForPipelineRunState/%s/%s", name, desc)
+	_, span := trace.StartSpan(context.Background(), metricName)
+	defer span.End()
+
+	return wait.PollImmediate(interval, polltimeout, func() (bool, error) {
+		r, err := c.PipelineRunClient.Get(name, metav1.GetOptions{})
+		if err != nil {
+			return true, err
+		}
+		return inState(r)
+	})
+}
+
+// WaitForServiceExternalIPState polls the status of a k8s Service called name from client every
+// interval until an external IP is assigned indicating it is done, returns an
+// error or timeout. desc will be used to name the metric that is emitted to
+// track how long it took for name to get into the state checked by inState.
+func WaitForServiceExternalIPState(c *clients, namespace, name string, inState func(s *corev1.Service) (bool, error), desc string) error {
+	metricName := fmt.Sprintf("WaitForServiceExternalIPState/%s/%s", name, desc)
+	_, span := trace.StartSpan(context.Background(), metricName)
+	defer span.End()
+
+	return wait.PollImmediate(interval, timeout, func() (bool, error) {
+		r, err := c.KubeClient.Kube.CoreV1().Services(namespace).Get(name, metav1.GetOptions{})
+		if err != nil {
+			return true, err
+		}
+		return inState(r)
+	})
+}
+
+// TaskRunSucceed provides a poll condition function that checks if the TaskRun
+// has successfully completed.
+func TaskRunSucceed(name string) TaskRunStateFn {
+	return func(tr *v1alpha1.TaskRun) (bool, error) {
+		c := tr.Status.GetCondition(apis.ConditionSucceeded)
+		if c != nil {
+			if c.Status == corev1.ConditionTrue {
+				return true, nil
+			} else if c.Status == corev1.ConditionFalse {
+				return true, fmt.Errorf("task run %q failed", name)
+			}
+		}
+		return false, nil
+	}
+}
+
+// TaskRunFailed provides a poll condition function that checks if the TaskRun
+// has failed.
+func TaskRunFailed(name string) TaskRunStateFn {
+	return func(tr *v1alpha1.TaskRun) (bool, error) {
+		c := tr.Status.GetCondition(apis.ConditionSucceeded)
+		if c != nil {
+			if c.Status == corev1.ConditionTrue {
+				return true, fmt.Errorf("task run %q succeeded", name)
+			} else if c.Status == corev1.ConditionFalse {
+				return true, nil
+			}
+		}
+		return false, nil
+	}
+}
+
+// PipelineRunSucceed provides a poll condition function that checks if the PipelineRun
+// has successfully completed.
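+//
+// A minimal usage sketch (the PipelineRun name below is illustrative, not part
+// of this change): pair it with WaitForPipelineRunState to block until the run
+// succeeds or the poll times out.
+//
+//	err := WaitForPipelineRunState(c, "my-pipelinerun", timeout, PipelineRunSucceed("my-pipelinerun"), "PipelineRunSuccess")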
+func PipelineRunSucceed(name string) PipelineRunStateFn {
+	return func(pr *v1alpha1.PipelineRun) (bool, error) {
+		c := pr.Status.GetCondition(apis.ConditionSucceeded)
+		if c != nil {
+			if c.Status == corev1.ConditionTrue {
+				return true, nil
+			} else if c.Status == corev1.ConditionFalse {
+				return true, fmt.Errorf("pipeline run %q failed", name)
+			}
+		}
+		return false, nil
+	}
+}
+
+// PipelineRunFailed provides a poll condition function that checks if the PipelineRun
+// has failed.
+func PipelineRunFailed(name string) PipelineRunStateFn {
+	return func(tr *v1alpha1.PipelineRun) (bool, error) {
+		c := tr.Status.GetCondition(apis.ConditionSucceeded)
+		if c != nil {
+			if c.Status == corev1.ConditionTrue {
+				return true, fmt.Errorf("pipeline run %q succeeded", name)
+			} else if c.Status == corev1.ConditionFalse {
+				return true, nil
+			}
+		}
+		return false, nil
+	}
+}
diff --git a/test/v1alpha1/wait_example_test.go b/test/v1alpha1/wait_example_test.go
new file mode 100644
index 00000000000..21090312754
--- /dev/null
+++ b/test/v1alpha1/wait_example_test.go
@@ -0,0 +1,62 @@
+// +build examples
+
+/*
+Copyright 2019 The Tekton Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package test
+
+import (
+	"time"
+
+	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
+)
+
+var (
+	// This is a "hack" to make the example "look" like tests.
+	// Golang Example functions do not take `t *testing.T` as argument, so we "fake"
+	// it so that examples still compile (`go test` tries to compile those) and look
+	// nice in the go documentation.
+ t testingT + c *clients +) + +type testingT interface { + Errorf(string, ...interface{}) +} + +func ExampleWaitForTaskRunState() { + // […] setup the test, get clients + if err := WaitForTaskRunState(c, "taskRunName", func(tr *v1alpha1.TaskRun) (bool, error) { + if len(tr.Status.Conditions) > 0 { + return true, nil + } + return false, nil + }, "TaskRunHasCondition"); err != nil { + t.Errorf("Error waiting for TaskRun taskRunName to finish: %s", err) + } +} + +func ExampleWaitForPipelineRunState() { + // […] setup the test, get clients + if err := WaitForPipelineRunState(c, "pipelineRunName", 1*time.Minute, func(pr *v1alpha1.PipelineRun) (bool, error) { + if len(pr.Status.Conditions) > 0 { + return true, nil + } + return false, nil + }, "PipelineRunHasCondition"); err != nil { + t.Errorf("Error waiting for PipelineRun pipelineRunName to finish: %s", err) + } +} diff --git a/test/wait_test.go b/test/v1alpha1/wait_test.go similarity index 100% rename from test/wait_test.go rename to test/v1alpha1/wait_test.go diff --git a/test/workingdir_test.go b/test/v1alpha1/workingdir_test.go similarity index 100% rename from test/workingdir_test.go rename to test/v1alpha1/workingdir_test.go diff --git a/test/v1alpha1/workspace_test.go b/test/v1alpha1/workspace_test.go new file mode 100644 index 00000000000..01f5b2f2776 --- /dev/null +++ b/test/v1alpha1/workspace_test.go @@ -0,0 +1,186 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "strings" + "testing" + "time" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + tb "github.com/tektoncd/pipeline/test/builder" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck/v1beta1" + knativetest "knative.dev/pkg/test" +) + +func TestWorkspaceReadOnlyDisallowsWrite(t *testing.T) { + c, namespace := setup(t) + + taskName := "write-disallowed" + taskRunName := "write-disallowed-tr" + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + task := tb.Task(taskName, namespace, tb.TaskSpec( + tb.Step("alpine", tb.StepScript("echo foo > /workspace/test/file")), + tb.TaskWorkspace("test", "test workspace", "/workspace/test", true), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + + taskRun := tb.TaskRun(taskRunName, namespace, tb.TaskRunSpec( + tb.TaskRunTaskRef(taskName), tb.TaskRunServiceAccountName("default"), + tb.TaskRunWorkspaceEmptyDir("test", ""), + )) + if _, err := c.TaskRunClient.Create(taskRun); err != nil { + t.Fatalf("Failed to create TaskRun: %s", err) + } + + t.Logf("Waiting for TaskRun in namespace %s to finish", namespace) + if err := WaitForTaskRunState(c, taskRunName, TaskRunFailed(taskRunName), "error"); err != nil { + t.Errorf("Error waiting for TaskRun to finish with error: %s", err) + } + + tr, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error retrieving taskrun: %s", err) + } + if tr.Status.PodName == "" { + t.Fatal("Error getting a PodName (empty)") + } + p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + + if err != nil { + t.Fatalf("Error getting pod `%s` in namespace `%s`", tr.Status.PodName, namespace) + } + for _, stat := range p.Status.ContainerStatuses { + if strings.Contains(stat.Name, "step-attempt-write") { + req := c.KubeClient.Kube.CoreV1().Pods(namespace).GetLogs(p.Name, &corev1.PodLogOptions{Container: stat.Name}) + logContent, err := req.Do().Raw() + if err != nil { + t.Fatalf("Error getting pod logs for pod `%s` and container `%s` in namespace `%s`", tr.Status.PodName, stat.Name, namespace) + } + if !strings.Contains(string(logContent), "Read-only file system") { + t.Fatalf("Expected read-only file system error but received %v", logContent) + } + } + } +} + +func TestWorkspacePipelineRunDuplicateWorkspaceEntriesInvalid(t *testing.T) { + c, namespace := setup(t) + + taskName := "read-workspace" + pipelineName := "read-workspace-pipeline" + pipelineRunName := "read-workspace-pipelinerun" + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + task := tb.Task(taskName, namespace, tb.TaskSpec( + tb.Step("alpine", tb.StepScript("cat /workspace/test/file")), + tb.TaskWorkspace("test", "test workspace", "/workspace/test/file", true), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + + pipeline := tb.Pipeline(pipelineName, namespace, tb.PipelineSpec( + tb.PipelineWorkspaceDeclaration("foo"), + tb.PipelineTask("task1", taskName, tb.PipelineTaskWorkspaceBinding("test", "foo")), + )) + if _, err := c.PipelineClient.Create(pipeline); err != nil { + t.Fatalf("Failed to create Pipeline: %s", err) + } + + pipelineRun := tb.PipelineRun(pipelineRunName, namespace, + tb.PipelineRunSpec( + pipelineName, + 
// These are the duplicated workspace entries that are being tested. + tb.PipelineRunWorkspaceBindingEmptyDir("foo"), + tb.PipelineRunWorkspaceBindingEmptyDir("foo"), + ), + ) + _, err := c.PipelineRunClient.Create(pipelineRun) + + if err == nil || !strings.Contains(err.Error(), "provided by pipelinerun more than once") { + t.Fatalf("Expected error when creating pipelinerun with duplicate workspace entries but received: %v", err) + } +} + +func TestWorkspacePipelineRunMissingWorkspaceInvalid(t *testing.T) { + c, namespace := setup(t) + + taskName := "read-workspace" + pipelineName := "read-workspace-pipeline" + pipelineRunName := "read-workspace-pipelinerun" + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + task := tb.Task(taskName, namespace, tb.TaskSpec( + tb.Step("alpine", tb.StepScript("cat /workspace/test/file")), + tb.TaskWorkspace("test", "test workspace", "/workspace/test/file", true), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + + pipeline := tb.Pipeline(pipelineName, namespace, tb.PipelineSpec( + tb.PipelineWorkspaceDeclaration("foo"), + tb.PipelineTask("task1", taskName, tb.PipelineTaskWorkspaceBinding("test", "foo")), + )) + if _, err := c.PipelineClient.Create(pipeline); err != nil { + t.Fatalf("Failed to create Pipeline: %s", err) + } + + pipelineRun := tb.PipelineRun(pipelineRunName, namespace, + tb.PipelineRunSpec( + pipelineName, + ), + ) + if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + t.Fatalf("Failed to create PipelineRun: %s", err) + } + + conditions := make(v1beta1.Conditions, 0) + + if err := WaitForPipelineRunState(c, pipelineRunName, 10*time.Second, func(pr *v1alpha1.PipelineRun) (bool, error) { + if len(pr.Status.Conditions) > 0 { + conditions = pr.Status.Conditions + return true, nil + } + return false, nil + }, "PipelineRunHasCondition"); err != nil { + t.Fatalf("Failed to wait for PipelineRun %q to finish: %s", pipelineRunName, err) + } + + for _, condition := range conditions { + if condition.Type == apis.ConditionSucceeded && condition.Status == corev1.ConditionFalse && strings.Contains(condition.Message, `pipeline expects workspace with name "foo" be provided by pipelinerun`) { + return + } + } + + t.Fatalf("Expected failure condition after creating pipelinerun with missing workspace but did not receive one.") +} diff --git a/test/wait.go b/test/wait.go index baddeec3021..38f08610a28 100644 --- a/test/wait.go +++ b/test/wait.go @@ -48,7 +48,7 @@ import ( "fmt" "time" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "go.opencensus.io/trace" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -63,10 +63,10 @@ const ( ) // TaskRunStateFn is a condition function on TaskRun used polling functions -type TaskRunStateFn func(r *v1alpha1.TaskRun) (bool, error) +type TaskRunStateFn func(r *v1beta1.TaskRun) (bool, error) // PipelineRunStateFn is a condition function on TaskRun used polling functions -type PipelineRunStateFn func(pr *v1alpha1.PipelineRun) (bool, error) +type PipelineRunStateFn func(pr *v1beta1.PipelineRun) (bool, error) // WaitForTaskRunState polls the status of the TaskRun called name from client every // interval until inState returns `true` indicating it is done, returns an @@ -161,7 +161,7 @@ func WaitForServiceExternalIPState(c *clients, namespace, name string, inState f // TaskRunSucceed provides a poll condition 
function that checks if the TaskRun // has successfully completed. func TaskRunSucceed(name string) TaskRunStateFn { - return func(tr *v1alpha1.TaskRun) (bool, error) { + return func(tr *v1beta1.TaskRun) (bool, error) { c := tr.Status.GetCondition(apis.ConditionSucceeded) if c != nil { if c.Status == corev1.ConditionTrue { @@ -177,7 +177,7 @@ func TaskRunSucceed(name string) TaskRunStateFn { // TaskRunFailed provides a poll condition function that checks if the TaskRun // has failed. func TaskRunFailed(name string) TaskRunStateFn { - return func(tr *v1alpha1.TaskRun) (bool, error) { + return func(tr *v1beta1.TaskRun) (bool, error) { c := tr.Status.GetCondition(apis.ConditionSucceeded) if c != nil { if c.Status == corev1.ConditionTrue { @@ -193,7 +193,7 @@ func TaskRunFailed(name string) TaskRunStateFn { // PipelineRunSucceed provides a poll condition function that checks if the PipelineRun // has successfully completed. func PipelineRunSucceed(name string) PipelineRunStateFn { - return func(pr *v1alpha1.PipelineRun) (bool, error) { + return func(pr *v1beta1.PipelineRun) (bool, error) { c := pr.Status.GetCondition(apis.ConditionSucceeded) if c != nil { if c.Status == corev1.ConditionTrue { @@ -209,7 +209,7 @@ func PipelineRunSucceed(name string) PipelineRunStateFn { // PipelineRunFailed provides a poll condition function that checks if the PipelineRun // has failed. func PipelineRunFailed(name string) PipelineRunStateFn { - return func(tr *v1alpha1.PipelineRun) (bool, error) { + return func(tr *v1beta1.PipelineRun) (bool, error) { c := tr.Status.GetCondition(apis.ConditionSucceeded) if c != nil { if c.Status == corev1.ConditionTrue { diff --git a/test/wait_example_test.go b/test/wait_example_test.go index 21090312754..8df8ef3952d 100644 --- a/test/wait_example_test.go +++ b/test/wait_example_test.go @@ -39,7 +39,7 @@ type testingT interface { func ExampleWaitForTaskRunState() { // […] setup the test, get clients - if err := WaitForTaskRunState(c, "taskRunName", func(tr *v1alpha1.TaskRun) (bool, error) { + if err := WaitForTaskRunState(c, "taskRunName", func(tr *v1beta1.TaskRun) (bool, error) { if len(tr.Status.Conditions) > 0 { return true, nil } @@ -51,7 +51,7 @@ func ExampleWaitForTaskRunState() { func ExampleWaitForPipelineRunState() { // […] setup the test, get clients - if err := WaitForPipelineRunState(c, "pipelineRunName", 1*time.Minute, func(pr *v1alpha1.PipelineRun) (bool, error) { + if err := WaitForPipelineRunState(c, "pipelineRunName", 1*time.Minute, func(pr *v1beta1.PipelineRun) (bool, error) { if len(pr.Status.Conditions) > 0 { return true, nil } diff --git a/test/workspace_test.go b/test/workspace_test.go index 01f5b2f2776..a5526238f27 100644 --- a/test/workspace_test.go +++ b/test/workspace_test.go @@ -23,12 +23,11 @@ import ( "testing" "time" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - tb "github.com/tektoncd/pipeline/test/builder" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" - "knative.dev/pkg/apis/duck/v1beta1" + duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" knativetest "knative.dev/pkg/test" ) @@ -41,18 +40,36 @@ func TestWorkspaceReadOnlyDisallowsWrite(t *testing.T) { knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) defer tearDown(t, c, namespace) - task := tb.Task(taskName, namespace, tb.TaskSpec( - tb.Step("alpine", tb.StepScript("echo foo > /workspace/test/file")), - tb.TaskWorkspace("test", "test workspace", 
"/workspace/test", true), - )) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: taskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{ + Container: corev1.Container{Image: "alpine"}, + Script: "echo foo > /workspace/test/file", + }}, + Workspaces: []v1beta1.WorkspaceDeclaration{{ + Name: "test", + Description: "test workspace", + MountPath: "/workspace/test", + ReadOnly: true, + }}, + }, + } if _, err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task: %s", err) } - taskRun := tb.TaskRun(taskRunName, namespace, tb.TaskRunSpec( - tb.TaskRunTaskRef(taskName), tb.TaskRunServiceAccountName("default"), - tb.TaskRunWorkspaceEmptyDir("test", ""), - )) + taskRun := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: taskRunName, Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: taskName}, + ServiceAccountName: "default", + Workspaces: []v1beta1.WorkspaceBinding{{ + Name: "test", + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }}, + }, + } if _, err := c.TaskRunClient.Create(taskRun); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } @@ -98,30 +115,58 @@ func TestWorkspacePipelineRunDuplicateWorkspaceEntriesInvalid(t *testing.T) { knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) defer tearDown(t, c, namespace) - task := tb.Task(taskName, namespace, tb.TaskSpec( - tb.Step("alpine", tb.StepScript("cat /workspace/test/file")), - tb.TaskWorkspace("test", "test workspace", "/workspace/test/file", true), - )) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: taskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{ + Container: corev1.Container{Image: "alpine"}, + Script: "cat /workspace/test/file", + }}, + Workspaces: []v1beta1.WorkspaceDeclaration{{ + Name: "test", + Description: "test workspace", + MountPath: "/workspace/test/file", + ReadOnly: true, + }}, + }, + } if _, err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task: %s", err) } - pipeline := tb.Pipeline(pipelineName, namespace, tb.PipelineSpec( - tb.PipelineWorkspaceDeclaration("foo"), - tb.PipelineTask("task1", taskName, tb.PipelineTaskWorkspaceBinding("test", "foo")), - )) + pipeline := &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: pipelineName, Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Workspaces: []v1beta1.WorkspacePipelineDeclaration{{ + Name: "foo", + }}, + Tasks: []v1beta1.PipelineTask{{ + Name: "task1", + TaskRef: &v1beta1.TaskRef{Name: taskName}, + Workspaces: []v1beta1.WorkspacePipelineTaskBinding{{ + Name: "test", + Workspace: "foo", + }}, + }}, + }, + } if _, err := c.PipelineClient.Create(pipeline); err != nil { t.Fatalf("Failed to create Pipeline: %s", err) } - pipelineRun := tb.PipelineRun(pipelineRunName, namespace, - tb.PipelineRunSpec( - pipelineName, - // These are the duplicated workspace entries that are being tested. 
- tb.PipelineRunWorkspaceBindingEmptyDir("foo"), - tb.PipelineRunWorkspaceBindingEmptyDir("foo"), - ), - ) + pipelineRun := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: pipelineRunName, Namespace: namespace}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: pipelineName}, + Workspaces: []v1beta1.WorkspaceBinding{{ + Name: "foo", + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, { + Name: "foo", + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }}, + }, + } _, err := c.PipelineRunClient.Create(pipelineRun) if err == nil || !strings.Contains(err.Error(), "provided by pipelinerun more than once") { @@ -139,34 +184,58 @@ func TestWorkspacePipelineRunMissingWorkspaceInvalid(t *testing.T) { knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) defer tearDown(t, c, namespace) - task := tb.Task(taskName, namespace, tb.TaskSpec( - tb.Step("alpine", tb.StepScript("cat /workspace/test/file")), - tb.TaskWorkspace("test", "test workspace", "/workspace/test/file", true), - )) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: taskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{ + Container: corev1.Container{Image: "alpine"}, + Script: "cat /workspace/test/file", + }}, + Workspaces: []v1beta1.WorkspaceDeclaration{{ + Name: "test", + Description: "test workspace", + MountPath: "/workspace/test/file", + ReadOnly: true, + }}, + }, + } if _, err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task: %s", err) } - pipeline := tb.Pipeline(pipelineName, namespace, tb.PipelineSpec( - tb.PipelineWorkspaceDeclaration("foo"), - tb.PipelineTask("task1", taskName, tb.PipelineTaskWorkspaceBinding("test", "foo")), - )) + pipeline := &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: pipelineName, Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Workspaces: []v1beta1.WorkspacePipelineDeclaration{{ + Name: "foo", + }}, + Tasks: []v1beta1.PipelineTask{{ + Name: "task1", + TaskRef: &v1beta1.TaskRef{Name: taskName}, + Workspaces: []v1beta1.WorkspacePipelineTaskBinding{{ + Name: "test", + Workspace: "foo", + }}, + }}, + }, + } if _, err := c.PipelineClient.Create(pipeline); err != nil { t.Fatalf("Failed to create Pipeline: %s", err) } - pipelineRun := tb.PipelineRun(pipelineRunName, namespace, - tb.PipelineRunSpec( - pipelineName, - ), - ) + pipelineRun := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: pipelineRunName, Namespace: namespace}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: pipelineName}, + }, + } if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { t.Fatalf("Failed to create PipelineRun: %s", err) } - conditions := make(v1beta1.Conditions, 0) + conditions := make(duckv1beta1.Conditions, 0) - if err := WaitForPipelineRunState(c, pipelineRunName, 10*time.Second, func(pr *v1alpha1.PipelineRun) (bool, error) { + if err := WaitForPipelineRunState(c, pipelineRunName, 10*time.Second, func(pr *v1beta1.PipelineRun) (bool, error) { if len(pr.Status.Conditions) > 0 { conditions = pr.Status.Conditions return true, nil