diff --git a/bundle/manifests/shipwright-operator.clusterserviceversion.yaml b/bundle/manifests/shipwright-operator.clusterserviceversion.yaml index e737cf20..3a734b74 100644 --- a/bundle/manifests/shipwright-operator.clusterserviceversion.yaml +++ b/bundle/manifests/shipwright-operator.clusterserviceversion.yaml @@ -426,9 +426,13 @@ spec: resources: - customresourcedefinitions verbs: + - create - delete + - get + - list - patch - update + - watch - apiGroups: - apps resources: @@ -739,6 +743,19 @@ spec: - delete - patch - update + - apiGroups: + - shipwright.io + resources: + - buildstrategies + - clusterbuildstrategies + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - authentication.k8s.io resources: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 407ed6ea..fca484b2 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -48,9 +48,13 @@ rules: resources: - customresourcedefinitions verbs: + - create - delete + - get + - list - patch - update + - watch - apiGroups: - apps resources: @@ -361,3 +365,16 @@ rules: - delete - patch - update +- apiGroups: + - shipwright.io + resources: + - buildstrategies + - clusterbuildstrategies + verbs: + - create + - delete + - get + - list + - patch + - update + - watch diff --git a/controllers/add_buildstrategy.go b/controllers/add_buildstrategy.go new file mode 100644 index 00000000..b773c3f4 --- /dev/null +++ b/controllers/add_buildstrategy.go @@ -0,0 +1,14 @@ +// Copyright The Shipwright Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package controllers + +import ( + "github.com/shipwright-io/operator/controllers/buildstrategy" +) + +func init() { + // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. + AddToManagerFuncs = append(AddToManagerFuncs, buildstrategy.Add) +} diff --git a/controllers/buildstrategy/buildstrategy_controller.go b/controllers/buildstrategy/buildstrategy_controller.go new file mode 100644 index 00000000..ab1145a0 --- /dev/null +++ b/controllers/buildstrategy/buildstrategy_controller.go @@ -0,0 +1,139 @@ +// Copyright The Shipwright Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package buildstrategy + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + "github.com/manifestival/manifestival" + corev1 "k8s.io/api/core/v1" + crdclientv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + v1beta1 "github.com/shipwright-io/build/pkg/apis/build/v1beta1" + "github.com/shipwright-io/operator/api/v1alpha1" + commonctrl "github.com/shipwright-io/operator/controllers/common" + "github.com/shipwright-io/operator/pkg/reconciler/build" +) + +// BuildStrategyReconciler reconciles a ShipwrightBuild object +type BuildStrategyReconciler struct { + client.Client // controller kubernetes client + CRDClient crdclientv1.ApiextensionsV1Interface + + Logger logr.Logger // decorated logger + Scheme *runtime.Scheme // runtime scheme + Manifest manifestival.Manifest // release manifests render +} + +// Add creates a new buildStrategy Controller and adds it to the Manager. 
The Manager will set fields on the Controller
+// and Start it when the Manager is Started.
+func Add(mgr manager.Manager) error {
+    r, err := newReconciler(mgr)
+    if err != nil {
+        return err
+    }
+    return add(mgr, r)
+}
+
+// newReconciler returns a new reconcile.Reconciler
+func newReconciler(mgr manager.Manager) (*BuildStrategyReconciler, error) {
+    c := mgr.GetClient()
+    scheme := mgr.GetScheme()
+    logger := ctrl.Log.WithName("controllers").WithName("buildstrategy")
+
+    crdClient, err := crdclientv1.NewForConfig(mgr.GetConfig())
+    if err != nil {
+        logger.Error(err, "unable to get crd client")
+        return nil, err
+    }
+
+    return &BuildStrategyReconciler{
+        CRDClient: crdClient,
+        Client:    c,
+        Scheme:    scheme,
+        Logger:    logger,
+    }, nil
+}
+
+// add adds a new Controller to mgr with r as the reconcile.Reconciler
+func add(mgr manager.Manager, r *BuildStrategyReconciler) error {
+    return ctrl.NewControllerManagedBy(mgr).
+        For(&v1alpha1.ShipwrightBuild{}, builder.WithPredicates(predicate.Funcs{
+            CreateFunc: func(ce event.CreateEvent) bool {
+                // all new objects must be subject to reconciliation
+                return true
+            },
+            DeleteFunc: func(e event.DeleteEvent) bool {
+                // objects that haven't been confirmed deleted must be subject to reconciliation
+                return !e.DeleteStateUnknown
+            },
+            UpdateFunc: func(e event.UpdateEvent) bool {
+                // objects with an updated generation must be subject to reconciliation
+                return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()
+            },
+        })).
+        Complete(r)
+}
+
+// Reconcile responds to changes on ShipwrightBuild instances and rolls out the build strategy
+// manifests shipped with the operator, targeting the namespace configured on the instance.
+func (r *BuildStrategyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+    logger := r.Logger.WithValues("namespace", req.Namespace, "name", req.Name)
+
+    // retrieve the ShipwrightBuild instance requested for reconcile
+    b := &v1alpha1.ShipwrightBuild{}
+    if err := r.Get(ctx, req.NamespacedName, b); err != nil {
+        if errors.IsNotFound(err) {
+            logger.Info("Resource is not found!")
+            return commonctrl.NoRequeue()
+        }
+        logger.Error(err, "retrieving ShipwrightBuild object from cache")
+        return commonctrl.RequeueOnError(err)
+    }
+
+    // check that the target namespace has been created
+    targetNamespace := b.Spec.TargetNamespace
+    ns := &corev1.Namespace{}
+    if err := r.Get(ctx, types.NamespacedName{Name: targetNamespace}, ns); err != nil {
+        if !errors.IsNotFound(err) {
+            logger.Error(err, "retrieving target namespace", "targetNamespace", targetNamespace)
+            return commonctrl.RequeueAfterWithError(err)
+        }
+    }
+
+    // reconcile the build strategies
+    requeue, err := build.ReconcileBuildStrategy(ctx, r.CRDClient, r.Client, r.Logger, targetNamespace)
+    if err != nil {
+        return commonctrl.RequeueAfterWithError(err)
+    }
+    if requeue {
+        return commonctrl.Requeue()
+    }
+
+    logger.Info("All done!")
+    return commonctrl.NoRequeue()
+}
+
+// GetBuildStrategy looks up a ClusterBuildStrategy by its namespaced name.
+func (r *BuildStrategyReconciler) GetBuildStrategy(namespaced types.NamespacedName) (*v1beta1.ClusterBuildStrategy, error) {
+    cls := &v1beta1.ClusterBuildStrategy{}
+    if err := r.Get(context.TODO(), namespaced, cls); err != nil {
+        return nil, fmt.Errorf("unable to retrieve ClusterBuildStrategy %q: %w", namespaced.Name, err)
+    }
+    return cls, nil
+}
diff --git a/controllers/buildstrategy/buildstrategy_controller_test.go b/controllers/buildstrategy/buildstrategy_controller_test.go
new file mode 100644
index
00000000..43f3709c --- /dev/null +++ b/controllers/buildstrategy/buildstrategy_controller_test.go @@ -0,0 +1,160 @@ +package buildstrategy + +import ( + "context" + "testing" + + o "github.com/onsi/gomega" + v1beta1 "github.com/shipwright-io/build/pkg/apis/build/v1beta1" + "github.com/shipwright-io/operator/api/v1alpha1" + commonctrl "github.com/shipwright-io/operator/controllers/common" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + crdclientv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// bootstrapBuildStrategyReconciler start up a new instance of BuildStrategyReconciler which is +// ready to interact with Manifestival, returning the Manifestival instance and the client. +func bootstrapBuildStrategyReconciler( + t *testing.T, + b *v1alpha1.ShipwrightBuild, + tcrds []*crdv1.CustomResourceDefinition, +) (client.Client, *crdclientv1.Clientset, *BuildStrategyReconciler) { + g := o.NewGomegaWithT(t) + + s := runtime.NewScheme() + s.AddKnownTypes(corev1.SchemeGroupVersion, &corev1.Namespace{}) + s.AddKnownTypes(appsv1.SchemeGroupVersion, &appsv1.Deployment{}) + s.AddKnownTypes(v1beta1.SchemeGroupVersion, &v1beta1.ClusterBuildStrategy{}) + s.AddKnownTypes(v1beta1.SchemeGroupVersion, &v1beta1.BuildStrategy{}) + s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.ShipwrightBuild{}) + + logger := zap.New() + + // create fake webhook deployment(prerequisite of build strategy installation) + webhookdep := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Namespace: b.Spec.TargetNamespace, Name: "shipwright-build-webhook"}, + Status: appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionTrue, + }}, + }, + } + c := fake.NewClientBuilder().WithScheme(s).WithObjects(b, webhookdep).WithStatusSubresource(b, webhookdep).Build() + + var crdClient *crdclientv1.Clientset + if len(tcrds) > 0 { + objs := []runtime.Object{} + for _, obj := range tcrds { + objs = append(objs, obj) + } + crdClient = crdclientv1.NewSimpleClientset(objs...) + } else { + crdClient = crdclientv1.NewSimpleClientset() + } + + r := &BuildStrategyReconciler{CRDClient: crdClient.ApiextensionsV1(), Client: c, Scheme: s, Logger: logger} + + if b.Spec.TargetNamespace != "" { + t.Logf("Creating test namespace '%s'", b.Spec.TargetNamespace) + t.Run("create-test-namespace", func(t *testing.T) { + err := c.Create( + context.TODO(), + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: b.Spec.TargetNamespace}}, + &client.CreateOptions{}, + ) + g.Expect(err).To(o.BeNil()) + }) + } + + return c, crdClient, r +} + +// testBuildStrategyReconcilerReconcile simulates the reconciliation process for rolling out and +// rolling back manifests in the informed target namespace name. 
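+// The build strategy CRDs (buildstrategies.shipwright.io and clusterbuildstrategies.shipwright.io)
+// are pre-registered on the fake CRD clientset, and a successful reconcile is expected to install
+// the kaniko ClusterBuildStrategy into the cluster.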
+func testBuildStrategyReconcilerReconcile(t *testing.T, targetNamespace string) { + g := o.NewGomegaWithT(t) + + namespacedName := types.NamespacedName{Namespace: "default", Name: "name"} + clsName := types.NamespacedName{ + Name: "kaniko", + } + + req := reconcile.Request{NamespacedName: namespacedName} + + b := &v1alpha1.ShipwrightBuild{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, + }, + Spec: v1alpha1.ShipwrightBuildSpec{ + TargetNamespace: targetNamespace, + }, + } + crd1 := &crdv1.CustomResourceDefinition{} + crd1.Name = "clusterbuildstrategies.shipwright.io" + crd2 := &crdv1.CustomResourceDefinition{} + crd2.Name = "buildstrategies.shipwright.io" + crds := []*crdv1.CustomResourceDefinition{crd1, crd2} + _, _, r := bootstrapBuildStrategyReconciler(t, b, crds) + + t.Logf("Deploying BuildStrategy Controller against '%s' namespace", targetNamespace) + + // rolling out all manifests on the desired namespace, making sure the build cluster strategies are created + t.Run("rollout-manifests", func(t *testing.T) { + ctx := context.TODO() + res, err := r.Reconcile(ctx, req) + g.Expect(err).To(o.BeNil()) + g.Expect(res.Requeue).To(o.BeFalse()) + err = r.Get(ctx, clsName, &v1beta1.ClusterBuildStrategy{}) + g.Expect(err).To(o.BeNil()) + }) + + // rolling back all changes, making sure the build cluster strategies are also not found afterwards + t.Run("rollback-manifests", func(t *testing.T) { + ctx := context.TODO() + + err := r.Get(ctx, namespacedName, b) + g.Expect(err).To(o.BeNil()) + + /*b.SetDeletionTimestamp(&metav1.Time{Time: time.Now()}) + err = r.Update(ctx, b, &client.UpdateOptions{Raw: &metav1.UpdateOptions{}}) + g.Expect(err).To(o.BeNil()) + + res, err := r.Reconcile(ctx, req) + g.Expect(err).To(o.BeNil()) + g.Expect(res.Requeue).To(o.BeFalse()) + + err = r.Get(ctx, clsName, &v1beta1.ClusterBuildStrategy{}) + g.Expect(errors.IsNotFound(err)).To(o.BeTrue())*/ + }) +} + +// TestBuildStrategyReconciler_Reconcile runs rollout/rollback tests against different namespaces. 
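+// One case supplies an explicit target namespace, the other relies on commonctrl.DefaultTargetNamespace.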
+func TestBuildStrategyReconciler_Reconcile(t *testing.T) { + tests := []struct { + testName string + targetNamespace string + }{{ + testName: "target namespace is informed", + targetNamespace: "namespace", + }, { + testName: "target namespace is not informed", + targetNamespace: commonctrl.DefaultTargetNamespace, + }} + + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + testBuildStrategyReconcilerReconcile(t, tt.targetNamespace) + }) + } +} diff --git a/controllers/controller_rbac.go b/controllers/controller_rbac.go index 3ece07b6..b6808904 100644 --- a/controllers/controller_rbac.go +++ b/controllers/controller_rbac.go @@ -11,7 +11,7 @@ package controllers // +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=get;list;watch;create // +kubebuilder:rbac:groups=core,resources=serviceaccounts,resourceNames=shipwright-build-controller,verbs=update;patch;delete // +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch;create -// +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,resourceNames=builds.shipwright.io;buildruns.shipwright.io;buildstrategies.shipwright.io;clusterbuildstrategies.shipwright.io,verbs=update;patch;delete +// +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,resourceNames=builds.shipwright.io;buildruns.shipwright.io;buildstrategies.shipwright.io;clusterbuildstrategies.shipwright.io,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,verbs=get;list;watch;create // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,resourceNames=shipwright-build-aggregate-edit,verbs=update;patch;delete // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,resourceNames=shipwright-build-aggregate-view,verbs=update;patch;delete @@ -40,3 +40,4 @@ package controllers // +kubebuilder:rbac:groups=cert-manager.io,resources=certificates,verbs=get;list;watch;create // +kubebuilder:rbac:groups=cert-manager.io,resources=certificates,resourceNames=shipwright-build-webhook-cert,verbs=update;patch;delete // +kubebuilder:rbac:groups=core,resources=pods;events;configmaps;secrets;limitranges;namespaces;services,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=shipwright.io,resources=clusterbuildstrategies;buildstrategies,verbs=get;list;watch;create;update;patch;delete diff --git a/kodata/buildstrategy/buildah/buildstrategy_buildah_shipwright_managed_push_cr.yaml b/kodata/buildstrategy/buildah/buildstrategy_buildah_shipwright_managed_push_cr.yaml new file mode 100644 index 00000000..b40a3576 --- /dev/null +++ b/kodata/buildstrategy/buildah/buildstrategy_buildah_shipwright_managed_push_cr.yaml @@ -0,0 +1,208 @@ +--- +apiVersion: shipwright.io/v1beta1 +kind: ClusterBuildStrategy +metadata: + name: buildah-shipwright-managed-push +spec: + steps: + - name: build + image: quay.io/containers/buildah:v1.32.0 + workingDir: $(params.shp-source-root) + securityContext: + privileged: true + command: + - /bin/bash + args: + - -c + - | + set -euo pipefail + + # Parse parameters + context= + dockerfile= + image= + target= + buildArgs=() + inBuildArgs=false + registriesBlock="" + inRegistriesBlock=false + registriesInsecure="" + inRegistriesInsecure=false + registriesSearch="" + inRegistriesSearch=false + while [[ $# -gt 0 ]]; do + arg="$1" + shift + + if [ "${arg}" == "--context" ]; then + inBuildArgs=false + inRegistriesBlock=false + 
inRegistriesInsecure=false + inRegistriesSearch=false + context="$1" + shift + elif [ "${arg}" == "--dockerfile" ]; then + inBuildArgs=false + inRegistriesBlock=false + inRegistriesInsecure=false + inRegistriesSearch=false + dockerfile="$1" + shift + elif [ "${arg}" == "--image" ]; then + inBuildArgs=false + inRegistriesBlock=false + inRegistriesInsecure=false + inRegistriesSearch=false + image="$1" + shift + elif [ "${arg}" == "--target" ]; then + inBuildArgs=false + inRegistriesBlock=false + inRegistriesInsecure=false + inRegistriesSearch=false + target="$1" + shift + elif [ "${arg}" == "--build-args" ]; then + inBuildArgs=true + inRegistriesBlock=false + inRegistriesInsecure=false + inRegistriesSearch=false + elif [ "${arg}" == "--registries-block" ]; then + inRegistriesBlock=true + inBuildArgs=false + inRegistriesInsecure=false + inRegistriesSearch=false + elif [ "${arg}" == "--registries-insecure" ]; then + inRegistriesInsecure=true + inBuildArgs=false + inRegistriesBlock=false + inRegistriesSearch=false + elif [ "${arg}" == "--registries-search" ]; then + inRegistriesSearch=true + inBuildArgs=false + inRegistriesBlock=false + inRegistriesInsecure=false + elif [ "${inBuildArgs}" == "true" ]; then + buildArgs+=("--build-arg" "${arg}") + elif [ "${inRegistriesBlock}" == "true" ]; then + registriesBlock="${registriesBlock}'${arg}', " + elif [ "${inRegistriesInsecure}" == "true" ]; then + registriesInsecure="${registriesInsecure}'${arg}', " + elif [ "${inRegistriesSearch}" == "true" ]; then + registriesSearch="${registriesSearch}'${arg}', " + else + echo "Invalid usage" + exit 1 + fi + done + + # Verify the existence of the context directory + if [ ! -d "${context}" ]; then + echo -e "The context directory '${context}' does not exist." + echo -n "ContextDirNotFound" > '$(results.shp-error-reason.path)' + echo -n "The context directory '${context}' does not exist." > '$(results.shp-error-message.path)' + exit 1 + fi + cd "${context}" + + # Verify the existence of the Dockerfile + if [ ! -f "${dockerfile}" ]; then + echo -e "The Dockerfile '${dockerfile}' does not exist." + echo -n "DockerfileNotFound" > '$(results.shp-error-reason.path)' + echo -n "The Dockerfile '${dockerfile}' does not exist." > '$(results.shp-error-message.path)' + exit 1 + fi + + echo "[INFO] Creating registries config file..." + if [ "${registriesSearch}" != "" ]; then + cat <>/tmp/registries.conf + [registries.search] + registries = [${registriesSearch::-2}] + + EOF + fi + if [ "${registriesInsecure}" != "" ]; then + cat <>/tmp/registries.conf + [registries.insecure] + registries = [${registriesInsecure::-2}] + + EOF + fi + if [ "${registriesBlock}" != "" ]; then + cat <>/tmp/registries.conf + [registries.block] + registries = [${registriesBlock::-2}] + + EOF + fi + + # Building the image + echo "[INFO] Building image ${image}" + buildah --storage-driver=$(params.storage-driver) \ + bud "${buildArgs[@]}" \ + --registries-conf=/tmp/registries.conf \ + --tag="${image}" \ + --file="${dockerfile}" \ + . 
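+          # No registry push happens in this step; the image is exported below as an OCI layout into
+          # $(params.shp-output-directory) so that Shipwright performs the push itself.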
+ + # Write the image + echo "[INFO] Writing image ${image}" + buildah --storage-driver=$(params.storage-driver) push \ + "${image}" \ + "oci:${target}" + # That's the separator between the shell script and its args + - -- + - --context + - $(params.shp-source-context) + - --dockerfile + - $(params.dockerfile) + - --image + - $(params.shp-output-image) + - --build-args + - $(params.build-args[*]) + - --registries-block + - $(params.registries-block[*]) + - --registries-insecure + - $(params.registries-insecure[*]) + - --registries-search + - $(params.registries-search[*]) + - --target + - $(params.shp-output-directory) + resources: + limits: + cpu: "1" + memory: 2Gi + requests: + cpu: 250m + memory: 65Mi + parameters: + - name: build-args + description: "The values for the args in the Dockerfile. Values must be in the format KEY=VALUE." + type: array + defaults: [] + - name: registries-block + description: The registries that need to block pull access. + type: array + defaults: [] + - name: registries-insecure + description: The fully-qualified name of insecure registries. An insecure registry is one that does not have a valid SSL certificate or only supports HTTP. + type: array + defaults: [] + - name: registries-search + description: The registries for searching short name images such as `golang:latest`. + type: array + defaults: + - docker.io + - quay.io + - name: dockerfile + description: The path to the Dockerfile to be used for building the image. + type: string + default: "Dockerfile" + - name: storage-driver + description: "The storage driver to use, such as 'overlay' or 'vfs'." + type: string + default: "vfs" + # For details see the "--storage-driver" section of https://github.com/containers/buildah/blob/main/docs/buildah.1.md#options + securityContext: + runAsUser: 0 + runAsGroup: 0 diff --git a/kodata/buildstrategy/buildah/buildstrategy_buildah_strategy_managed_push_cr.yaml b/kodata/buildstrategy/buildah/buildstrategy_buildah_strategy_managed_push_cr.yaml new file mode 100644 index 00000000..4434a6f4 --- /dev/null +++ b/kodata/buildstrategy/buildah/buildstrategy_buildah_strategy_managed_push_cr.yaml @@ -0,0 +1,208 @@ +--- +apiVersion: shipwright.io/v1beta1 +kind: ClusterBuildStrategy +metadata: + name: buildah-strategy-managed-push +spec: + steps: + - name: build-and-push + image: quay.io/containers/buildah:v1.32.0 + workingDir: $(params.shp-source-root) + securityContext: + capabilities: + add: + - "SETFCAP" + command: + - /bin/bash + args: + - -c + - | + set -euo pipefail + + # Parse parameters + context= + dockerfile= + image= + buildArgs=() + inBuildArgs=false + registriesBlock="" + inRegistriesBlock=false + registriesInsecure="" + inRegistriesInsecure=false + registriesSearch="" + inRegistriesSearch=false + tlsVerify=true + while [[ $# -gt 0 ]]; do + arg="$1" + shift + + if [ "${arg}" == "--context" ]; then + inBuildArgs=false + inRegistriesBlock=false + inRegistriesInsecure=false + inRegistriesSearch=false + context="$1" + shift + elif [ "${arg}" == "--dockerfile" ]; then + inBuildArgs=false + inRegistriesBlock=false + inRegistriesInsecure=false + inRegistriesSearch=false + dockerfile="$1" + shift + elif [ "${arg}" == "--image" ]; then + inBuildArgs=false + inRegistriesBlock=false + inRegistriesInsecure=false + inRegistriesSearch=false + image="$1" + shift + elif [ "${arg}" == "--build-args" ]; then + inBuildArgs=true + inRegistriesBlock=false + inRegistriesInsecure=false + inRegistriesSearch=false + elif [ "${arg}" == "--registries-block" ]; then + inRegistriesBlock=true 
+ inBuildArgs=false + inRegistriesInsecure=false + inRegistriesSearch=false + elif [ "${arg}" == "--registries-insecure" ]; then + inRegistriesInsecure=true + inBuildArgs=false + inRegistriesBlock=false + inRegistriesSearch=false + elif [ "${arg}" == "--registries-search" ]; then + inRegistriesSearch=true + inBuildArgs=false + inRegistriesBlock=false + inRegistriesInsecure=false + elif [ "${inBuildArgs}" == "true" ]; then + buildArgs+=("--build-arg" "${arg}") + elif [ "${inRegistriesBlock}" == "true" ]; then + registriesBlock="${registriesBlock}'${arg}', " + elif [ "${inRegistriesInsecure}" == "true" ]; then + registriesInsecure="${registriesInsecure}'${arg}', " + + # This assumes that the image is passed before the insecure registries which is fair in this context + if [[ ${image} == ${arg}/* ]]; then + tlsVerify=false + fi + elif [ "${inRegistriesSearch}" == "true" ]; then + registriesSearch="${registriesSearch}'${arg}', " + else + echo "Invalid usage" + exit 1 + fi + done + + # Verify the existence of the context directory + if [ ! -d "${context}" ]; then + echo -e "The context directory '${context}' does not exist." + echo -n "ContextDirNotFound" > '$(results.shp-error-reason.path)' + echo -n "The context directory '${context}' does not exist." > '$(results.shp-error-message.path)' + exit 1 + fi + cd "${context}" + + # Verify the existence of the Dockerfile + if [ ! -f "${dockerfile}" ]; then + echo -e "The Dockerfile '${dockerfile}' does not exist." + echo -n "DockerfileNotFound" > '$(results.shp-error-reason.path)' + echo -n "The Dockerfile '${dockerfile}' does not exist." > '$(results.shp-error-message.path)' + exit 1 + fi + + echo "[INFO] Creating registries config file..." + if [ "${registriesSearch}" != "" ]; then + cat <>/tmp/registries.conf + [registries.search] + registries = [${registriesSearch::-2}] + + EOF + fi + if [ "${registriesInsecure}" != "" ]; then + cat <>/tmp/registries.conf + [registries.insecure] + registries = [${registriesInsecure::-2}] + + EOF + fi + if [ "${registriesBlock}" != "" ]; then + cat <>/tmp/registries.conf + [registries.block] + registries = [${registriesBlock::-2}] + + EOF + fi + + # Building the image + echo "[INFO] Building image ${image}" + buildah --storage-driver=$(params.storage-driver) \ + bud "${buildArgs[@]}" \ + --registries-conf=/tmp/registries.conf \ + --tag="${image}" \ + --file="${dockerfile}" \ + . + + # Push the image + echo "[INFO] Pushing image ${image}" + buildah --storage-driver=$(params.storage-driver) push \ + --digestfile='$(results.shp-image-digest.path)' \ + --tls-verify="${tlsVerify}" \ + "${image}" \ + "docker://${image}" + # That's the separator between the shell script and its args + - -- + - --context + - $(params.shp-source-context) + - --dockerfile + - $(params.dockerfile) + - --image + - $(params.shp-output-image) + - --build-args + - $(params.build-args[*]) + - --registries-block + - $(params.registries-block[*]) + - --registries-insecure + - $(params.registries-insecure[*]) + - --registries-search + - $(params.registries-search[*]) + resources: + limits: + cpu: "1" + memory: 2Gi + requests: + cpu: 250m + memory: 65Mi + parameters: + - name: build-args + description: "The values for the args in the Dockerfile. Values must be in the format KEY=VALUE." + type: array + defaults: [] + - name: registries-block + description: The registries that need to block pull access. + type: array + defaults: [] + - name: registries-insecure + description: The fully-qualified name of insecure registries. 
An insecure registry is one that does not have a valid SSL certificate or only supports HTTP. + type: array + defaults: [] + - name: registries-search + description: The registries for searching short name images such as `golang:latest`. + type: array + defaults: + - docker.io + - quay.io + - name: dockerfile + description: The path to the Dockerfile to be used for building the image. + type: string + default: "Dockerfile" + - name: storage-driver + description: "The storage driver to use, such as 'overlay' or 'vfs'" + type: string + default: "vfs" + # For details see the "--storage-driver" section of https://github.com/containers/buildah/blob/main/docs/buildah.1.md#options + securityContext: + runAsUser: 0 + runAsGroup: 0 diff --git a/kodata/buildstrategy/buildkit/buildstrategy_buildkit_cr.yaml b/kodata/buildstrategy/buildkit/buildstrategy_buildkit_cr.yaml new file mode 100644 index 00000000..2469cf3e --- /dev/null +++ b/kodata/buildstrategy/buildkit/buildstrategy_buildkit_cr.yaml @@ -0,0 +1,174 @@ +--- +apiVersion: shipwright.io/v1beta1 +kind: ClusterBuildStrategy +metadata: + name: buildkit + annotations: + # See https://github.com/moby/buildkit/blob/master/docs/rootless.md#about---oci-worker-no-process-sandbox for more information + container.apparmor.security.beta.kubernetes.io/step-build-and-push: unconfined + # The usage of seccomp annotation will be deprecate in k8s v1.22.0, see + # https://kubernetes.io/docs/tutorials/clusters/seccomp/#create-a-pod-with-a-seccomp-profile-for-syscall-auditing for more information + container.seccomp.security.alpha.kubernetes.io/step-build-and-push: unconfined +spec: + parameters: + - name: build-args + description: "The values for the ARGs in the Dockerfile. Values must be in the format KEY=VALUE." + type: array + defaults: [] + - name: cache + description: "Configure BuildKit's cache usage. Allowed values are 'disabled' and 'registry'. The default is 'registry'." + type: string + default: registry + - name: platforms + description: "Build the image for different platforms. By default, the image is built for the platform used by the FROM image. If that is present for multiple platforms, then it is built for the environment's platform." + type: array + defaults: [] + - name: secrets + description: "The secrets to pass to the build. Values must be in the format ID=FILE_CONTENT." + type: array + defaults: [] + - name: dockerfile + description: The path to the Dockerfile to be used for building the image. 
+ type: string + default: "Dockerfile" + steps: + - name: build-and-push + image: moby/buildkit:nightly-rootless + imagePullPolicy: Always + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - SETGID + - SETUID + workingDir: $(params.shp-source-root) + env: + - name: DOCKER_CONFIG + value: /tekton/home/.docker + - name: HOME + value: /tekton/home + # See https://github.com/moby/buildkit/blob/master/docs/rootless.md#about---oci-worker-no-process-sandbox for more information + - name: BUILDKITD_FLAGS + value: --oci-worker-no-process-sandbox + - name: PARAM_SOURCE_CONTEXT + value: $(params.shp-source-context) + - name: PARAM_DOCKERFILE + value: $(params.dockerfile) + - name: PARAM_OUTPUT_DIRECTORY + value: $(params.shp-output-directory) + - name: PARAM_OUTPUT_IMAGE + value: $(params.shp-output-image) + - name: PARAM_OUTPUT_INSECURE + value: $(params.shp-output-insecure) + - name: PARAM_CACHE + value: $(params.cache) + command: + - /bin/ash + args: + - -c + - | + set -euo pipefail + + # Verify the existence of the context directory + if [ ! -d "${PARAM_SOURCE_CONTEXT}" ]; then + echo -e "The context directory '${PARAM_SOURCE_CONTEXT}' does not exist." + echo -n "ContextDirNotFound" > '$(results.shp-error-reason.path)' + echo -n "The context directory '${PARAM_SOURCE_CONTEXT}' does not exist." > '$(results.shp-error-message.path)' + exit 1 + fi + + # Prepare the file arguments + DOCKERFILE_PATH="${PARAM_SOURCE_CONTEXT}/${PARAM_DOCKERFILE}" + DOCKERFILE_DIR="$(dirname "${DOCKERFILE_PATH}")" + DOCKERFILE_NAME="$(basename "${DOCKERFILE_PATH}")" + + # Verify the existence of the Dockerfile + if [ ! -f "${DOCKERFILE_PATH}" ]; then + echo -e "The Dockerfile '${DOCKERFILE_PATH}' does not exist." + echo -n "DockerfileNotFound" > '$(results.shp-error-reason.path)' + echo -n "The Dockerfile '${DOCKERFILE_PATH}' does not exist." > '$(results.shp-error-message.path)' + exit 1 + fi + + # We only have ash here and therefore no bash arrays to help add dynamic arguments (the build-args) to the build command. + + echo "#!/bin/ash" > /tmp/run.sh + echo "set -euo pipefail" >> /tmp/run.sh + echo "buildctl-daemonless.sh \\" >> /tmp/run.sh + echo "build \\" >> /tmp/run.sh + echo "--frontend=dockerfile.v0 \\" >> /tmp/run.sh + echo "--opt=filename=\"${DOCKERFILE_NAME}\" \\" >> /tmp/run.sh + echo "--local=context=\"${PARAM_SOURCE_CONTEXT}\" \\" >> /tmp/run.sh + echo "--local=dockerfile=\"${DOCKERFILE_DIR}\" \\" >> /tmp/run.sh + echo "--output=type=oci,tar=false,dest=\"${PARAM_OUTPUT_DIRECTORY}\" \\" >> /tmp/run.sh + if [ "${PARAM_CACHE}" == "registry" ]; then + echo "--export-cache=type=inline \\" >> /tmp/run.sh + echo "--import-cache=type=registry,ref=\"${PARAM_OUTPUT_IMAGE}\",registry.insecure=\"${PARAM_OUTPUT_INSECURE}\" \\" >> /tmp/run.sh + elif [ "${PARAM_CACHE}" == "disabled" ]; then + echo "--no-cache \\" >> /tmp/run.sh + else + echo -e "An invalid value for the parameter 'cache' has been provided: '${PARAM_CACHE}'. Allowed values are 'disabled' and 'registry'." + echo -n "InvalidParameterValue" > '$(results.shp-error-reason.path)' + echo -n "An invalid value for the parameter 'cache' has been provided: '${PARAM_CACHE}'. Allowed values are 'disabled' and 'registry'." 
> '$(results.shp-error-message.path)' + exit 1 + fi + + stage="" + platforms="" + for a in "$@" + do + if [ "${a}" == "--build-args" ]; then + stage=build-args + elif [ "${a}" == "--platforms" ]; then + stage=platforms + elif [ "${a}" == "--secrets" ]; then + stage=secrets + elif [ "${stage}" == "build-args" ]; then + echo "--opt=\"build-arg:${a}\" \\" >> /tmp/run.sh + elif [ "${stage}" == "platforms" ]; then + if [ "${platforms}" == "" ]; then + platforms="${a}" + else + platforms="${platforms},${a}" + fi + elif [ "${stage}" == "secrets" ]; then + # Split ID=FILE_CONTENT into variables id and data + + # using head because the data could be multiline + id="$(echo "${a}" | head -1 | sed 's/=.*//')" + + # This is hacky, we remove the suffix ${id}= from all lines of the data. + # If the data would be multiple lines and a line would start with ${id}= + # then we would remove it. We could force users to give us the secret + # base64 encoded. But ultimately, the best solution might be if the user + # mounts the secret and just gives us the path here. + data="$(echo "${a}" | sed "s/^${id}=//")" + + # Write the secret data into a temporary file, once we have volume support + # in the build strategy, we should use a memory based emptyDir for this. + echo -n "${data}" > "/tmp/secret_${id}" + + # Add the secret argument + echo "--secret id=${id},src="/tmp/secret_${id}" \\" >> /tmp/run.sh + fi + done + + if [ "${platforms}" != "" ]; then + echo "--opt=\"platform=${platforms}\" \\" >> /tmp/run.sh + fi + + echo "--progress=plain" >> /tmp/run.sh + + chmod +x /tmp/run.sh + /tmp/run.sh + # That's the separator between the shell script and its args + - -- + - --build-args + - $(params.build-args[*]) + - --platforms + - $(params.platforms[*]) + - --secrets + - $(params.secrets[*]) + securityContext: + runAsUser: 1000 + runAsGroup: 1000 diff --git a/kodata/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3-heroku_cr.yaml b/kodata/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3-heroku_cr.yaml new file mode 100644 index 00000000..11494bb4 --- /dev/null +++ b/kodata/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3-heroku_cr.yaml @@ -0,0 +1,100 @@ +--- +apiVersion: shipwright.io/v1beta1 +kind: ClusterBuildStrategy +metadata: + name: buildpacks-v3-heroku +spec: + volumes: + - name: platform-env + emptyDir: {} + parameters: + - name: platform-api-version + description: The referenced version is the minimum version that all relevant buildpack implementations support. + default: "0.7" + steps: + - name: build-and-push + image: heroku/builder:22 + env: + - name: CNB_PLATFORM_API + value: $(params.platform-api-version) + - name: PARAM_SOURCE_CONTEXT + value: $(params.shp-source-context) + - name: PARAM_OUTPUT_IMAGE + value: $(params.shp-output-image) + command: + - /bin/bash + args: + - -c + - | + set -euo pipefail + + echo "> Processing environment variables..." + ENV_DIR="/platform/env" + + envs=($(env)) + + # Denying the creation of non required files from system environments. 
+ # The creation of a file named PATH (corresponding to PATH system environment) + # caused failure for python source during pip install (https://github.com/Azure-Samples/python-docs-hello-world) + block_list=("PATH" "HOSTNAME" "PWD" "_" "SHLVL" "HOME" "") + + for env in "${envs[@]}"; do + blocked=false + + IFS='=' read -r key value string <<< "$env" + + for str in "${block_list[@]}"; do + if [[ "$key" == "$str" ]]; then + blocked=true + break + fi + done + + if [ "$blocked" == "false" ]; then + path="${ENV_DIR}/${key}" + echo -n "$value" > "$path" + fi + done + + LAYERS_DIR=/tmp/.shp/layers + CACHE_DIR=/tmp/.shp/cache + + mkdir -p "$CACHE_DIR" "$LAYERS_DIR" + + function announce_phase { + printf "===> %s\n" "$1" + } + + announce_phase "ANALYZING" + /cnb/lifecycle/analyzer -layers="$LAYERS_DIR" "${PARAM_OUTPUT_IMAGE}" + + announce_phase "DETECTING" + /cnb/lifecycle/detector -app="${PARAM_SOURCE_CONTEXT}" -layers="$LAYERS_DIR" + + announce_phase "RESTORING" + /cnb/lifecycle/restorer -cache-dir="$CACHE_DIR" -layers="$LAYERS_DIR" + + announce_phase "BUILDING" + /cnb/lifecycle/builder -app="${PARAM_SOURCE_CONTEXT}" -layers="$LAYERS_DIR" + + exporter_args=( -layers="$LAYERS_DIR" -report=/tmp/report.toml -cache-dir="$CACHE_DIR" -app="${PARAM_SOURCE_CONTEXT}") + grep -q "buildpack-default-process-type" "$LAYERS_DIR/config/metadata.toml" || exporter_args+=( -process-type web ) + + announce_phase "EXPORTING" + /cnb/lifecycle/exporter "${exporter_args[@]}" "${PARAM_OUTPUT_IMAGE}" + + # Store the image digest + grep digest /tmp/report.toml | tail -n 1 | tr -d ' \"\n' | sed s/digest=// > "$(results.shp-image-digest.path)" + volumeMounts: + - mountPath: /platform/env + name: platform-env + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 250m + memory: 65Mi + securityContext: + runAsUser: 1000 + runAsGroup: 1000 diff --git a/kodata/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3-heroku_namespaced_cr.yaml b/kodata/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3-heroku_namespaced_cr.yaml new file mode 100644 index 00000000..c960cdd5 --- /dev/null +++ b/kodata/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3-heroku_namespaced_cr.yaml @@ -0,0 +1,100 @@ +--- +apiVersion: shipwright.io/v1beta1 +kind: BuildStrategy +metadata: + name: buildpacks-v3-heroku +spec: + volumes: + - name: platform-env + emptyDir: {} + parameters: + - name: platform-api-version + description: The referenced version is the minimum version that all relevant buildpack implementations support. + default: "0.7" + steps: + - name: build-and-push + image: heroku/builder:22 + env: + - name: CNB_PLATFORM_API + value: $(params.platform-api-version) + - name: PARAM_SOURCE_CONTEXT + value: $(params.shp-source-context) + - name: PARAM_OUTPUT_IMAGE + value: $(params.shp-output-image) + command: + - /bin/bash + args: + - -c + - | + set -euo pipefail + + echo "> Processing environment variables..." + ENV_DIR="/platform/env" + + envs=($(env)) + + # Denying the creation of non required files from system environments. 
+ # The creation of a file named PATH (corresponding to PATH system environment) + # caused failure for python source during pip install (https://github.com/Azure-Samples/python-docs-hello-world) + block_list=("PATH" "HOSTNAME" "PWD" "_" "SHLVL" "HOME" "") + + for env in "${envs[@]}"; do + blocked=false + + IFS='=' read -r key value string <<< "$env" + + for str in "${block_list[@]}"; do + if [[ "$key" == "$str" ]]; then + blocked=true + break + fi + done + + if [ "$blocked" == "false" ]; then + path="${ENV_DIR}/${key}" + echo -n "$value" > "$path" + fi + done + + LAYERS_DIR=/tmp/.shp/layers + CACHE_DIR=/tmp/.shp/cache + + mkdir -p "$CACHE_DIR" "$LAYERS_DIR" + + function announce_phase { + printf "===> %s\n" "$1" + } + + announce_phase "ANALYZING" + /cnb/lifecycle/analyzer -layers="$LAYERS_DIR" "${PARAM_OUTPUT_IMAGE}" + + announce_phase "DETECTING" + /cnb/lifecycle/detector -app="${PARAM_SOURCE_CONTEXT}" -layers="$LAYERS_DIR" + + announce_phase "RESTORING" + /cnb/lifecycle/restorer -cache-dir="$CACHE_DIR" -layers="$LAYERS_DIR" + + announce_phase "BUILDING" + /cnb/lifecycle/builder -app="${PARAM_SOURCE_CONTEXT}" -layers="$LAYERS_DIR" + + exporter_args=( -layers="$LAYERS_DIR" -report=/tmp/report.toml -cache-dir="$CACHE_DIR" -app="${PARAM_SOURCE_CONTEXT}") + grep -q "buildpack-default-process-type" "$LAYERS_DIR/config/metadata.toml" || exporter_args+=( -process-type web ) + + announce_phase "EXPORTING" + /cnb/lifecycle/exporter "${exporter_args[@]}" "${PARAM_OUTPUT_IMAGE}" + + # Store the image digest + grep digest /tmp/report.toml | tail -n 1 | tr -d ' \"\n' | sed s/digest=// > "$(results.shp-image-digest.path)" + volumeMounts: + - mountPath: /platform/env + name: platform-env + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 250m + memory: 65Mi + securityContext: + runAsUser: 1000 + runAsGroup: 1000 diff --git a/kodata/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3_cr.yaml b/kodata/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3_cr.yaml new file mode 100644 index 00000000..678a0701 --- /dev/null +++ b/kodata/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3_cr.yaml @@ -0,0 +1,100 @@ +--- +apiVersion: shipwright.io/v1beta1 +kind: ClusterBuildStrategy +metadata: + name: buildpacks-v3 +spec: + volumes: + - name: platform-env + emptyDir: {} + parameters: + - name: platform-api-version + description: The referenced version is the minimum version that all relevant buildpack implementations support. + default: "0.7" + steps: + - name: build-and-push + image: docker.io/paketobuildpacks/builder-jammy-full:latest + env: + - name: CNB_PLATFORM_API + value: $(params.platform-api-version) + - name: PARAM_SOURCE_CONTEXT + value: $(params.shp-source-context) + - name: PARAM_OUTPUT_IMAGE + value: $(params.shp-output-image) + command: + - /bin/bash + args: + - -c + - | + set -euo pipefail + + echo "> Processing environment variables..." + ENV_DIR="/platform/env" + + envs=($(env)) + + # Denying the creation of non required files from system environments. 
+ # The creation of a file named PATH (corresponding to PATH system environment) + # caused failure for python source during pip install (https://github.com/Azure-Samples/python-docs-hello-world) + block_list=("PATH" "HOSTNAME" "PWD" "_" "SHLVL" "HOME" "") + + for env in "${envs[@]}"; do + blocked=false + + IFS='=' read -r key value string <<< "$env" + + for str in "${block_list[@]}"; do + if [[ "$key" == "$str" ]]; then + blocked=true + break + fi + done + + if [ "$blocked" == "false" ]; then + path="${ENV_DIR}/${key}" + echo -n "$value" > "$path" + fi + done + + LAYERS_DIR=/tmp/.shp/layers + CACHE_DIR=/tmp/.shp/cache + + mkdir -p "$CACHE_DIR" "$LAYERS_DIR" + + function announce_phase { + printf "===> %s\n" "$1" + } + + announce_phase "ANALYZING" + /cnb/lifecycle/analyzer -layers="$LAYERS_DIR" "${PARAM_OUTPUT_IMAGE}" + + announce_phase "DETECTING" + /cnb/lifecycle/detector -app="${PARAM_SOURCE_CONTEXT}" -layers="$LAYERS_DIR" + + announce_phase "RESTORING" + /cnb/lifecycle/restorer -cache-dir="$CACHE_DIR" -layers="$LAYERS_DIR" + + announce_phase "BUILDING" + /cnb/lifecycle/builder -app="${PARAM_SOURCE_CONTEXT}" -layers="$LAYERS_DIR" + + exporter_args=( -layers="$LAYERS_DIR" -report=/tmp/report.toml -cache-dir="$CACHE_DIR" -app="${PARAM_SOURCE_CONTEXT}") + grep -q "buildpack-default-process-type" "$LAYERS_DIR/config/metadata.toml" || exporter_args+=( -process-type web ) + + announce_phase "EXPORTING" + /cnb/lifecycle/exporter "${exporter_args[@]}" "${PARAM_OUTPUT_IMAGE}" + + # Store the image digest + grep digest /tmp/report.toml | tail -n 1 | tr -d ' \"\n' | sed s/digest=// > "$(results.shp-image-digest.path)" + volumeMounts: + - mountPath: /platform/env + name: platform-env + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 250m + memory: 65Mi + securityContext: + runAsUser: 1001 + runAsGroup: 1000 diff --git a/kodata/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3_namespaced_cr.yaml b/kodata/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3_namespaced_cr.yaml new file mode 100644 index 00000000..034a17bf --- /dev/null +++ b/kodata/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3_namespaced_cr.yaml @@ -0,0 +1,100 @@ +--- +apiVersion: shipwright.io/v1beta1 +kind: BuildStrategy +metadata: + name: buildpacks-v3 +spec: + volumes: + - name: platform-env + emptyDir: {} + parameters: + - name: platform-api-version + description: The referenced version is the minimum version that all relevant buildpack implementations support. + default: "0.7" + steps: + - name: build-and-push + image: docker.io/paketobuildpacks/builder-jammy-full:latest + env: + - name: CNB_PLATFORM_API + value: $(params.platform-api-version) + - name: PARAM_SOURCE_CONTEXT + value: $(params.shp-source-context) + - name: PARAM_OUTPUT_IMAGE + value: $(params.shp-output-image) + command: + - /bin/bash + args: + - -c + - | + set -euo pipefail + + echo "> Processing environment variables..." + ENV_DIR="/platform/env" + + envs=($(env)) + + # Denying the creation of non required files from system environments. 
+ # The creation of a file named PATH (corresponding to PATH system environment) + # caused failure for python source during pip install (https://github.com/Azure-Samples/python-docs-hello-world) + block_list=("PATH" "HOSTNAME" "PWD" "_" "SHLVL" "HOME" "") + + for env in "${envs[@]}"; do + blocked=false + + IFS='=' read -r key value string <<< "$env" + + for str in "${block_list[@]}"; do + if [[ "$key" == "$str" ]]; then + blocked=true + break + fi + done + + if [ "$blocked" == "false" ]; then + path="${ENV_DIR}/${key}" + echo -n "$value" > "$path" + fi + done + + LAYERS_DIR=/tmp/.shp/layers + CACHE_DIR=/tmp/.shp/cache + + mkdir -p "$CACHE_DIR" "$LAYERS_DIR" + + function announce_phase { + printf "===> %s\n" "$1" + } + + announce_phase "ANALYZING" + /cnb/lifecycle/analyzer -layers="$LAYERS_DIR" "${PARAM_OUTPUT_IMAGE}" + + announce_phase "DETECTING" + /cnb/lifecycle/detector -app="${PARAM_SOURCE_CONTEXT}" -layers="$LAYERS_DIR" + + announce_phase "RESTORING" + /cnb/lifecycle/restorer -cache-dir="$CACHE_DIR" -layers="$LAYERS_DIR" + + announce_phase "BUILDING" + /cnb/lifecycle/builder -app="${PARAM_SOURCE_CONTEXT}" -layers="$LAYERS_DIR" + + exporter_args=( -layers="$LAYERS_DIR" -report=/tmp/report.toml -cache-dir="$CACHE_DIR" -app="${PARAM_SOURCE_CONTEXT}") + grep -q "buildpack-default-process-type" "$LAYERS_DIR/config/metadata.toml" || exporter_args+=( -process-type web ) + + announce_phase "EXPORTING" + /cnb/lifecycle/exporter "${exporter_args[@]}" "${PARAM_OUTPUT_IMAGE}" + + # Store the image digest + grep digest /tmp/report.toml | tail -n 1 | tr -d ' \"\n' | sed s/digest=// > "$(results.shp-image-digest.path)" + volumeMounts: + - mountPath: /platform/env + name: platform-env + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 250m + memory: 65Mi + securityContext: + runAsUser: 1001 + runAsGroup: 1000 diff --git a/kodata/buildstrategy/buildstrategy_kaniko_cr.yaml b/kodata/buildstrategy/buildstrategy_kaniko_cr.yaml new file mode 100644 index 00000000..11057b8c --- /dev/null +++ b/kodata/buildstrategy/buildstrategy_kaniko_cr.yaml @@ -0,0 +1,61 @@ +--- +apiVersion: shipwright.io/v1beta1 +kind: ClusterBuildStrategy +metadata: + name: kaniko +spec: + steps: + - name: build-and-push + image: gcr.io/kaniko-project/executor:v1.17.0 + workingDir: $(params.shp-source-root) + securityContext: + capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + - SETFCAP + - KILL + env: + - name: HOME + value: /tekton/home + - name: DOCKER_CONFIG + value: /tekton/home/.docker + - name: AWS_ACCESS_KEY_ID + value: NOT_SET + - name: AWS_SECRET_KEY + value: NOT_SET + command: + - /kaniko/executor + args: + - --dockerfile + - $(params.dockerfile) + - --context + - $(params.shp-source-context) + - --destination + - $(params.shp-output-image) + - --snapshot-mode + - redo + - --no-push + - --tar-path + - $(params.shp-output-directory)/image.tar + # https://github.com/GoogleContainerTools/kaniko/issues/2164 + - --ignore-path + - /product_uuid + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 250m + memory: 65Mi + parameters: + - name: dockerfile + description: The path to the Dockerfile to be used for building the image. 
+ type: string + default: "Dockerfile" + securityContext: + runAsUser: 0 + runAsGroup: 0 diff --git a/kodata/buildstrategy/kaniko/buildstrategy_kaniko-trivy_cr.yaml b/kodata/buildstrategy/kaniko/buildstrategy_kaniko-trivy_cr.yaml new file mode 100644 index 00000000..5b037be4 --- /dev/null +++ b/kodata/buildstrategy/kaniko/buildstrategy_kaniko-trivy_cr.yaml @@ -0,0 +1,87 @@ +# This Build Strategy will intentionally fail if the image has any +# critical CVEs. It will not be pushed into the destination registry +# if any critical vulnerabilities are found. +--- +apiVersion: shipwright.io/v1beta1 +kind: ClusterBuildStrategy +metadata: + name: kaniko-trivy +spec: + volumes: + - name: layout + emptyDir: {} + - name: tar + emptyDir: {} + steps: + - name: kaniko-build + image: gcr.io/kaniko-project/executor:v1.17.0 + workingDir: $(params.shp-source-root) + securityContext: + capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + - SETFCAP + - KILL + env: + - name: HOME + value: /tekton/home + - name: AWS_ACCESS_KEY_ID + value: NOT_SET + - name: AWS_SECRET_KEY + value: NOT_SET + command: + - /kaniko/executor + args: + - --dockerfile + - $(params.dockerfile) + - --context + - $(params.shp-source-context) + - --destination + - $(params.shp-output-image) + - --snapshot-mode + - redo + - --no-push + - --tar-path + - $(params.shp-output-directory)/image.tar + # https://github.com/GoogleContainerTools/kaniko/issues/2164 + - --ignore-path + - /product_uuid + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 250m + memory: 65Mi + - name: trivy-scan + image: docker.io/aquasec/trivy:0.46.0 + command: + - trivy + args: + - image + - --exit-code=1 + - --severity=CRITICAL + - --input + - $(params.shp-output-directory)/image.tar + env: + - name: HOME + value: /tekton/home + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 250m + memory: 65Mi + parameters: + - name: dockerfile + description: The path to the Dockerfile to be used for building the image. + type: string + default: "Dockerfile" + securityContext: + runAsUser: 0 + runAsGroup: 0 diff --git a/kodata/buildstrategy/ko/buildstrategy_ko_cr.yaml b/kodata/buildstrategy/ko/buildstrategy_ko_cr.yaml new file mode 100644 index 00000000..bafc08ff --- /dev/null +++ b/kodata/buildstrategy/ko/buildstrategy_ko_cr.yaml @@ -0,0 +1,116 @@ +--- +apiVersion: shipwright.io/v1beta1 +kind: ClusterBuildStrategy +metadata: + name: ko +spec: + parameters: + - name: go-flags + description: "Value for the GOFLAGS environment variable." + default: "" + - name: go-version + description: "Version of Go, must match a tag from https://hub.docker.com/_/golang?tab=tags" + default: "1.20" + - name: ko-version + description: "Version of ko, must be either 'latest', or a release name from https://github.com/ko-build/ko/releases" + default: latest + - name: package-directory + description: "The directory inside the context directory containing the main package." + default: "." + - name: target-platform + description: "Target platform to be built. For example: 'linux/arm64'. Multiple platforms can be provided separated by comma, for example: 'linux/arm64,linux/amd64'. The value 'all' will build all platforms supported by the base image. The value 'current' will build the platform on which the build runs." + default: current + volumes: + - name: gocache + description: "Volume to contain the GOCACHE. Can be set to a persistent volume to optimize compilation performance for rebuilds." 
+ overridable: true + emptyDir: {} + steps: + - name: build + image: golang:$(params.go-version) + imagePullPolicy: Always + workingDir: $(params.shp-source-root) + volumeMounts: + - mountPath: /gocache + name: gocache + readOnly: false + env: + - name: DOCKER_CONFIG + value: /tekton/home/.docker + - name: HOME + value: /tekton/home + - name: GOFLAGS + value: $(params.go-flags) + - name: GOCACHE + value: /gocache + - name: PARAM_OUTPUT_IMAGE + value: $(params.shp-output-image) + - name: PARAM_OUTPUT_DIRECTORY + value: $(params.shp-output-directory) + - name: PARAM_SOURCE_CONTEXT + value: $(params.shp-source-context) + - name: PARAM_SOURCE_ROOT + value: $(params.shp-source-root) + - name: PARAM_TARGET_PLATFORM + value: $(params.target-platform) + - name: PARAM_PACKAGE_DIRECTORY + value: $(params.package-directory) + - name: PARAM_KO_VERSION + value: $(params.ko-version) + command: + - /bin/bash + args: + - -c + - | + set -euo pipefail + + # Determine the ko version + KO_VERSION="${PARAM_KO_VERSION}" + if [ "${KO_VERSION}" == "latest" ]; then + KO_VERSION=$(curl --silent "https://api.github.com/repos/ko-build/ko/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/') + fi + + # Create one variable with v-suffix and one without as we need both for the download URL + if [[ ${KO_VERSION} = v* ]]; then + KO_VERSION_WITH_V=${KO_VERSION} + KO_VERSION_WITHOUT_V=${KO_VERSION:1} + else + KO_VERSION_WITH_V=v${KO_VERSION} + KO_VERSION_WITHOUT_V=${KO_VERSION} + fi + + # Download ko to the temp directory + curl -f -s -L "https://github.com/ko-build/ko/releases/download/${KO_VERSION_WITH_V}/ko_${KO_VERSION_WITHOUT_V}_$(uname)_$(uname -m | sed 's/aarch64/arm64/').tar.gz" | tar xzf - -C /tmp ko + + # Determine the platform + PLATFORM="${PARAM_TARGET_PLATFORM}" + if [ "${PLATFORM}" == "current" ]; then + PLATFORM="$(uname | tr '[:upper:]' '[:lower:]')/$(uname -m | sed -e 's/x86_64/amd64/' -e 's/aarch64/arm64/')" + fi + + # Print version information + go version + echo "ko version $(/tmp/ko version)" + + # Allow directory to be owned by other user which is normal for a volume-mounted directory. + # This allows Go to run git commands to access repository metadata. 
+ # Documentation: https://git-scm.com/docs/git-config/2.39.0#Documentation/git-config.txt-safedirectory + git config --global --add safe.directory "${PARAM_SOURCE_ROOT}" + + # Run ko + + export GOROOT="$(go env GOROOT)" + + pushd "${PARAM_SOURCE_CONTEXT}" > /dev/null + /tmp/ko build "${PARAM_PACKAGE_DIRECTORY}" --oci-layout-path="${PARAM_OUTPUT_DIRECTORY}" --platform="${PLATFORM}" --push=false + popd > /dev/null + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 250m + memory: 65Mi + securityContext: + runAsUser: 1000 + runAsGroup: 1000 diff --git a/kodata/buildstrategy/source-to-image/buildstrategy_source-to-image-redhat_cr.yaml b/kodata/buildstrategy/source-to-image/buildstrategy_source-to-image-redhat_cr.yaml new file mode 100644 index 00000000..c2be5b14 --- /dev/null +++ b/kodata/buildstrategy/source-to-image/buildstrategy_source-to-image-redhat_cr.yaml @@ -0,0 +1,163 @@ +--- +apiVersion: shipwright.io/v1beta1 +kind: ClusterBuildStrategy +metadata: + name: source-to-image-redhat +spec: + volumes: + - name: s2i + emptyDir: {} + steps: + - name: s2i-generate + image: registry.redhat.io/ocp-tools-43-tech-preview/source-to-image-rhel8:latest + workingDir: $(params.shp-source-root) + command: + - /usr/local/bin/s2i + args: + - build + - $(params.shp-source-context) + - $(params.builder-image) + - $(params.shp-output-image) + - --as-dockerfile=/s2i/Dockerfile + volumeMounts: + - name: s2i + mountPath: /s2i + - name: buildah + image: quay.io/containers/buildah:v1.32.0 + workingDir: /s2i + securityContext: + capabilities: + add: + - "SETFCAP" + command: + - /bin/bash + args: + - -c + - | + set -euo pipefail + + # Parse parameters + image= + target= + registriesBlock="" + inRegistriesBlock=false + registriesInsecure="" + inRegistriesInsecure=false + registriesSearch="" + inRegistriesSearch=false + while [[ $# -gt 0 ]]; do + arg="$1" + shift + + if [ "${arg}" == "--image" ]; then + inRegistriesBlock=false + inRegistriesInsecure=false + inRegistriesSearch=false + image="$1" + shift + elif [ "${arg}" == "--target" ]; then + inBuildArgs=false + inRegistriesBlock=false + inRegistriesInsecure=false + inRegistriesSearch=false + target="$1" + shift + elif [ "${arg}" == "--registries-block" ]; then + inRegistriesBlock=true + inRegistriesInsecure=false + inRegistriesSearch=false + elif [ "${arg}" == "--registries-insecure" ]; then + inRegistriesInsecure=true + inRegistriesBlock=false + inRegistriesSearch=false + elif [ "${arg}" == "--registries-search" ]; then + inRegistriesSearch=true + inRegistriesBlock=false + inRegistriesInsecure=false + elif [ "${inRegistriesBlock}" == "true" ]; then + registriesBlock="${registriesBlock}'${arg}', " + elif [ "${inRegistriesInsecure}" == "true" ]; then + registriesInsecure="${registriesInsecure}'${arg}', " + elif [ "${inRegistriesSearch}" == "true" ]; then + registriesSearch="${registriesSearch}'${arg}', " + else + echo "Invalid usage" + exit 1 + fi + done + + echo "[INFO] Creating registries config file..." 
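+          # The registries-search, registries-insecure, and registries-block values collected above are
+          # rendered into /tmp/registries.conf, which buildah picks up through --registries-conf.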
+ if [ "${registriesSearch}" != "" ]; then + cat <>/tmp/registries.conf + [registries.search] + registries = [${registriesSearch::-2}] + + EOF + fi + if [ "${registriesInsecure}" != "" ]; then + cat <>/tmp/registries.conf + [registries.insecure] + registries = [${registriesInsecure::-2}] + + EOF + fi + if [ "${registriesBlock}" != "" ]; then + cat <>/tmp/registries.conf + [registries.block] + registries = [${registriesBlock::-2}] + + EOF + fi + + # Building the image + echo "[INFO] Building image ${image}" + buildah --storage-driver=$(params.storage-driver) bud \ + --registries-conf=/tmp/registries.conf \ + --tag="${image}" + + # Write the image + echo "[INFO] Writing image ${image}" + buildah --storage-driver=$(params.storage-driver) push \ + "${image}" \ + "oci:${target}" + # That's the separator between the shell script and its args + - -- + - --image + - $(params.shp-output-image) + - --registries-block + - $(params.registries-block[*]) + - --registries-insecure + - $(params.registries-insecure[*]) + - --registries-search + - $(params.registries-search[*]) + - --target + - $(params.shp-output-directory) + volumeMounts: + - name: s2i + mountPath: /s2i + parameters: + - name: registries-block + description: The registries that need to block pull access. + type: array + defaults: [] + - name: registries-insecure + description: The fully-qualified name of insecure registries. An insecure registry is one that does not have a valid SSL certificate or only supports HTTP. + type: array + defaults: [] + - name: registries-search + description: The registries for searching short name images such as `golang:latest`. + type: array + defaults: + - docker.io + - quay.io + - name: builder-image + description: The builder image. + type: string + - name: storage-driver + description: "The storage driver to use, such as 'overlay' or 'vfs'." 
+ type: string + default: "vfs" + # For details see the "--storage-driver" section of https://github.com/containers/buildah/blob/main/docs/buildah.1.md#options + securityContext: + runAsUser: 0 + runAsGroup: 0 diff --git a/kodata/buildstrategy/source-to-image/buildstrategy_source-to-image_cr.yaml b/kodata/buildstrategy/source-to-image/buildstrategy_source-to-image_cr.yaml new file mode 100644 index 00000000..aaf06d93 --- /dev/null +++ b/kodata/buildstrategy/source-to-image/buildstrategy_source-to-image_cr.yaml @@ -0,0 +1,73 @@ +apiVersion: shipwright.io/v1beta1 +kind: ClusterBuildStrategy +metadata: + name: source-to-image +spec: + volumes: + - name: gen-source + emptyDir: {} + steps: + - command: + - /usr/local/bin/s2i + - build + - $(params.shp-source-context) + - $(params.builder-image) + - '--as-dockerfile' + - /gen-source/Dockerfile.gen + image: quay.io/openshift-pipeline/s2i:nightly + imagePullPolicy: Always + name: s2i-build-as-dockerfile + volumeMounts: + - mountPath: /gen-source + name: gen-source + workingDir: $(params.shp-source-root) + - name: build-and-push + image: gcr.io/kaniko-project/executor:v1.17.0 + command: + - /kaniko/executor + args: + - --dockerfile + - /gen-source/Dockerfile.gen + - --context + - /gen-source + - --destination + - $(params.shp-output-image) + - --snapshot-mode + - redo + - --no-push + - --tar-path + - $(params.shp-output-directory)/image.tar + # https://github.com/GoogleContainerTools/kaniko/issues/2164 + - --ignore-path + - /product_uuid + env: + - name: DOCKER_CONFIG + value: /tekton/home/.docker + - name: HOME + value: /tekton/home + - name: AWS_ACCESS_KEY_ID + value: NOT_SET + - name: AWS_SECRET_KEY + value: NOT_SET + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + - SETFCAP + - KILL + volumeMounts: + - mountPath: /gen-source + name: gen-source + workingDir: /gen-source + parameters: + - name: builder-image + description: The builder image. + type: string + securityContext: + runAsUser: 0 + runAsGroup: 0 diff --git a/pkg/reconciler/build/build_test.go b/pkg/reconciler/build/build_test.go index 963eec54..02da683c 100644 --- a/pkg/reconciler/build/build_test.go +++ b/pkg/reconciler/build/build_test.go @@ -72,7 +72,7 @@ func TestReconcileBuildStrategy(t *testing.T) { }, }, webhookdeployment: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Namespace: "shipwright-build", Name: "shp-build-webhook"}, + ObjectMeta: metav1.ObjectMeta{Namespace: "shipwright-build", Name: "shipwright-build-webhook"}, Status: appsv1.DeploymentStatus{ Conditions: []appsv1.DeploymentCondition{{ Type: appsv1.DeploymentAvailable, diff --git a/pkg/reconciler/common/util.go b/pkg/reconciler/common/util.go index 8cbfa66a..ece60519 100644 --- a/pkg/reconciler/common/util.go +++ b/pkg/reconciler/common/util.go @@ -222,5 +222,5 @@ func GetWebhookDeploymentName() string { if IsOpenShiftPlatform() { return "openshift-builds-webhook" } - return "shp-build-webhook" + return "shipwright-build-webhook" }
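The new controller is wired in through the init() hook in controllers/add_buildstrategy.go, which appends buildstrategy.Add to the package-level AddToManagerFuncs slice. As a rough sketch of how that registration list is typically consumed at startup (the AddToManager helper below is an assumption for illustration, not necessarily the operator's actual entry point):

package controllers

import (
    "sigs.k8s.io/controller-runtime/pkg/manager"
)

// AddToManagerFuncs collects the constructors registered by the per-controller
// add_*.go files, for example buildstrategy.Add from add_buildstrategy.go.
var AddToManagerFuncs []func(manager.Manager) error

// AddToManager runs every registered constructor against the shared manager,
// so a new controller only needs its own init() registration to be started.
func AddToManager(m manager.Manager) error {
    for _, f := range AddToManagerFuncs {
        if err := f(m); err != nil {
            return err
        }
    }
    return nil
}

Keeping registration in init() means the manager setup does not change when a controller like this one is added; only the RBAC markers and bundle manifests above need to follow.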