From d93a24d2ddea6b235eff88f7a20188ea4ede73d4 Mon Sep 17 00:00:00 2001
From: Alan Rodrigues
Date: Wed, 22 May 2024 15:32:36 -0700
Subject: [PATCH 1/2] set the copier pod security profile to restricted,
 create the copier pod from a YAML template, and change the run-status log
 level from debug to info since info is the default

---
 cmd/diskautoscaler/main.go   |   2 +-
 pkg/diskscaler/copier.yaml   |  44 ++++++++++++
 pkg/diskscaler/diskscaler.go | 123 ++++++++++++++++++++++-------------
 pkg/diskscaler/service.go    |  11 ++++--
 pkg/diskscaler/setup.go      |   4 +-
 5 files changed, 129 insertions(+), 55 deletions(-)
 create mode 100644 pkg/diskscaler/copier.yaml

diff --git a/cmd/diskautoscaler/main.go b/cmd/diskautoscaler/main.go
index 1f7c889..eec66d9 100644
--- a/cmd/diskautoscaler/main.go
+++ b/cmd/diskautoscaler/main.go
@@ -119,7 +119,7 @@ func main() {

 	mux := http.NewServeMux()

-	err = diskscaler.Setup(mux, k8sRest, baseK8sClient, dynamicK8sClient)
+	err = diskscaler.Setup(mux, k8sRest, baseK8sClient, dynamicK8sClient, Version)
 	if err != nil {
 		log.Error().Err(err).Msgf("Kubescaler setup failed")
 	}
diff --git a/pkg/diskscaler/copier.yaml b/pkg/diskscaler/copier.yaml
new file mode 100644
index 0000000..d557f6f
--- /dev/null
+++ b/pkg/diskscaler/copier.yaml
@@ -0,0 +1,44 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: {{ .PodName }}
+  namespace: {{ .Namespace }}
+  labels:
+    app.kubernetes.io/name: {{ .Name }}
+    app.kubernetes.io/instance: {{ .PodName }}
+    app.kubernetes.io/version: {{ .Version }}
+    app.kubernetes.io/component: {{ .Component }}
+    app.kubernetes.io/part-of: {{ .PartOf }}
+    app.kubernetes.io/managed-by: {{ .Service }}
+spec:
+  securityContext:
+    runAsNonRoot: true
+    seccompProfile:
+      type: RuntimeDefault
+    runAsUser: {{ .RunAsUser }}
+  containers:
+    - name: {{ .ContainerName }}
+      image: {{ .Image }}
+      command: ["/bin/sh", "-c", "--"]
+      args: ["sleep infinity"]
+      securityContext:
+        runAsNonRoot: true
+        allowPrivilegeEscalation: false
+        capabilities:
+          drop:
+            - ALL
+        readOnlyRootFilesystem: true
+        seccompProfile:
+          type: RuntimeDefault
+      volumeMounts:
+        - name: orig-vol-mount
+          mountPath: /oldData
+        - name: backup-vol-mount
+          mountPath: /newData
+  volumes:
+    - name: orig-vol-mount
+      persistentVolumeClaim:
+        claimName: {{ .OriginalPVC }}
+    - name: backup-vol-mount
+      persistentVolumeClaim:
+        claimName: {{ .NewPVC }}
\ No newline at end of file
diff --git a/pkg/diskscaler/diskscaler.go b/pkg/diskscaler/diskscaler.go
index 4d29faf..dc11ea7 100644
--- a/pkg/diskscaler/diskscaler.go
+++ b/pkg/diskscaler/diskscaler.go
@@ -3,8 +3,10 @@ package diskscaler
 import (
 	"bytes"
 	"context"
+	"embed"
 	"fmt"
+	"text/template"
 	"math/rand"
 	"slices"
 	"strconv"
 	"strings"
@@ -24,6 +26,7 @@ import (
 	"k8s.io/client-go/tools/remotecommand"
 	"k8s.io/client-go/util/retry"
 	"k8s.io/kubectl/pkg/scheme"
+	"sigs.k8s.io/yaml"
 )

 var supportedSCProvisioner = []string{"ebs.csi.aws.com"}
@@ -35,10 +38,33 @@ const (
 	inactivityDuringDelay = 10 * time.Second
 	// Setting a timeout of 4 minutes on any creation or delete operation of disk scaler
 	diskScalingOperationTimeout = 4 * time.Minute
+	copierContainerName         = "temp-container"
+	copierDefaultImage          = "cgr.dev/chainguard/wolfi-base:latest"
+	ServiceMetaDataLabel        = "kubecost-disk-autoscaler"
+	PartOfMetaDataLabel         = "kubecost-disk-autoscaler"
+	ComponentMetaDataLabel      = "copy-pod"
 )

 var allowedCharactersForPVCName = []rune("abcdefghijklmnopqrstuvwxyz")

+type CopierTemplateData struct {
+	Name          string
+	Version       string
+	Component     string
+	PartOf        string
+	Service       string
+	PodName       string
+	Namespace     string
+	ContainerName string
+	OriginalPVC   string
+	NewPVC        string
+	Image         string
+	RunAsUser     int64
+}
+
+//go:embed copier.yaml
+var templateFS embed.FS
+
 type DiskScaler struct {
 	clientConfig    *rest.Config
 	basicK8sClient  kubernetes.Interface
@@ -46,6 +72,7 @@ type DiskScaler struct {
 	clusterID       string
 	kubecostsvc     *pvsizingrecommendation.KubecostService
 	auditMode       bool
+	version         string
 }

 type pvcDetails struct {
@@ -66,7 +93,8 @@ func NewDiskScaler(clientConfig *rest.Config,
 	dynamicK8sClient *dynamic.DynamicClient,
 	clusterID string,
 	kubecostsvc *pvsizingrecommendation.KubecostService,
-	auditMode bool) (*DiskScaler, error) {
+	auditMode bool,
+	version string) (*DiskScaler, error) {
 	if basicK8sClient == nil {
 		return nil, fmt.Errorf("must have a Kubernetes client")
 	}
@@ -82,6 +110,7 @@ func NewDiskScaler(clientConfig *rest.Config,
 		clusterID:       clusterID,
 		kubecostsvc:     kubecostsvc,
 		auditMode:       auditMode,
+		version:         version,
 	}, nil
 }
@@ -616,54 +645,13 @@
 // dataMoverTransientPod create a transient pod to move data between original PV claim volume source to new PV Claim volume source
 func (ds *DiskScaler) dataMoverTransientPod(ctx context.Context, namespace string, copierPodName string, originalPVC string, newPVC string) error {
 	cpCommand := "if [ -z \"$(ls -A /oldData)\" ]; then echo \"directory is empty no need to copy\"; else cp -r /oldData/* /newData/; fi"
-	req := &v1.Pod{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "Pod",
-			APIVersion: "v1",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name: copierPodName,
-		},
-		Spec: v1.PodSpec{
-			Containers: []v1.Container{
-				{
-					Name:    "temp-container",
-					Image:   "ubuntu",
-					Command: []string{"/bin/bash", "-c", "sleep infinity"},
-					VolumeMounts: []v1.VolumeMount{
-						{
-							Name:      "orig-vol-mount",
-							MountPath: "/oldData",
-						},
-						{
-							Name:      "backup-vol-mount",
-							MountPath: "/newData",
-						},
-					},
-				},
-			},
-			Volumes: []v1.Volume{
-				{
-					Name: "orig-vol-mount",
-					VolumeSource: v1.VolumeSource{
-						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-							ClaimName: originalPVC,
-						},
-					},
-				},
-				{
-					Name: "backup-vol-mount",
-					VolumeSource: v1.VolumeSource{
-						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-							ClaimName: newPVC,
-						},
-					},
-				},
-			},
-		},
+
+	podSpec, err := createCopierPodKubernetesObject(copierPodName, namespace, originalPVC, newPVC, ds.version)
+	if err != nil {
+		return fmt.Errorf("createCopierPodKubernetesObject failed: %w", err)
 	}

-	resp, err := ds.basicK8sClient.CoreV1().Pods(namespace).Create(ctx, req, metav1.CreateOptions{})
+	resp, err := ds.basicK8sClient.CoreV1().Pods(namespace).Create(ctx, &podSpec, metav1.CreateOptions{})
 	if err != nil {
 		return fmt.Errorf("failed to create copier pod %s in namespace %s with err: %w ", copierPodName, namespace, err)
 	}
@@ -819,6 +807,47 @@
 	return nil
 }

+func createCopierPodKubernetesObject(copierPodName, namespace, originalPVC, newPVC, version string) (v1.Pod, error) {
+	// Read the pod template YAML from the embedded filesystem
+	podTemplateYAML, err := templateFS.ReadFile("copier.yaml")
+	if err != nil {
+		return v1.Pod{}, fmt.Errorf("failed to read copier.yaml with err: %w", err)
+	}
+
+	data := CopierTemplateData{
+		Name:          kubecostDataMoverTransientPodName,
+		Version:       version,
+		Component:     ComponentMetaDataLabel,
+		PartOf:        PartOfMetaDataLabel,
+		Service:       ServiceMetaDataLabel,
+		PodName:       copierPodName,
+		Namespace:     namespace,
+		ContainerName: copierContainerName,
+		Image:         copierDefaultImage,
+		OriginalPVC:   originalPVC,
+		NewPVC:        newPVC,
+		RunAsUser:     1000,
+	}
+
+	tmpl, err := template.New("pod").Parse(string(podTemplateYAML))
+	if err != nil {
+		return v1.Pod{}, fmt.Errorf("failed to parse copier pod template: %w", err)
+	}
+
+	// Execute the template into an in-memory buffer
+	var buf bytes.Buffer
+	if err := tmpl.Execute(&buf, data); err != nil {
+		return v1.Pod{}, fmt.Errorf("failed to execute template for pod %s in namespace %s: %w", copierPodName, namespace, err)
+	}
+
+	// Unmarshal the rendered YAML into a Pod object
+	var pod v1.Pod
+	if err := yaml.Unmarshal(buf.Bytes(), &pod); err != nil {
+		return v1.Pod{}, fmt.Errorf("failed to unmarshal YAML for pod %s in namespace %s: %w", copierPodName, namespace, err)
+	}
+
+	return pod, nil
+}
+
 // isGreaterQuantity returns true if resizeTo is greater than original size
 func isGreaterQuantity(originalSize resource.Quantity, resizeTo resource.Quantity) bool {
 	return resizeTo.Cmp(originalSize) == 1
diff --git a/pkg/diskscaler/service.go b/pkg/diskscaler/service.go
index bab3b85..f143b48 100644
--- a/pkg/diskscaler/service.go
+++ b/pkg/diskscaler/service.go
@@ -67,10 +67,11 @@ func NewDiskScalerService(clientConfig *rest.Config,
 	resizeAll bool,
 	auditMode bool,
 	kubecostSvc *pvsizingrecommendation.KubecostService,
-	excludedNamespaces []string) (*DiskScalerService, error) {
+	excludedNamespaces []string,
+	version string) (*DiskScalerService, error) {
 	// To-DO :fill it via kubecost API
 	clusterID := "localCluster"
-	ds, err := NewDiskScaler(clientConfig, k8sClient, dynamicK8sClient, clusterID, kubecostSvc, auditMode)
+	ds, err := NewDiskScaler(clientConfig, k8sClient, dynamicK8sClient, clusterID, kubecostSvc, auditMode, version)
 	if err != nil {
 		return nil, fmt.Errorf("unable to create NewDiskScaler: %w", err)
 	}
@@ -204,12 +205,12 @@ func (dss *DiskScalerService) startAutomatedScaling() error {
 			if dss.auditMode {
 				return
 			}
-			log.Debug().Msgf("status at %s :%+v, triggered the disk scaling", diskAutoScalerRun, status)
+			log.Info().Msgf("status at %s: %+v, triggered the disk scaling", diskAutoScalerRun, status)
 			if status.NumEnabled == 0 {
-				log.Debug().Msgf("No workloads have autoscaling enabled at %s", diskAutoScalerRun)
+				log.Info().Msgf("No workloads have autoscaling enabled at %s", diskAutoScalerRun)
 			}
 			if status.NumEligible == 0 {
-				log.Debug().Msgf("No workload with autoscaling eligible at %s", diskAutoScalerRun)
+				log.Info().Msgf("No workloads are eligible for autoscaling at %s", diskAutoScalerRun)
 			}
 		}
 	}()
diff --git a/pkg/diskscaler/setup.go b/pkg/diskscaler/setup.go
index a7067bf..5e77974 100644
--- a/pkg/diskscaler/setup.go
+++ b/pkg/diskscaler/setup.go
@@ -18,7 +18,7 @@ const (
 	KubecostNamespace = "kubecost"
 )

-func Setup(mux *http.ServeMux, clientConfig *rest.Config, k8sClient kubernetes.Interface, dynamicK8sClient *dynamic.DynamicClient) error {
+func Setup(mux *http.ServeMux, clientConfig *rest.Config, k8sClient kubernetes.Interface, dynamicK8sClient *dynamic.DynamicClient, version string) error {
 	costModelPath, err := getDiskScalerCostModelPath()
 	if len(costModelPath) == 0 {
 		return fmt.Errorf("setup of Disk Auto Scaler failed: %w", err)
 	}
@@ -47,7 +47,7 @@ func Setup(mux *http.ServeMux, clientConfig *rest.Config, k8sClient kubernetes.I
 	}

 	recommendationSvc := pvsizingrecommendation.NewKubecostService(costModelPath)
-	dss, err := NewDiskScalerService(clientConfig, k8sClient, dynamicK8sClient, resizeAll, auditMode, recommendationSvc, excludedNamespaces)
+	dss, err := NewDiskScalerService(clientConfig, k8sClient, dynamicK8sClient, resizeAll, auditMode, recommendationSvc, excludedNamespaces, version)
 	if err != nil {
 		return fmt.Errorf("failed to create disk scaler service: %w", err)
 	}

From 98aa36b5066dda3339c0e93219d9446c2f7d3c68 Mon Sep 17 00:00:00 2001
From: Alan Rodrigues
Date: Wed, 22 May 2024 16:27:33 -0700
Subject: [PATCH 2/2] run go mod tidy

---
 go.mod | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/go.mod b/go.mod
index 85de0cd..c45478d 100644
--- a/go.mod
+++ b/go.mod
@@ -12,6 +12,7 @@ require (
 	k8s.io/apimachinery v0.30.1
 	k8s.io/client-go v0.30.1
 	k8s.io/kubectl v0.30.1
+	sigs.k8s.io/yaml v1.3.0
 )

 require (
@@ -70,5 +71,4 @@ require (
 	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
-	sigs.k8s.io/yaml v1.3.0 // indirect
 )
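---

Reviewer note, not part of the patch: since createCopierPodKubernetesObject needs no
cluster access, the new template path is easy to cover with a unit test. A minimal
sketch, assuming a test file alongside diskscaler.go in package diskscaler; the test
name, sample inputs, and assertions are illustrative only:

    package diskscaler

    import "testing"

    func TestCreateCopierPodKubernetesObject(t *testing.T) {
        // Render the embedded copier.yaml with sample values (hypothetical inputs).
        pod, err := createCopierPodKubernetesObject("copier-test", "kubecost", "orig-pvc", "new-pvc", "v0.0.1")
        if err != nil {
            t.Fatalf("unexpected error: %v", err)
        }
        if pod.Name != "copier-test" || pod.Namespace != "kubecost" {
            t.Errorf("unexpected metadata: %s/%s", pod.Namespace, pod.Name)
        }
        // The restricted profile requires runAsNonRoot and a RuntimeDefault seccomp profile.
        sc := pod.Spec.SecurityContext
        if sc == nil || sc.RunAsNonRoot == nil || !*sc.RunAsNonRoot {
            t.Error("expected pod securityContext.runAsNonRoot to be true")
        }
        if len(pod.Spec.Containers) != 1 {
            t.Fatalf("expected exactly one container, got %d", len(pod.Spec.Containers))
        }
        csc := pod.Spec.Containers[0].SecurityContext
        if csc == nil || csc.AllowPrivilegeEscalation == nil || *csc.AllowPrivilegeEscalation {
            t.Error("expected container allowPrivilegeEscalation to be false")
        }
    }

Running something like this under go test would catch template or schema regressions
without needing a cluster.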