diff --git a/Dockerfile.combined b/Dockerfile.combined index 535cdc82d..b4548e9cf 100644 --- a/Dockerfile.combined +++ b/Dockerfile.combined @@ -23,6 +23,10 @@ RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o metricsexporter cmd/met # vgmanager needs 'nsenter' and other basic linux utils to correctly function FROM centos:8 + +# Install required utilities +RUN dnf install -y openssl && dnf clean all + WORKDIR / COPY --from=builder /workspace/manager . COPY --from=builder /workspace/vgmanager . diff --git a/controllers/defaults.go b/controllers/defaults.go index d54ce2e17..7fc15f042 100644 --- a/controllers/defaults.go +++ b/controllers/defaults.go @@ -86,7 +86,6 @@ var ( CSIKubeletRootDir = "/var/lib/kubelet/" NodeContainerName = "topolvm-node" TopolvmNodeContainerHealthzName = "healthz" - auxImage = "registry.access.redhat.com/ubi8/ubi-minimal" LvmdConfigFile = "/etc/topolvm/lvmd.yaml" // topoLVM Node resource requests/limits diff --git a/controllers/lvmcluster_controller.go b/controllers/lvmcluster_controller.go index 1fb9b57f9..1421e488c 100644 --- a/controllers/lvmcluster_controller.go +++ b/controllers/lvmcluster_controller.go @@ -19,9 +19,12 @@ package controllers import ( "context" "fmt" + "os" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "github.com/go-logr/logr" secv1client "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1" @@ -57,6 +60,7 @@ type LVMClusterReconciler struct { ClusterType ClusterType SecurityClient secv1client.SecurityV1Interface Namespace string + ImageName string } //+kubebuilder:rbac:groups=lvm.topolvm.io,resources=lvmclusters,verbs=get;list;watch;create;update;patch;delete @@ -101,6 +105,13 @@ func (r *LVMClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) r.Log.Error(err, "failed to check cluster type") return ctrl.Result{}, err } + + err = r.getRunningPodImage(ctx) + if err != nil { 
+ r.Log.Error(err, "failed to get operator image") + return ctrl.Result{}, err + } + result, reconcileError := r.reconcile(ctx, lvmCluster) statusError := r.updateLVMClusterStatus(ctx, lvmCluster) @@ -290,3 +301,33 @@ func (r *LVMClusterReconciler) checkIfOpenshift(ctx context.Context) error { func IsOpenshift(r *LVMClusterReconciler) bool { return r.ClusterType == ClusterTypeOCP } + +// getRunningPodImage gets the operator image and sets it in the reconciler struct +func (r *LVMClusterReconciler) getRunningPodImage(ctx context.Context) error { + + if r.ImageName == "" { + // 'POD_NAME' and 'POD_NAMESPACE' are set in env of lvm-operator when running as a container + podName := os.Getenv("POD_NAME") + if podName == "" { + return fmt.Errorf("failed to get pod name env variable") + } + + pod := &corev1.Pod{} + if err := r.Get(ctx, types.NamespacedName{Name: podName, Namespace: r.Namespace}, pod); err != nil { + return fmt.Errorf("failed to get pod %s in namespace %s: %w", podName, r.Namespace, err) + } + + for _, c := range pod.Spec.Containers { + if c.Name == LVMOperatorContainerName { + r.ImageName = c.Image + return nil + } + } + + return fmt.Errorf("failed to get container image for %s in pod %s", LVMOperatorContainerName, podName) + + } + + return nil + +} diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 28e7d32ea..920241d99 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -61,6 +61,7 @@ const ( testLvmClusterName = "test-lvmcluster" testLvmClusterNamespace = "openshift-storage" testDeviceClassName = "test" + testImageName = "test" ) var _ = BeforeSuite(func() { @@ -103,6 +104,7 @@ var _ = BeforeSuite(func() { SecurityClient: secv1client.NewForConfigOrDie(k8sManager.GetConfig()), Namespace: testLvmClusterNamespace, Log: ctrl.Log.WithName("controllers").WithName("LvmCluster"), + ImageName: testImageName, }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) diff --git a/controllers/topolvm_controller.go 
b/controllers/topolvm_controller.go index 3a41f8b6d..4c44d49e1 100644 --- a/controllers/topolvm_controller.go +++ b/controllers/topolvm_controller.go @@ -34,7 +34,7 @@ func (c topolvmController) getName() string { func (c topolvmController) ensureCreated(r *LVMClusterReconciler, ctx context.Context, lvmCluster *lvmv1alpha1.LVMCluster) error { // get the desired state of topolvm controller deployment - desiredDeployment := getControllerDeployment(lvmCluster, r.Namespace) + desiredDeployment := getControllerDeployment(lvmCluster, r.Namespace, r.ImageName) existingDeployment := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: desiredDeployment.Name, @@ -105,8 +105,7 @@ func (c topolvmController) setTopolvmControllerDesiredState(existing, desired *a return nil } -func getControllerDeployment(lvmCluster *lvmv1alpha1.LVMCluster, namespace string) *appsv1.Deployment { - +func getControllerDeployment(lvmCluster *lvmv1alpha1.LVMCluster, namespace string, initImage string) *appsv1.Deployment { // Topolvm CSI Controller Deployment var replicas int32 = 1 volumes := []corev1.Volume{ @@ -116,7 +115,7 @@ func getControllerDeployment(lvmCluster *lvmv1alpha1.LVMCluster, namespace strin // TODO: Remove custom generation of TLS certs, current it's being used in topolvm controller manager initContainers := []corev1.Container{ - *getInitContainer(), + *getInitContainer(initImage), } // get all containers that are part of csi controller deployment @@ -158,11 +157,11 @@ func getControllerDeployment(lvmCluster *lvmv1alpha1.LVMCluster, namespace strin return controllerDeployment } -func getInitContainer() *corev1.Container { +func getInitContainer(initImage string) *corev1.Container { // generation of tls certs command := []string{ - "sh", + "/usr/bin/bash", "-c", "openssl req -nodes -x509 -newkey rsa:4096 -subj '/DC=self_signed_certificate' -keyout /certs/tls.key -out /certs/tls.crt -days 3650", } @@ -173,7 +172,7 @@ func getInitContainer() *corev1.Container { ssCertGenerator := 
&corev1.Container{ Name: "self-signed-cert-generator", - Image: "alpine/openssl", + Image: initImage, Command: command, VolumeMounts: volumeMounts, } diff --git a/controllers/topolvm_node.go b/controllers/topolvm_node.go index d5387db79..4cd028d66 100644 --- a/controllers/topolvm_node.go +++ b/controllers/topolvm_node.go @@ -49,7 +49,8 @@ func (n topolvmNode) ensureCreated(r *LVMClusterReconciler, ctx context.Context, unitLogger := r.Log.WithValues("topolvmNode", n.getName()) // get desired daemonSet spec - dsTemplate := getNodeDaemonSet(lvmCluster, r.Namespace) + dsTemplate := getNodeDaemonSet(lvmCluster, r.Namespace, r.ImageName) + // create desired daemonSet or update mutable fields on existing one ds := &appsv1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ @@ -127,7 +128,8 @@ func (n topolvmNode) updateStatus(r *LVMClusterReconciler, ctx context.Context, return nil } -func getNodeDaemonSet(lvmCluster *lvmv1alpha1.LVMCluster, namespace string) *appsv1.DaemonSet { +func getNodeDaemonSet(lvmCluster *lvmv1alpha1.LVMCluster, namespace string, initImage string) *appsv1.DaemonSet { + hostPathDirectory := corev1.HostPathDirectory hostPathDirectoryOrCreateType := corev1.HostPathDirectoryOrCreate storageMedium := corev1.StorageMediumMemory @@ -163,7 +165,7 @@ func getNodeDaemonSet(lvmCluster *lvmv1alpha1.LVMCluster, namespace string) *app EmptyDir: &corev1.EmptyDirVolumeSource{Medium: storageMedium}}}, } - initContainers := []corev1.Container{*getNodeInitContainer()} + initContainers := []corev1.Container{*getNodeInitContainer(initImage)} containers := []corev1.Container{*getLvmdContainer(), *getNodeContainer(), *getCsiRegistrarContainer(), *getNodeLivenessProbeContainer()} // Affinity and tolerations @@ -210,9 +212,9 @@ func getNodeDaemonSet(lvmCluster *lvmv1alpha1.LVMCluster, namespace string) *app return nodeDaemonSet } -func getNodeInitContainer() *corev1.Container { +func getNodeInitContainer(initImage string) *corev1.Container { command := []string{ - "sh", + 
"/usr/bin/bash", "-c", fmt.Sprintf("until [ -f %s ]; do echo waiting for lvmd config file; sleep 5; done", LvmdConfigFile), } @@ -223,7 +225,7 @@ func getNodeInitContainer() *corev1.Container { fileChecker := &corev1.Container{ Name: "file-checker", - Image: auxImage, + Image: initImage, Command: command, VolumeMounts: volumeMounts, } diff --git a/controllers/vgmanager.go b/controllers/vgmanager.go index 5890aa5de..7610b770f 100644 --- a/controllers/vgmanager.go +++ b/controllers/vgmanager.go @@ -43,13 +43,10 @@ func (v vgManager) ensureCreated(r *LVMClusterReconciler, ctx context.Context, l unitLogger := r.Log.WithValues("resourceManager", v.getName()) // get desired daemonset spec - dsTemplate, err := newVGManagerDaemonset(r, ctx, lvmCluster) - if err != nil { - return fmt.Errorf("failed to get new VGManager Daemonset due to %v", err) - } + dsTemplate := newVGManagerDaemonset(lvmCluster, r.Namespace, r.ImageName) // controller reference - err = ctrl.SetControllerReference(lvmCluster, &dsTemplate, r.Scheme) + err := ctrl.SetControllerReference(lvmCluster, &dsTemplate, r.Scheme) if err != nil { return fmt.Errorf("failed to set controller reference on vgManager daemonset %q. %v", dsTemplate.Name, err) } diff --git a/controllers/vgmanager_daemonset.go b/controllers/vgmanager_daemonset.go index ee6a7045b..f3dcdc517 100644 --- a/controllers/vgmanager_daemonset.go +++ b/controllers/vgmanager_daemonset.go @@ -17,15 +17,12 @@ limitations under the License. 
package controllers import ( - "context" - "fmt" "os" lvmv1alpha1 "github.com/red-hat-storage/lvm-operator/api/v1alpha1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" ) var ( @@ -117,7 +114,7 @@ var ( ) // newVGManagerDaemonset returns the desired vgmanager daemonset for a given LVMCluster -func newVGManagerDaemonset(r *LVMClusterReconciler, ctx context.Context, lvmCluster *lvmv1alpha1.LVMCluster) (appsv1.DaemonSet, error) { +func newVGManagerDaemonset(lvmCluster *lvmv1alpha1.LVMCluster, namespace string, vgImage string) appsv1.DaemonSet { // aggregate nodeSelector and tolerations from all deviceClasses nodeSelector, tolerations := extractNodeSelectorAndTolerations(lvmCluster) volumes := []corev1.Volume{LVMDConfVol, DevHostDirVol, UDevHostDirVol, SysHostDirVol} @@ -128,17 +125,10 @@ func newVGManagerDaemonset(r *LVMClusterReconciler, ctx context.Context, lvmClus // try to get vgmanager image from env and on absence get from running pod // TODO: investigate why storing this env in a variable is failing tests image := os.Getenv("VGMANAGER_IMAGE") - var err error if image == "" { - image, err = getRunningPodImage(r, ctx) - if err != nil { - r.Log.Error(err, "failed to get image from running operator") - return appsv1.DaemonSet{}, err - } + image = vgImage } - r.Log.Info("creating VG manager deployment", "image", image) - command := []string{ "/vgmanager", } @@ -187,7 +177,7 @@ func newVGManagerDaemonset(r *LVMClusterReconciler, ctx context.Context, lvmClus ds := appsv1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: VGManagerUnit, - Namespace: r.Namespace, + Namespace: namespace, Labels: labels, }, Spec: appsv1.DaemonSetSpec{ @@ -211,28 +201,5 @@ func newVGManagerDaemonset(r *LVMClusterReconciler, ctx context.Context, lvmClus // set nodeSelector setDaemonsetNodeSelector(nodeSelector, &ds) - return ds, nil -} - -func getRunningPodImage(r *LVMClusterReconciler, ctx context.Context) 
(string, error) { - - // 'POD_NAME' and 'POD_NAMESPACE' are set in env of lvm-operator when running as a container - podName := os.Getenv("POD_NAME") - if podName == "" { - return "", fmt.Errorf("failed to get pod name env variable") - } - - pod := &corev1.Pod{} - if err := r.Get(ctx, types.NamespacedName{Name: podName, Namespace: r.Namespace}, pod); err != nil { - return "", fmt.Errorf("failed to get pod %s in namespace %s", podName, r.Namespace) - } - - for _, c := range pod.Spec.Containers { - if c.Name == LVMOperatorContainerName { - return c.Image, nil - } - } - - return "", fmt.Errorf("failed to get container image for %s in pod %s", LVMOperatorContainerName, podName) - + return ds }