diff --git a/cmd/minikube/cmd/config/config.go b/cmd/minikube/cmd/config/config.go
index effe390dec36..a61325e1354c 100644
--- a/cmd/minikube/cmd/config/config.go
+++ b/cmd/minikube/cmd/config/config.go
@@ -144,7 +144,7 @@ var settings = []Setting{
 		name:        "default-storageclass",
 		set:         SetBool,
 		validations: []setFn{IsValidAddon},
-		callbacks:   []setFn{EnableOrDisableAddon},
+		callbacks:   []setFn{EnableOrDisableStorageClasses},
 	},
 	{
 		name: "heapster",
@@ -186,7 +186,7 @@ var settings = []Setting{
 		name:        "default-storageclass",
 		set:         SetBool,
 		validations: []setFn{IsValidAddon},
-		callbacks:   []setFn{EnableOrDisableDefaultStorageClass},
+		callbacks:   []setFn{EnableOrDisableStorageClasses},
 	},
 	{
 		name:        "storage-provisioner",
@@ -194,6 +194,12 @@ var settings = []Setting{
 		validations: []setFn{IsValidAddon},
 		callbacks:   []setFn{EnableOrDisableAddon},
 	},
+	{
+		name:        "storage-provisioner-gluster",
+		set:         SetBool,
+		validations: []setFn{IsValidAddon},
+		callbacks:   []setFn{EnableOrDisableStorageClasses},
+	},
 	{
 		name:        "metrics-server",
 		set:         SetBool,
diff --git a/cmd/minikube/cmd/config/util.go b/cmd/minikube/cmd/config/util.go
index 29104b083451..683fd5d260a9 100644
--- a/cmd/minikube/cmd/config/util.go
+++ b/cmd/minikube/cmd/config/util.go
@@ -26,6 +26,7 @@ import (
 	"k8s.io/minikube/pkg/minikube/assets"
 	"k8s.io/minikube/pkg/minikube/cluster"
 	"k8s.io/minikube/pkg/minikube/config"
+	"k8s.io/minikube/pkg/minikube/constants"
 	"k8s.io/minikube/pkg/minikube/machine"
 	"k8s.io/minikube/pkg/minikube/storageclass"
 )
@@ -138,18 +139,30 @@ func EnableOrDisableAddon(name string, val string) error {
 	return nil
 }
 
-func EnableOrDisableDefaultStorageClass(name, val string) error {
+func EnableOrDisableStorageClasses(name, val string) error {
 	enable, err := strconv.ParseBool(val)
 	if err != nil {
 		return errors.Wrap(err, "Error parsing boolean")
 	}
 
-	// Special logic to disable the default storage class
-	if !enable {
-		err := storageclass.DisableDefaultStorageClass()
+	class := constants.DefaultStorageClassProvisioner
+	if name == "storage-provisioner-gluster" {
+		class = "glusterfile"
+	}
+
+	if enable {
+		// Only the StorageClass for 'name' should be marked as default
+		err := storageclass.SetDefaultStorageClass(class)
 		if err != nil {
-			return errors.Wrap(err, "Error disabling default storage class")
+			return errors.Wrapf(err, "Error making %s the default storage class", class)
+		}
+	} else {
+		// Unset the StorageClass as default
+		err := storageclass.DisableDefaultStorageClass(class)
+		if err != nil {
+			return errors.Wrapf(err, "Error disabling %s as the default storage class", class)
 		}
 	}
+
 	return EnableOrDisableAddon(name, val)
 }
diff --git a/deploy/addons/storage-provisioner-gluster/README.md b/deploy/addons/storage-provisioner-gluster/README.md
new file mode 100644
index 000000000000..9ae656db47ba
--- /dev/null
+++ b/deploy/addons/storage-provisioner-gluster/README.md
@@ -0,0 +1,141 @@
+## storage-provisioner-gluster addon
+[Gluster](https://gluster.org/) is a scalable network filesystem. This addon runs it inside Minikube to provide dynamic provisioning of PersistentVolumeClaims.
+
+### Starting Minikube
+This addon works within Minikube, without any additional configuration.
+
+```shell
+$ minikube start
+```
+
+### Enabling storage-provisioner-gluster
+To enable this addon, simply run:
+
+```
+$ minikube addons enable storage-provisioner-gluster
+```
+
+Within one minute, the addon manager should pick up the change and you should see several Pods in the `storage-gluster` namespace:
+
+```
+$ kubectl -n storage-gluster get pods
+NAME                                      READY     STATUS              RESTARTS   AGE
+glusterfile-provisioner-dbcbf54fc-726vv   1/1       Running             0          1m
+glusterfs-rvdmz                           0/1       Running             0          40s
+heketi-79997b9d85-42c49                   0/1       ContainerCreating   0          40s
+```
+
+Some of the Pods need a little more time to get up and running than others, but within a few minutes everything should be deployed and all Pods should be `READY`:
+
+```
+$ kubectl -n storage-gluster get pods
+NAME                                      READY     STATUS    RESTARTS   AGE
+glusterfile-provisioner-dbcbf54fc-726vv   1/1       Running   0          5m
+glusterfs-rvdmz                           1/1       Running   0          4m
+heketi-79997b9d85-42c49                   1/1       Running   1          4m
+```
+
+Once the Pods have status `Running`, the `glusterfile` StorageClass should have been marked as `default`:
+
+```
+$ kubectl get sc
+NAME                    PROVISIONER               AGE
+glusterfile (default)   gluster.org/glusterfile   3m
+```
+
+### Creating PVCs
+The storage in the Gluster environment is limited to 10 GiB. This is because the data is stored in the Minikube VM (a sparse file `/srv/fake-disk.img`).
+
+The following `yaml` creates a PVC, starts a CentOS developer Pod that generates a website, and deploys an NGINX webserver that provides access to the website:
+
+```
+---
+#
+# Minimal PVC where a developer can build a website.
+#
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: website
+spec:
+  accessModes:
+  - ReadWriteMany
+  resources:
+    requests:
+      storage: 2Mi
+  storageClassName: glusterfile
+---
+#
+# This pod will just download a fortune phrase and store it (as plain text) in
+# index.html on the PVC. This is how we create websites?
+#
+# The root of the website stored on the above PVC is mounted on /mnt.
+#
+apiVersion: v1
+kind: Pod
+metadata:
+  name: centos-webdev
+spec:
+  containers:
+  - image: centos:latest
+    name: centos
+    args:
+    - curl
+    - -o/mnt/index.html
+    - https://api.ef.gy/fortune
+    volumeMounts:
+    - mountPath: /mnt
+      name: website
+  # once the website is created, the pod will exit
+  restartPolicy: Never
+  volumes:
+  - name: website
+    persistentVolumeClaim:
+      claimName: website
+---
+#
+# Start a NGINX webserver with the website.
+# We'll skip creating a service, to keep things minimal.
+#
+apiVersion: v1
+kind: Pod
+metadata:
+  name: website-nginx
+spec:
+  containers:
+  - image: gcr.io/google_containers/nginx-slim:0.8
+    name: nginx
+    ports:
+    - containerPort: 80
+      name: web
+    volumeMounts:
+    - mountPath: /usr/share/nginx/html
+      name: website
+  volumes:
+  - name: website
+    persistentVolumeClaim:
+      claimName: website
+```
+
+Because the PVC has been created with the `ReadWriteMany` accessMode, both Pods can access the PVC at the same time. Other website developer Pods can use the same PVC to update the contents of the site.
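As an aside for Go developers: the same claim can also be created programmatically rather than via `kubectl`. A minimal client-go sketch, not part of this addon, assuming the `default` namespace and the context-free client-go signatures this PR already uses:

```go
package main

import (
	"log"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from the local kubeconfig, the same way
	// pkg/minikube/storageclass does in this PR.
	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
	kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})
	config, err := kubeConfig.ClientConfig()
	if err != nil {
		log.Fatal(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	// The same claim as the YAML above: 2Mi, ReadWriteMany, glusterfile.
	className := "glusterfile"
	pvc := &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "website"},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("2Mi"),
				},
			},
			StorageClassName: &className,
		},
	}

	if _, err := client.CoreV1().PersistentVolumeClaims("default").Create(pvc); err != nil {
		log.Fatal(err)
	}
}
```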
+The above configuration does not expose the website on the Minikube VM. One way to see the contents of the website is to SSH into the Minikube VM and fetch the website there:
+
+```
+$ kubectl get pods -o wide
+NAME            READY     STATUS      RESTARTS   AGE       IP           NODE
+centos-webdev   0/1       Completed   0          1m        172.17.0.9   minikube
+website-nginx   1/1       Running     0          24s       172.17.0.9   minikube
+$ minikube ssh
+                         _             _
+            _         _ ( )           ( )
+  ___ ___  (_)  ___  (_)| |/')  _   _ | |_      __
+/' _ ` _ `\| |/' _ `\| || , <  ( ) ( )| '_`\  /'__`\
+| ( ) ( ) || || ( ) || || |\`\ | (_) || |_) )( ___/
+(_) (_) (_)(_)(_) (_)(_)(_) (_)`\___/'(_,__/'`\____)
+
+$ curl http://172.17.0.9
+I came, I saw, I deleted all your files.
+$
+```
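Back on the Go side, the addon-to-class mapping introduced in `EnableOrDisableStorageClasses` (cmd/minikube/cmd/config/util.go above) is the piece most worth pinning down with a test. A hypothetical table-driven sketch; the `classForAddon` helper is an illustrative extraction, not part of this PR, and it assumes `constants.DefaultStorageClassProvisioner` is `"standard"`:

```go
package config

import "testing"

// classForAddon mirrors the selection at the top of
// EnableOrDisableStorageClasses: the Gluster addon manages the "glusterfile"
// StorageClass, every other storage addon falls back to the default class.
func classForAddon(name string) string {
	if name == "storage-provisioner-gluster" {
		return "glusterfile"
	}
	return "standard" // constants.DefaultStorageClassProvisioner
}

func TestClassForAddon(t *testing.T) {
	cases := map[string]string{
		"default-storageclass":        "standard",
		"storage-provisioner":         "standard",
		"storage-provisioner-gluster": "glusterfile",
	}
	for addon, want := range cases {
		if got := classForAddon(addon); got != want {
			t.Errorf("classForAddon(%q) = %q, want %q", addon, got, want)
		}
	}
}
```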
diff --git a/deploy/addons/storage-provisioner-gluster/glusterfs-daemonset.yaml b/deploy/addons/storage-provisioner-gluster/glusterfs-daemonset.yaml
new file mode 100644
index 000000000000..1fac8513e4a3
--- /dev/null
+++ b/deploy/addons/storage-provisioner-gluster/glusterfs-daemonset.yaml
@@ -0,0 +1,138 @@
+---
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  namespace: storage-gluster
+  name: glusterfs
+  labels:
+    glusterfs: daemonset
+    k8s-app: storage-provisioner-gluster
+    kubernetes.io/minikube-addons: storage-provisioner-gluster
+    addonmanager.kubernetes.io/mode: Reconcile
+  annotations:
+    description: GlusterFS DaemonSet
+    tags: glusterfs
+spec:
+  selector:
+    matchLabels:
+      k8s-app: storage-provisioner-gluster
+  template:
+    metadata:
+      namespace: storage-gluster
+      name: glusterfs
+      labels:
+        glusterfs: pod
+        glusterfs-node: pod
+        k8s-app: storage-provisioner-gluster
+    spec:
+      #nodeSelector:
+      #  kubernetes.io/hostname: minikube
+      hostNetwork: true
+      containers:
+      - image: quay.io/nixpanic/glusterfs-server:pr_fake-disk
+        imagePullPolicy: IfNotPresent
+        name: glusterfs
+        env:
+        - name: USE_FAKE_DISK
+          value: "enabled"
+        #- name: USE_FAKE_FILE
+        #  value: "/srv/fake-disk.img"
+        #- name: USE_FAKE_SIZE
+        #  value: "10G"
+        #- name: USE_FAKE_DEV
+        #  value: "/dev/fake"
+        resources:
+          requests:
+            memory: 100Mi
+            cpu: 100m
+        volumeMounts:
+        # default location for fake-disk.img, it needs to be persistent
+        - name: fake-disk
+          mountPath: /srv
+        # the fstab for the bricks is under /var/lib/heketi
+        - name: glusterfs-heketi
+          mountPath: "/var/lib/heketi"
+        - name: glusterfs-run
+          mountPath: "/run"
+        - name: glusterfs-lvm
+          mountPath: "/run/lvm"
+        #- name: glusterfs-etc
+        #  mountPath: "/etc/glusterfs"
+        - name: glusterfs-logs
+          mountPath: "/var/log/glusterfs"
+        - name: glusterfs-config
+          mountPath: "/var/lib/glusterd"
+        - name: glusterfs-dev
+          mountPath: "/dev"
+        # glusterfind uses /var/lib/misc/glusterfsd, yuck
+        - name: glusterfs-misc
+          mountPath: "/var/lib/misc/glusterfsd"
+        - name: glusterfs-cgroup
+          mountPath: "/sys/fs/cgroup"
+          readOnly: true
+        - name: glusterfs-ssl
+          mountPath: "/etc/ssl"
+          readOnly: true
+        - name: kernel-modules
+          mountPath: "/usr/lib/modules"
+          readOnly: true
+        securityContext:
+          capabilities: {}
+          privileged: true
+        readinessProbe:
+          timeoutSeconds: 3
+          initialDelaySeconds: 40
+          exec:
+            command:
+            - "/bin/bash"
+            - "-c"
+            - systemctl status glusterd.service
+          periodSeconds: 25
+          successThreshold: 1
+          failureThreshold: 50
+        livenessProbe:
+          timeoutSeconds: 3
+          initialDelaySeconds: 40
+          exec:
+            command:
+            - "/bin/bash"
+            - "-c"
+            - systemctl status glusterd.service
+          periodSeconds: 25
+          successThreshold: 1
+          failureThreshold: 50
+      volumes:
+      - name: fake-disk
+        hostPath:
+          path: /srv
+      - name: glusterfs-heketi
+        hostPath:
+          path: "/var/lib/heketi"
+      - name: glusterfs-run
+      - name: glusterfs-lvm
+        hostPath:
+          path: "/run/lvm"
+      - name: glusterfs-etc
+        hostPath:
+          path: "/etc/glusterfs"
+      - name: glusterfs-logs
+        hostPath:
+          path: "/var/log/glusterfs"
+      - name: glusterfs-config
+        hostPath:
+          path: "/var/lib/glusterd"
+      - name: glusterfs-dev
+        hostPath:
+          path: "/dev"
+      - name: glusterfs-misc
+        hostPath:
+          path: "/var/lib/misc/glusterfsd"
+      - name: glusterfs-cgroup
+        hostPath:
+          path: "/sys/fs/cgroup"
+      - name: glusterfs-ssl
+        hostPath:
+          path: "/etc/ssl"
+      - name: kernel-modules
+        hostPath:
+          path: "/usr/lib/modules"
diff --git a/deploy/addons/storage-provisioner-gluster/heketi-deployment.yaml b/deploy/addons/storage-provisioner-gluster/heketi-deployment.yaml
new file mode 100644
index 000000000000..2d72ace39c75
--- /dev/null
+++ b/deploy/addons/storage-provisioner-gluster/heketi-deployment.yaml
@@ -0,0 +1,158 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: storage-gluster
+  name: heketi-service-account
+  labels:
+    k8s-app: storage-provisioner-gluster
+    kubernetes.io/minikube-addons: storage-provisioner-gluster
+    addonmanager.kubernetes.io/mode: Reconcile
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    k8s-app: storage-provisioner-gluster
+    kubernetes.io/minikube-addons: storage-provisioner-gluster
+    addonmanager.kubernetes.io/mode: Reconcile
+  name: heketi-sa-view
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: edit
+subjects:
+- kind: ServiceAccount
+  namespace: storage-gluster
+  name: heketi-service-account
+---
+kind: Service
+apiVersion: v1
+metadata:
+  namespace: storage-gluster
+  name: heketi
+  labels:
+    glusterfs: heketi-service
+    heketi: service
+    k8s-app: storage-provisioner-gluster
+    kubernetes.io/minikube-addons: storage-provisioner-gluster
+    addonmanager.kubernetes.io/mode: Reconcile
+  annotations:
+    description: Exposes Heketi Service
+spec:
+  selector:
+    glusterfs: heketi-pod
+  ports:
+  - name: heketi
+    port: 8080
+    targetPort: 8080
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  namespace: storage-gluster
+  name: heketi-topology
+  labels:
+    k8s-app: storage-provisioner-gluster
+    kubernetes.io/minikube-addons: storage-provisioner-gluster
+    addonmanager.kubernetes.io/mode: Reconcile
+data:
+  minikube.json: |+
+    {
+      "clusters": [
+        {
+          "nodes": [
+            {
+              "node": {
+                "hostnames": {
+                  "manage": [
+                    "minikube"
+                  ],
+                  "storage": [
+                    "172.17.0.1"
+                  ]
+                },
+                "zone": 1
+              },
+              "devices": [
+                "/dev/fake"
+              ]
+            }
+          ]
+        }
+      ]
+    }
+
+
+---
+kind: Deployment
+apiVersion: extensions/v1beta1
+metadata:
+  namespace: storage-gluster
+  name: heketi
+  labels:
+    glusterfs: heketi-deployment
+    heketi: deployment
+    k8s-app: storage-provisioner-gluster
+    kubernetes.io/minikube-addons: storage-provisioner-gluster
+    addonmanager.kubernetes.io/mode: Reconcile
+  annotations:
+    description: Defines how to deploy Heketi
+spec:
+  replicas: 1
+  template:
+    metadata:
+      namespace: storage-gluster
+      name: heketi
+      labels:
+        glusterfs: heketi-pod
+        heketi: pod
+        k8s-app: storage-provisioner-gluster
+    spec:
+      serviceAccountName: heketi-service-account
+      containers:
+      - image: heketi/heketi:latest
+        imagePullPolicy: IfNotPresent
+        name: heketi
+        env:
+        - name: HEKETI_EXECUTOR
+          value: "kubernetes"
+        - name: HEKETI_FSTAB
+          value: "/var/lib/heketi/fstab"
+        - name: HEKETI_SNAPSHOT_LIMIT
+          value: '14'
+        - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+          value: "y"
+        - name: HEKETI_IGNORE_STALE_OPERATIONS
+          value: "true"
+        - name: HEKETI_GLUSTERAPP_LOGLEVEL
+          value: "debug"
+        # initial topology.json in case the db does not exist
+        - name: HEKETI_TOPOLOGY_FILE
+          value: "/etc/heketi/topology/minikube.json"
+        ports:
+        - containerPort: 8080
+        volumeMounts:
+        - name: db
+          mountPath: "/var/lib/heketi"
+        - name: initial-topology
+          mountPath: "/etc/heketi/topology"
+        readinessProbe:
+          timeoutSeconds: 3
+          initialDelaySeconds: 3
+          httpGet:
+            path: "/hello"
+            port: 8080
+        livenessProbe:
+          timeoutSeconds: 3
+          initialDelaySeconds: 30
+          httpGet:
+            path: "/hello"
+            port: 8080
+      volumes:
+      - name: db
+        hostPath:
+          path: "/var/lib/heketi"
+      - name: initial-topology
+        configMap:
+          name: heketi-topology
diff --git a/deploy/addons/storage-provisioner-gluster/storage-gluster-ns.yaml b/deploy/addons/storage-provisioner-gluster/storage-gluster-ns.yaml
new file mode 100644
index 000000000000..6e192f48b85a
--- /dev/null
+++ b/deploy/addons/storage-provisioner-gluster/storage-gluster-ns.yaml
@@ -0,0 +1,9 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: storage-gluster
+  labels:
+    k8s-app: storage-provisioner-gluster
+    kubernetes.io/minikube-addons: storage-provisioner-gluster
+    addonmanager.kubernetes.io/mode: Reconcile
diff --git a/deploy/addons/storage-provisioner-gluster/storage-provisioner-glusterfile.yaml b/deploy/addons/storage-provisioner-gluster/storage-provisioner-glusterfile.yaml
new file mode 100644
index 000000000000..88c7a5ab89cf
--- /dev/null
+++ b/deploy/addons/storage-provisioner-gluster/storage-provisioner-glusterfile.yaml
@@ -0,0 +1,109 @@
+---
+kind: StorageClass
+apiVersion: storage.k8s.io/v1beta1
+metadata:
+  name: glusterfile
+  labels:
+    k8s-app: storage-provisioner-gluster
+    kubernetes.io/minikube-addons: storage-provisioner-gluster
+    addonmanager.kubernetes.io/mode: EnsureExists
+  annotations:
+    storageclass.beta.kubernetes.io/is-default-class: "true"
+provisioner: gluster.org/glusterfile
+reclaimPolicy: Delete
+parameters:
+  resturl: "http://heketi.storage-gluster.svc.cluster.local:8080"
+  restuser: admin
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: glusterfile-provisioner-runner
+  labels:
+    k8s-app: storage-provisioner-gluster
+    kubernetes.io/minikube-addons: storage-provisioner-gluster
+    addonmanager.kubernetes.io/mode: Reconcile
+rules:
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["services"]
+    verbs: ["get", "create", "delete"]
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get"]
+  - apiGroups: [""]
+    resources: ["routes"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    verbs: ["get", "create", "delete"]
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: storage-gluster
+  name: glusterfile-provisioner
+  labels:
+    k8s-app: storage-provisioner-gluster
+    kubernetes.io/minikube-addons: storage-provisioner-gluster
+    addonmanager.kubernetes.io/mode: Reconcile
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: glusterfile-provisioner
+  labels:
+    k8s-app: storage-provisioner-gluster
+    kubernetes.io/minikube-addons: storage-provisioner-gluster
+    addonmanager.kubernetes.io/mode: Reconcile
+roleRef:
+  kind: ClusterRole
+  name: glusterfile-provisioner-runner
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+  namespace: storage-gluster
+  name: glusterfile-provisioner
+---
+kind: Deployment
+apiVersion: extensions/v1beta1
+metadata:
+  namespace: storage-gluster
+  name: glusterfile-provisioner
+  labels:
+    k8s-app: storage-provisioner-gluster
+    kubernetes.io/minikube-addons: storage-provisioner-gluster
+    addonmanager.kubernetes.io/mode: Reconcile
+  annotations:
+    description: Defines how to deploy the glusterfile provisioner pod.
+spec:
+  replicas: 1
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      namespace: storage-gluster
+      name: glusterfile-provisioner
+      labels:
+        glusterfs: file-provisioner-pod
+        glusterfile: provisioner-pod
+    spec:
+      serviceAccountName: glusterfile-provisioner
+      containers:
+      - name: glusterfile-provisioner
+        image: gluster/glusterfile-provisioner:latest
+        imagePullPolicy: Always
+        env:
+        - name: PROVISIONER_NAME
+          value: gluster.org/glusterfile
diff --git a/docs/addons.md b/docs/addons.md
index fdd22a88d61a..455ede5baa6a 100644
--- a/docs/addons.md
+++ b/docs/addons.md
@@ -13,6 +13,7 @@ $ minikube addons list
 - ingress: disabled
 - default-storageclass: enabled
 - storage-provisioner: enabled
+- storage-provisioner-gluster: disabled
 - nvidia-driver-installer: disabled
 - nvidia-gpu-device-plugin: disabled
 
@@ -37,6 +38,7 @@ The currently supported addons include:
 * [nvidia-driver-installer](https://github.com/GoogleCloudPlatform/container-engine-accelerators/tree/master/nvidia-driver-installer/minikube)
 * [nvidia-gpu-device-plugin](https://github.com/GoogleCloudPlatform/container-engine-accelerators/tree/master/cmd/nvidia_gpu)
 * [gvisor](../deploy/addons/gvisor/README.md)
+* [storage-provisioner-gluster](../deploy/addons/storage-provisioner-gluster/README.md)
 
 If you would like to have minikube properly start/restart custom addons, place the addon(s) you wish to be launched with minikube in the `.minikube/addons` directory. Addons in this folder will be moved to the minikube VM and launched each time minikube is started/restarted.
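The `minikube addons list` output shown above is driven by the `Addons` map that the next hunk extends. A throwaway sketch to confirm the new entry is registered (the standalone program is illustrative only, not part of this PR):

```go
package main

import (
	"fmt"
	"sort"

	"k8s.io/minikube/pkg/minikube/assets"
)

func main() {
	// Collect the registered addon names and sort them for stable output.
	names := make([]string, 0, len(assets.Addons))
	for name := range assets.Addons {
		names = append(names, name)
	}
	sort.Strings(names)

	for _, name := range names {
		fmt.Println(name) // "storage-provisioner-gluster" should now be listed
	}
}
```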
diff --git a/pkg/minikube/assets/addons.go b/pkg/minikube/assets/addons.go
index 62dbde316178..ed22d80d1ad3 100644
--- a/pkg/minikube/assets/addons.go
+++ b/pkg/minikube/assets/addons.go
@@ -89,6 +89,45 @@ var Addons = map[string]*Addon{
 			"storage-provisioner.yaml",
 			"0640"),
 	}, true, "storage-provisioner"),
+	"storage-provisioner-gluster": NewAddon([]*BinDataAsset{
+		NewBinDataAsset(
+			"deploy/addons/storage-provisioner-gluster/storage-gluster-ns.yaml",
+			constants.AddonsPath,
+			"storage-gluster-ns.yaml",
+			"0640"),
+		NewBinDataAsset(
+			"deploy/addons/storage-provisioner-gluster/glusterfs-daemonset.yaml",
+			constants.AddonsPath,
+			"glusterfs-daemonset.yaml",
+			"0640"),
+		NewBinDataAsset(
+			"deploy/addons/storage-provisioner-gluster/heketi-deployment.yaml",
+			constants.AddonsPath,
+			"heketi-deployment.yaml",
+			"0640"),
+		NewBinDataAsset(
+			"deploy/addons/storage-provisioner-gluster/storage-provisioner-glusterfile.yaml",
+			constants.AddonsPath,
+			"storage-provisioner-glusterfile.yaml",
+			"0640"),
+	}, false, "storage-provisioner-gluster"),
+	"kube-dns": NewAddon([]*BinDataAsset{
+		NewBinDataAsset(
+			"deploy/addons/kube-dns/kube-dns-controller.yaml",
+			constants.AddonsPath,
+			"kube-dns-controller.yaml",
+			"0640"),
+		NewBinDataAsset(
+			"deploy/addons/kube-dns/kube-dns-cm.yaml",
+			constants.AddonsPath,
+			"kube-dns-cm.yaml",
+			"0640"),
+		NewBinDataAsset(
+			"deploy/addons/kube-dns/kube-dns-svc.yaml",
+			constants.AddonsPath,
+			"kube-dns-svc.yaml",
+			"0640"),
+	}, false, "kube-dns"),
 	"heapster": NewAddon([]*BinDataAsset{
 		NewBinDataAsset(
 			"deploy/addons/heapster/influx-grafana-rc.yaml",
diff --git a/pkg/minikube/storageclass/storageclass.go b/pkg/minikube/storageclass/storageclass.go
index 45dffd45c572..7762cb9b8589 100644
--- a/pkg/minikube/storageclass/storageclass.go
+++ b/pkg/minikube/storageclass/storageclass.go
@@ -17,17 +17,28 @@ limitations under the License.
 
 package storageclass
 
 import (
+	"strconv"
+
 	"github.com/pkg/errors"
+	"k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/clientcmd"
-	"k8s.io/minikube/pkg/minikube/constants"
 )
 
+func annotateDefaultStorageClass(client *kubernetes.Clientset, class *v1.StorageClass, enable bool) error {
+	isDefault := strconv.FormatBool(enable)
+
+	metav1.SetMetaDataAnnotation(&class.ObjectMeta, "storageclass.beta.kubernetes.io/is-default-class", isDefault)
+	_, err := client.Storage().StorageClasses().Update(class)
+
+	return err
+}
+
 // DisableDefaultStorageClass disables the default storage class provisioner
 // The addon-manager and kubectl apply cannot delete storageclasses
-func DisableDefaultStorageClass() error {
+func DisableDefaultStorageClass(class string) error {
 	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
 	kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})
 	config, err := kubeConfig.ClientConfig()
@@ -39,9 +50,47 @@ func DisableDefaultStorageClass() error {
 		return errors.Wrap(err, "Error creating new client from kubeConfig.ClientConfig()")
 	}
 
-	err = client.Storage().StorageClasses().Delete(constants.DefaultStorageClassProvisioner, &metav1.DeleteOptions{})
+	sc, err := client.Storage().StorageClasses().Get(class, metav1.GetOptions{})
+	if err != nil {
+		return errors.Wrapf(err, "Error getting storage class %s", class)
+	}
+
+	err = annotateDefaultStorageClass(client, sc, false)
 	if err != nil {
-		return errors.Wrapf(err, "Error deleting default storage class %s", constants.DefaultStorageClassProvisioner)
+		return errors.Wrapf(err, "Error marking storage class %s as non-default", class)
+	}
+
+	return nil
+}
+
+// SetDefaultStorageClass makes sure only the class with the given name is
+// marked as default.
+func SetDefaultStorageClass(name string) error {
+	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
+	kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})
+	config, err := kubeConfig.ClientConfig()
+	if err != nil {
+		return errors.Wrap(err, "Error creating kubeConfig")
+	}
+	client, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		return errors.Wrap(err, "Error creating new client from kubeConfig.ClientConfig()")
+	}
+
+	scList, err := client.Storage().StorageClasses().List(metav1.ListOptions{})
+	if err != nil {
+		return errors.Wrap(err, "Error listing StorageClasses")
+	}
+
+	for _, sc := range scList.Items {
+		err = annotateDefaultStorageClass(client, &sc, sc.Name == name)
+		if err != nil {
+			isDefault := "non-default"
+			if sc.Name == name {
+				isDefault = "default"
+			}
+			return errors.Wrapf(err, "Error while marking storage class %s as %s", sc.Name, isDefault)
+		}
 	}
 
 	return nil
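Both new functions build their client from the local kubeconfig, which makes them awkward to unit-test directly, but the annotation flip itself can be exercised against client-go's fake clientset. A hypothetical test sketch; it inlines the annotate step because `annotateDefaultStorageClass` takes a concrete `*kubernetes.Clientset` rather than the `kubernetes.Interface` that the fake implements:

```go
package storageclass

import (
	"strconv"
	"testing"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestDefaultClassAnnotation(t *testing.T) {
	client := fake.NewSimpleClientset(
		&storagev1.StorageClass{ObjectMeta: metav1.ObjectMeta{Name: "standard"}},
		&storagev1.StorageClass{ObjectMeta: metav1.ObjectMeta{Name: "glusterfile"}},
	)

	scList, err := client.StorageV1().StorageClasses().List(metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Mark "glusterfile" as default and every other class as non-default,
	// mirroring the loop in SetDefaultStorageClass.
	for i := range scList.Items {
		sc := &scList.Items[i]
		isDefault := strconv.FormatBool(sc.Name == "glusterfile")
		metav1.SetMetaDataAnnotation(&sc.ObjectMeta, "storageclass.beta.kubernetes.io/is-default-class", isDefault)
		if _, err := client.StorageV1().StorageClasses().Update(sc); err != nil {
			t.Fatal(err)
		}
	}

	sc, err := client.StorageV1().StorageClasses().Get("glusterfile", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if sc.Annotations["storageclass.beta.kubernetes.io/is-default-class"] != "true" {
		t.Errorf("glusterfile is not annotated as the default StorageClass")
	}
}
```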