diff --git a/site/content/en/docs/tutorials/volume_snapshots_and_csi.md b/site/content/en/docs/tutorials/volume_snapshots_and_csi.md
new file mode 100644
index 000000000000..661b128eb904
--- /dev/null
+++ b/site/content/en/docs/tutorials/volume_snapshots_and_csi.md
@@ -0,0 +1,75 @@
+---
+title: "CSI Driver and Volume Snapshots"
+linkTitle: "CSI Driver and Volume Snapshots"
+weight: 1
+date: 2020-08-06
+description: >
+  CSI Driver and Volume Snapshots
+---
+
+## Overview
+
+This tutorial explains how to set up the CSI Hostpath Driver in minikube and create volume snapshots.
+
+## Prerequisites
+
+- Minikube 1.12.3 with Kubernetes 1.18.x or higher
+
+## Tutorial
+
+Support for volume snapshots in minikube is provided through the `volumesnapshots` addon. This addon provisions the required
+CRDs and deploys the Volume Snapshot Controller. It is enabled by default.
+
+However, the default storage provider in minikube does not implement the CSI interface and is therefore NOT capable of
+creating or handling volume snapshots. To use volume snapshots, you must first deploy a CSI driver.
+To make this easy, minikube offers the `csi-hostpath-driver` addon, which deploys the [CSI Hostpath Driver](https://github.com/kubernetes-csi/csi-driver-host-path).
+This addon is disabled by default.
+
+You can enable/disable either of the above addons with:
+```shell
+minikube addons enable [ADDON_NAME]
+minikube addons disable [ADDON_NAME]
+```
+
+The `csi-hostpath-driver` addon deploys its required resources into the `kube-system` namespace and sets up a dedicated
+storage class called `csi-hostpath-sc` that you need to reference in your PVCs. The driver itself is registered under the
+name `hostpath.csi.k8s.io`. Use this name wherever it is required (for example, in snapshot class definitions).
+
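+For example, a PVC that requests storage from this class (the same PVC used by the integration test added in this change) looks like this:
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: hpvc
+spec:
+  storageClassName: csi-hostpath-sc
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+```
+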
+Once both addons are enabled, you can create persistent volumes and snapshots in the standard way (for a quick test of
+volume snapshots, you can find example yaml files together with a step-by-step guide [here](https://kubernetes-csi.github.io/docs/snapshot-restore-feature.html)).
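+
+To snapshot such a volume, create a `VolumeSnapshotClass` that references the `hostpath.csi.k8s.io` driver, then a `VolumeSnapshot` that points at the PVC (again mirroring the test data in this change):
+```yaml
+apiVersion: snapshot.storage.k8s.io/v1beta1
+kind: VolumeSnapshotClass
+metadata:
+  name: csi-hostpath-snapclass
+driver: hostpath.csi.k8s.io
+deletionPolicy: Delete
+---
+apiVersion: snapshot.storage.k8s.io/v1beta1
+kind: VolumeSnapshot
+metadata:
+  name: new-snapshot-demo
+spec:
+  volumeSnapshotClassName: csi-hostpath-snapclass
+  source:
+    persistentVolumeClaimName: hpvc
+```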
+
+The driver stores all persistent volumes in the `/var/lib/csi-hostpath-data/` directory of the minikube host.
\ No newline at end of file
diff --git a/test/integration/addons_test.go b/test/integration/addons_test.go
index 72322dc2f11f..91bf343a4485 100644
--- a/test/integration/addons_test.go
+++ b/test/integration/addons_test.go
@@ -40,7 +40,7 @@ func TestAddons(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), Minutes(40))
 	defer Cleanup(t, profile, cancel)
 
-	args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller", "--addons=olm"}, StartArgs()...)
+	args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller", "--addons=olm", "--addons=volumesnapshots", "--addons=csi-hostpath-driver"}, StartArgs()...)
 	if !NoneDriver() { // none doesn't support ingress
 		args = append(args, "--addons=ingress")
 	}
@@ -60,6 +60,7 @@ func TestAddons(t *testing.T) {
 		{"MetricsServer", validateMetricsServerAddon},
 		{"HelmTiller", validateHelmTillerAddon},
 		{"Olm", validateOlmAddon},
+		{"CSI", validateCsiDriverAndSnapshots},
 	}
 	for _, tc := range tests {
 		tc := tc
@@ -395,3 +396,108 @@ func validateOlmAddon(ctx context.Context, t *testing.T, profile string) {
 		t.Errorf("failed checking operator installed: %v", err.Error())
 	}
 }
+
+func validateCsiDriverAndSnapshots(ctx context.Context, t *testing.T, profile string) {
+	defer PostMortemLogs(t, profile)
+
+	client, err := kapi.Client(profile)
+	if err != nil {
+		t.Fatalf("failed to get Kubernetes client for %s: %v", profile, err)
+	}
+
+	start := time.Now()
+	if err := kapi.WaitForPods(client, "kube-system", "kubernetes.io/minikube-addons=csi-hostpath-driver", Minutes(6)); err != nil {
+		t.Errorf("failed waiting for csi-hostpath-driver pods to stabilize: %v", err)
+	}
+	t.Logf("csi-hostpath-driver pods stabilized in %s", time.Since(start))
+
+	// create sample PVC
+	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pvc.yaml")))
+	if err != nil {
+		t.Logf("creating sample PVC with %s failed: %v", rr.Command(), err)
+	}
+
+	if err := PvcWait(ctx, t, profile, "default", "hpvc", Minutes(6)); err != nil {
+		t.Fatalf("failed waiting for PVC hpvc: %v", err)
+	}
+
+	// create sample pod with the PVC
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pv-pod.yaml")))
+	if err != nil {
+		t.Logf("creating pod with %s failed: %v", rr.Command(), err)
+	}
+
+	if _, err := PodWait(ctx, t, profile, "default", "app=task-pv-pod", Minutes(6)); err != nil {
+		t.Fatalf("failed waiting for pod task-pv-pod: %v", err)
+	}
+
+	// create sample snapshotclass
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "snapshotclass.yaml")))
+	if err != nil {
+		t.Logf("creating snapshotclass with %s failed: %v", rr.Command(), err)
+	}
+
+	// create volume snapshot
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "snapshot.yaml")))
+	if err != nil {
+		t.Logf("creating volume snapshot with %s failed: %v", rr.Command(), err)
+	}
+
+	if err := VolumeSnapshotWait(ctx, t, profile, "default", "new-snapshot-demo", Minutes(6)); err != nil {
+		t.Fatalf("failed waiting for volume snapshot new-snapshot-demo: %v", err)
+	}
+
+	// delete pod
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pod", "task-pv-pod"))
+	if err != nil {
+		t.Logf("deleting pod with %s failed: %v", rr.Command(), err)
+	}
+
+	// delete pvc
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pvc", "hpvc"))
+	if err != nil {
+		t.Logf("deleting pvc with %s failed: %v", rr.Command(), err)
+	}
+
+	// restore pv from snapshot
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pvc-restore.yaml")))
+	if err != nil {
+		t.Logf("creating pvc with %s failed: %v", rr.Command(), err)
+	}
+
+	if err = PvcWait(ctx, t, profile, "default", "hpvc-restore", Minutes(6)); err != nil {
+		t.Fatalf("failed waiting for PVC hpvc-restore: %v", err)
+	}
+
+	// create pod from restored snapshot
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pv-pod-restore.yaml")))
+	if err != nil {
+		t.Logf("creating pod with %s failed: %v", rr.Command(), err)
+	}
+
+	if _, err := PodWait(ctx, t, profile, "default", "app=task-pv-pod-restore", Minutes(6)); err != nil {
+		t.Fatalf("failed waiting for pod task-pv-pod-restore: %v", err)
+	}
+
+	// CLEANUP
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pod", "task-pv-pod-restore"))
+	if err != nil {
+		t.Logf("cleanup with %s failed: %v", rr.Command(), err)
+	}
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pvc", "hpvc-restore"))
+	if err != nil {
+		t.Logf("cleanup with %s failed: %v", rr.Command(), err)
+	}
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "volumesnapshot", "new-snapshot-demo"))
+	if err != nil {
+		t.Logf("cleanup with %s failed: %v", rr.Command(), err)
+	}
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "csi-hostpath-driver", "--alsologtostderr", "-v=1"))
+	if err != nil {
+		t.Errorf("failed to disable csi-hostpath-driver addon: args %q: %v", rr.Command(), err)
+	}
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "volumesnapshots", "--alsologtostderr", "-v=1"))
+	if err != nil {
+		t.Errorf("failed to disable volumesnapshots addon: args %q: %v", rr.Command(), err)
+	}
+}
%v", t.Name(), ns, name, err) + return false, nil + } + + isReady, err := strconv.ParseBool(strings.TrimSpace(res.Stdout.String())) + if err != nil { + t.Logf("%s: WARNING: volume snapshot get for %q %q returned: %v", t.Name(), ns, name, res.Stdout.String()) + return false, nil + } + + return isReady, nil + } + + return wait.PollImmediate(1*time.Second, timeout, f) +} + // Status returns a minikube component status as a string func Status(ctx context.Context, t *testing.T, path string, profile string, key string, node string) string { t.Helper() diff --git a/test/integration/testdata/csi-hostpath-driver/pv-pod-restore.yaml b/test/integration/testdata/csi-hostpath-driver/pv-pod-restore.yaml new file mode 100644 index 000000000000..6a544d18d3df --- /dev/null +++ b/test/integration/testdata/csi-hostpath-driver/pv-pod-restore.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Pod +metadata: + name: task-pv-pod-restore + labels: + app: task-pv-pod-restore +spec: + volumes: + - name: task-pv-storage + persistentVolumeClaim: + claimName: hpvc-restore + containers: + - name: task-pv-container + image: nginx + ports: + - containerPort: 80 + name: "http-server" + volumeMounts: + - mountPath: "/usr/share/nginx/html" + name: task-pv-storage + + diff --git a/test/integration/testdata/csi-hostpath-driver/pv-pod.yaml b/test/integration/testdata/csi-hostpath-driver/pv-pod.yaml new file mode 100644 index 000000000000..62df9996477c --- /dev/null +++ b/test/integration/testdata/csi-hostpath-driver/pv-pod.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Pod +metadata: + name: task-pv-pod + labels: + app: task-pv-pod +spec: + volumes: + - name: task-pv-storage + persistentVolumeClaim: + claimName: hpvc + containers: + - name: task-pv-container + image: nginx + ports: + - containerPort: 80 + name: "http-server" + volumeMounts: + - mountPath: "/usr/share/nginx/html" + name: task-pv-storage + + diff --git a/test/integration/testdata/csi-hostpath-driver/pvc-restore.yaml b/test/integration/testdata/csi-hostpath-driver/pvc-restore.yaml new file mode 100644 index 000000000000..942d0cf8a875 --- /dev/null +++ b/test/integration/testdata/csi-hostpath-driver/pvc-restore.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: hpvc-restore +spec: + storageClassName: csi-hostpath-sc + dataSource: + name: new-snapshot-demo + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/test/integration/testdata/csi-hostpath-driver/pvc.yaml b/test/integration/testdata/csi-hostpath-driver/pvc.yaml new file mode 100644 index 000000000000..cb3c4560ddf8 --- /dev/null +++ b/test/integration/testdata/csi-hostpath-driver/pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: hpvc +spec: + storageClassName: csi-hostpath-sc + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/test/integration/testdata/csi-hostpath-driver/snapshot.yaml b/test/integration/testdata/csi-hostpath-driver/snapshot.yaml new file mode 100644 index 000000000000..86a102b88d48 --- /dev/null +++ b/test/integration/testdata/csi-hostpath-driver/snapshot.yaml @@ -0,0 +1,8 @@ +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshot +metadata: + name: new-snapshot-demo +spec: + volumeSnapshotClassName: csi-hostpath-snapclass + source: + persistentVolumeClaimName: hpvc diff --git a/test/integration/testdata/csi-hostpath-driver/snapshotclass.yaml 
diff --git a/test/integration/testdata/csi-hostpath-driver/pv-pod-restore.yaml b/test/integration/testdata/csi-hostpath-driver/pv-pod-restore.yaml
new file mode 100644
index 000000000000..6a544d18d3df
--- /dev/null
+++ b/test/integration/testdata/csi-hostpath-driver/pv-pod-restore.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: task-pv-pod-restore
+  labels:
+    app: task-pv-pod-restore
+spec:
+  volumes:
+    - name: task-pv-storage
+      persistentVolumeClaim:
+        claimName: hpvc-restore
+  containers:
+    - name: task-pv-container
+      image: nginx
+      ports:
+        - containerPort: 80
+          name: "http-server"
+      volumeMounts:
+        - mountPath: "/usr/share/nginx/html"
+          name: task-pv-storage
+
+
diff --git a/test/integration/testdata/csi-hostpath-driver/pv-pod.yaml b/test/integration/testdata/csi-hostpath-driver/pv-pod.yaml
new file mode 100644
index 000000000000..62df9996477c
--- /dev/null
+++ b/test/integration/testdata/csi-hostpath-driver/pv-pod.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: task-pv-pod
+  labels:
+    app: task-pv-pod
+spec:
+  volumes:
+    - name: task-pv-storage
+      persistentVolumeClaim:
+        claimName: hpvc
+  containers:
+    - name: task-pv-container
+      image: nginx
+      ports:
+        - containerPort: 80
+          name: "http-server"
+      volumeMounts:
+        - mountPath: "/usr/share/nginx/html"
+          name: task-pv-storage
+
+
diff --git a/test/integration/testdata/csi-hostpath-driver/pvc-restore.yaml b/test/integration/testdata/csi-hostpath-driver/pvc-restore.yaml
new file mode 100644
index 000000000000..942d0cf8a875
--- /dev/null
+++ b/test/integration/testdata/csi-hostpath-driver/pvc-restore.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: hpvc-restore
+spec:
+  storageClassName: csi-hostpath-sc
+  dataSource:
+    name: new-snapshot-demo
+    kind: VolumeSnapshot
+    apiGroup: snapshot.storage.k8s.io
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
diff --git a/test/integration/testdata/csi-hostpath-driver/pvc.yaml b/test/integration/testdata/csi-hostpath-driver/pvc.yaml
new file mode 100644
index 000000000000..cb3c4560ddf8
--- /dev/null
+++ b/test/integration/testdata/csi-hostpath-driver/pvc.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: hpvc
+spec:
+  storageClassName: csi-hostpath-sc
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
diff --git a/test/integration/testdata/csi-hostpath-driver/snapshot.yaml b/test/integration/testdata/csi-hostpath-driver/snapshot.yaml
new file mode 100644
index 000000000000..86a102b88d48
--- /dev/null
+++ b/test/integration/testdata/csi-hostpath-driver/snapshot.yaml
@@ -0,0 +1,8 @@
+apiVersion: snapshot.storage.k8s.io/v1beta1
+kind: VolumeSnapshot
+metadata:
+  name: new-snapshot-demo
+spec:
+  volumeSnapshotClassName: csi-hostpath-snapclass
+  source:
+    persistentVolumeClaimName: hpvc
diff --git a/test/integration/testdata/csi-hostpath-driver/snapshotclass.yaml b/test/integration/testdata/csi-hostpath-driver/snapshotclass.yaml
new file mode 100644
index 000000000000..892dfd0c831d
--- /dev/null
+++ b/test/integration/testdata/csi-hostpath-driver/snapshotclass.yaml
@@ -0,0 +1,6 @@
+apiVersion: snapshot.storage.k8s.io/v1beta1
+kind: VolumeSnapshotClass
+metadata:
+  name: csi-hostpath-snapclass
+driver: hostpath.csi.k8s.io # csi-hostpath
+deletionPolicy: Delete