v0.1.0 pre-release of volume group snapshot controller (#1)
* v0.1.0 pre-release of volume group snapshotter.
  Does not include dev-test and integration-test.

* Delete .idea directory
eroytman authored Jun 18, 2021
1 parent 7d7191d commit b3a317e
Showing 72 changed files with 4,845 additions and 1 deletion.
9 changes: 9 additions & 0 deletions .gitignore
@@ -0,0 +1,9 @@
.vscode/
*.out
*.html
.idea
bin/
csi-vxflexos/
goscaleio/
dell-csi-extensions/

15 changes: 15 additions & 0 deletions Dockerfile
@@ -0,0 +1,15 @@
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3

LABEL vendor="Dell Inc." \
name="dellcsi-vg-snapshotter" \
summary="CSI VG Snapshotter for Dell EMC Powerflex" \
description="Dell Storage VolumeGroup Snapshot Controller for CSI" \
version="1.0.0" \
license="Apache-2.0"

#COPY licenses /licenses

RUN microdnf update -y && microdnf install -y tar gzip

COPY ./bin/vg-snapshotter .
ENTRYPOINT ["/vg-snapshotter"]
165 changes: 165 additions & 0 deletions Makefile
@@ -0,0 +1,165 @@
# VERSION defines the project version for the bundle.
# Update this value when you upgrade the version of your project.
# To re-generate a bundle for another specific version without changing the standard setup, you can:
# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
VERSION ?= 0.0.1

# CHANNELS define the bundle channels used in the bundle.
# Add a new line here if you would like to change its default config. (E.g CHANNELS = "preview,fast,stable")
# To re-generate a bundle for other specific channels without changing the standard setup, you can:
# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=preview,fast,stable)
# - use environment variables to overwrite this value (e.g export CHANNELS="preview,fast,stable")
ifneq ($(origin CHANNELS), undefined)
BUNDLE_CHANNELS := --channels=$(CHANNELS)
endif

# DEFAULT_CHANNEL defines the default channel used in the bundle.
# Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable")
# To re-generate a bundle for any other default channel without changing the default setup, you can:
# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable)
# - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable")
ifneq ($(origin DEFAULT_CHANNEL), undefined)
BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL)
endif
BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)

# BUNDLE_IMG defines the image:tag used for the bundle.
# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>)
BUNDLE_IMG ?= controller-bundle:$(VERSION)

# Image URL to use all building/pushing image targets
IMG ?= controller:v0.93
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false"

# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
else
GOBIN=$(shell go env GOBIN)
endif

all: build

##@ General

# The help target prints out all targets with their descriptions organized
# beneath their categories. The categories are represented by '##@' and the
# target descriptions by '##'. The awk command is responsible for reading the
# entire set of makefiles included in this invocation, looking for lines of the
# form xyz: ## something, and then pretty-printing the target and its help. Then,
# if there's a line with ##@ something, that gets pretty-printed as a category.
# More info on the usage of ANSI control characters for terminal formatting:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
# More info on the awk command:
# http://linuxcommand.org/lc3_adv_awk.php

help: ## Display this help.
	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

##@ Development

manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
	$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases

generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
	$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."

fmt: ## Run go fmt against code.
	go fmt ./...

vet: ## Run go vet against code.
	go vet ./...

ENVTEST_ASSETS_DIR=$(shell pwd)/testbin
test: manifests generate fmt vet ## Run tests.
	mkdir -p ${ENVTEST_ASSETS_DIR}
	test -f ${ENVTEST_ASSETS_DIR}/setup-envtest.sh || curl -sSLo ${ENVTEST_ASSETS_DIR}/setup-envtest.sh https://raw.githubusercontent.com/kubernetes-sigs/controller-runtime/v0.7.2/hack/setup-envtest.sh
	source ${ENVTEST_ASSETS_DIR}/setup-envtest.sh; fetch_envtest_tools $(ENVTEST_ASSETS_DIR); setup_envtest_env $(ENVTEST_ASSETS_DIR); go test ./... -coverprofile cover.out

unit-test:
	go clean -cache && go test -v -coverprofile=c.out ./controllers/dellcsi*

int-test: ## Run integration tests using Gherkin scenarios.
	cd test/integration-test && ./run.sh

dev-test: ## Run dev tests.
	cd test/dev-test && ./start_server.sh && go test dev_test.go -v

##@ Build

build: generate fmt vet ## Build manager binary.
	go build -o bin/vg-snapshotter main.go

run: manifests generate fmt vet ## Run a controller from your host.
	go run ./main.go

gosec:
	gosec -exclude-dir=csi-vxflexos -exclude-dir=goscaleio -exclude-dir=dell-csi-extensions -quiet -log /tmp/gosec.log -out=gosecresults.csv -fmt=csv ./...

check: gosec
	go mod tidy
	gofmt -w ./.
	golint -set_exit_status ./.
	go vet ./...

docker-build: ## Build docker image with the manager.
	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o bin/vg-snapshotter main.go
	docker build -t ${IMG} .

podman-build: ## Build podman image with the manager.
	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o bin/vg-snapshotter main.go
	podman build -t ${IMG} .

docker-push: ## Push docker image with the manager.
	docker push ${IMG}

##@ Deployment

install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
	$(KUSTOMIZE) build config/crd | kubectl apply -f -

uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config.
	$(KUSTOMIZE) build config/crd | kubectl delete -f -

deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
	$(KUSTOMIZE) build config/default | kubectl apply -f -

undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config.
	$(KUSTOMIZE) build config/default | kubectl delete -f -


CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
controller-gen: ## Download controller-gen locally if necessary.
	$(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.4.1)

KUSTOMIZE = $(shell pwd)/bin/kustomize
kustomize: ## Download kustomize locally if necessary.
	$(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7)

# go-get-tool will 'go get' any package $2 and install it to $1.
PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
define go-get-tool
@[ -f $(1) ] || { \
set -e ;\
TMP_DIR=$$(mktemp -d) ;\
cd $$TMP_DIR ;\
go mod init tmp ;\
echo "Downloading $(2)" ;\
GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\
rm -rf $$TMP_DIR ;\
}
endef

.PHONY: bundle ## Generate bundle manifests and metadata, then validate generated files.
bundle: manifests kustomize
	operator-sdk generate kustomize manifests -q
	cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG)
	$(KUSTOMIZE) build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS)
	operator-sdk bundle validate ./bundle

.PHONY: bundle-build ## Build the bundle image.
bundle-build:
	docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) .
18 changes: 18 additions & 0 deletions PROJECT
@@ -0,0 +1,18 @@
domain: storage.dell.com
layout: go.kubebuilder.io/v3
plugins:
  manifests.sdk.operatorframework.io/v2: {}
  scorecard.sdk.operatorframework.io/v2: {}
projectName: dell-csi-volumegroup-snapshotter
repo: github.com/dell/dell-csi-volumegroup-snapshotter
resources:
- api:
    crdVersion: v1
    namespaced: true
  controller: true
  domain: storage.dell.com
  group: volumegroup
  kind: DellCsiVolumeGroupSnapshot
  path: github.com/dell/dell-csi-volumegroup-snapshotter/api/v1alpha1
  version: v1alpha1
version: "3"
168 changes: 167 additions & 1 deletion README.md
@@ -1 +1,167 @@
# csi-volumegroup-snapshotter
# CSM Volume Group Snapshotter
Many stateful Kubernetes applications use several persistent volumes to store data.
To create recoverable snapshots of the volumes used by such applications, it is necessary to be able to create
consistent snapshots across all volumes of the application at the same time.
Dell CSM Volume Group Snapshotter is an operator that extends the Kubernetes API to support crash-consistent
snapshots of groups of volumes.
This operator consists of the VolumeGroupSnapshot CRD and the csi-volumegroupsnapshotter controller. The csi-volumegroupsnapshotter
is a sidecar container that runs in the controller pod of the CSI driver.
The csi-volumegroupsnapshotter uses a CSI extension, implemented by Dell EMC CSI drivers, to manage volume group snapshots on
the backend arrays.

CSM Volume Group Snapshotter is currently in a Technical Preview phase and should be considered alpha software.
We are actively seeking feedback from users about its features.
Please provide feedback using <TBD>.
We will take that input, along with the results of our own extensive testing,
and incrementally improve the software. We do not recommend or support it for production use at this time.

## Volume Group Snapshot CRD
In Kubernetes, volume group snapshots are represented as instances of the VolumeGroupSnapshot CRD.
Example of a VolumeGroupSnapshot instance in Kubernetes (shown as `kubectl describe` output):
```yaml
Name:         vg1-snap1
Namespace:    helmtest-vxflexos
Labels:       <none>
Annotations:  <none>
API Version:  volumegroup.storage.dell.com/v1alpha1
Kind:         DellCsiVolumeGroupSnapshot
Metadata:
  Creation Timestamp:  2021-05-07T16:18:15Z
  Generation:          1
  Managed Fields:
    API Version:  volumegroup.storage.dell.com/v1alpha1
    Fields Type:  FieldsV1
    .............
    Manager:      vg-snapshotter
    Operation:    Update
    Time:         2021-05-07T16:18:17Z
  Resource Version:  24607275
  UID:               c2f53f33-1bd6-40ef-b1df-59b85627834d
Spec:
  Driver Name:            csi-vxflexos.dellemc.com
  Member Reclaim Policy:  retain
  Pvc Label:              volumeGroup1
  Volumesnapshotclass:    vxflexos-snapclass
Status:
  Creation Time:      2021-05-07T16:08:32Z
  Snapshot Group ID:  4d4a2e5a36080e0f-bab0ef6900000002
  Snapshots:          vg1-snap1-0-pvol1,vg1-snap1-1-pvol0

```
To create an instance of VolumeGroupSnapshot in a Kubernetes cluster, create a .yaml file similar to this example, VGS.yaml:
```yaml
apiVersion: volumegroup.storage.dell.com/v1alpha1
kind: DellCsiVolumeGroupSnapshot
metadata:
  name: "vg1-snap1"
  namespace: "helmtest-vxflexos"
spec:
  driverName: "csi-vxflexos.dellemc.com"
  # defines how to process VolumeSnapshot members when the volume group snapshot is deleted
  # "retain" --- keep VolumeSnapshot instances
  # "delete" --- delete VolumeSnapshot instances
  memberReclaimPolicy: "retain"
  # volume snapshot class to use for VolumeSnapshot members in the volume group snapshot
  volumesnapshotclass: "vxflexos-snapclass"
  pvcLabel: "volumeGroup1"
```
Run command: `kubectl create -f VGS.yaml`
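
For orientation, the spec and status fields above correspond to Go API types under the project's `api/v1alpha1` package. The sketch below is illustrative only: the field names, JSON tags, and types are inferred from the YAML example and the `kubectl describe` output, and may differ from the generated types in the repository.

```go
// Package v1alpha1 -- illustrative sketch of the DellCsiVolumeGroupSnapshot API
// types. Field names and JSON tags are inferred from the examples in this README
// and may not match the types generated in the repository.
package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// DellCsiVolumeGroupSnapshotSpec mirrors the spec section of VGS.yaml.
type DellCsiVolumeGroupSnapshotSpec struct {
	DriverName          string `json:"driverName"`
	MemberReclaimPolicy string `json:"memberReclaimPolicy"` // "retain" or "delete"
	Volumesnapshotclass string `json:"volumesnapshotclass"`
	PvcLabel            string `json:"pvcLabel"`
}

// DellCsiVolumeGroupSnapshotStatus mirrors the status section shown above.
type DellCsiVolumeGroupSnapshotStatus struct {
	SnapshotGroupID string      `json:"snapshotGroupID,omitempty"`
	CreationTime    metav1.Time `json:"creationTime,omitempty"`
	Snapshots       string      `json:"snapshots,omitempty"` // comma-separated member VolumeSnapshot names
}

// DellCsiVolumeGroupSnapshot is the top-level custom resource.
type DellCsiVolumeGroupSnapshot struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   DellCsiVolumeGroupSnapshotSpec   `json:"spec,omitempty"`
	Status DellCsiVolumeGroupSnapshotStatus `json:"status,omitempty"`
}
```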

## csi-volumegroupsnapshotter Controller

The csi-volumegroupsnapshotter controller processes reconcile requests for VolumeGroupSnapshot events.
#### Reconcile logic for VolumeGroupSnapshot create event
Reconciliation steps:
1. Find all PVC instances whose volume-group label matches the `pvcLabel` attribute of the volume group snapshot (a sketch of this lookup follows the list), for example:
```yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvol0
  namespace: helmtest-vxflexos
  labels:
    volume-group: volumeGroup1
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 8Gi
  storageClassName: vxflexos
```
2. Get the volumeHandle of the corresponding PersistentVolume instance for each PVC in this set
3. Call the CreateVolumeGroupSnapshot() CSI API extension method of the CSI driver with the list of volume handles and the volume group
   snapshot name
4. Once the driver responds with a list of Snapshot objects, create VolumeSnapshot and VolumeSnapshotContent instances in Kubernetes
   for each snapshot in the group. To associate the VolumeSnapshot instances with their
   parent group, these objects are labeled with the VolumeGroupSnapshot name.
5. Update the status of the VolumeGroupSnapshot to set the groupID, creationTime, and list of VolumeSnapshot member names.
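
The lookup in steps 1 and 2 could be implemented with a controller-runtime client roughly as follows. This is a simplified sketch, not the repository's actual reconciler; the helper name `collectVolumeHandles` is hypothetical, while the `volume-group` label key comes from the PVC example above.

```go
// Sketch only: list PVCs that carry the volume-group label and collect the
// volumeHandle of each bound PersistentVolume. Not the repository's actual code;
// collectVolumeHandles is a hypothetical helper name.
package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func collectVolumeHandles(ctx context.Context, c client.Client, namespace, pvcLabel string) ([]string, error) {
	var pvcs corev1.PersistentVolumeClaimList
	if err := c.List(ctx, &pvcs,
		client.InNamespace(namespace),
		client.MatchingLabels{"volume-group": pvcLabel},
	); err != nil {
		return nil, err
	}

	handles := make([]string, 0, len(pvcs.Items))
	for _, pvc := range pvcs.Items {
		if pvc.Status.Phase != corev1.ClaimBound {
			continue // only bound PVCs have a backing PersistentVolume
		}
		var pv corev1.PersistentVolume
		if err := c.Get(ctx, client.ObjectKey{Name: pvc.Spec.VolumeName}, &pv); err != nil {
			return nil, err
		}
		if pv.Spec.CSI != nil {
			handles = append(handles, pv.Spec.CSI.VolumeHandle)
		}
	}
	return handles, nil
}
```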

#### Reconcile logic for VolumeGroupSnapshot delete event
Reconciliation steps:
1. Call the DeleteVolumeGroupSnapshot CSI API extension method of the CSI driver with the volume group snapshot ID
2. Once the driver responds, remove the volume group snapshot label from the VolumeSnapshot members
3. Delete the VolumeGroupSnapshot in Kubernetes
4. Process the member VolumeSnapshot instances based on the value of
   `memberReclaimPolicy` in the VolumeGroupSnapshot instance (see the sketch below). For `delete`, call the Kubernetes API to delete each
   VolumeSnapshot instance; for `retain`, keep the VolumeSnapshot instances
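
A minimal sketch of the `memberReclaimPolicy` branch in step 4, assuming the member VolumeSnapshot objects have already been collected (for example by listing VolumeSnapshots carrying the group label). The helper name `processMembers` is hypothetical, and the members are kept abstract as `client.Object` rather than concrete VolumeSnapshot types.

```go
// Sketch only: apply memberReclaimPolicy to the member VolumeSnapshots.
// processMembers is a hypothetical helper, not the repository's actual code.
package sketch

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func processMembers(ctx context.Context, c client.Client, policy string, members []client.Object) error {
	switch policy {
	case "delete":
		// Delete each member VolumeSnapshot through the Kubernetes API.
		for _, snap := range members {
			if err := c.Delete(ctx, snap); err != nil && !apierrors.IsNotFound(err) {
				return err
			}
		}
	case "retain":
		// Keep the VolumeSnapshot instances; nothing to do.
	default:
		return fmt.Errorf("unsupported memberReclaimPolicy %q", policy)
	}
	return nil
}
```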

## CSI Extension for Volume Group Snapshot Operations in Drivers
The CSI extension API is defined [here](https://github.com/dell/dell-csi-extensions) under volumeGroupSnapshot.
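
For readers who have not yet looked at that repository, the shape of the contract the controller relies on can be summarized as a Go interface. This is a hypothetical sketch only, not the actual dell-csi-extensions API; consult the linked repository for the real volumeGroupSnapshot definitions.

```go
// Hypothetical sketch of the volume-group operations the controller depends on.
// This is NOT the actual dell-csi-extensions API; see the linked repository for
// the real volumeGroupSnapshot definitions.
package sketch

import "context"

type VolumeGroupSnapshotter interface {
	// CreateVolumeGroupSnapshot takes a crash-consistent snapshot of all listed
	// volume handles at the same time and returns the backend group ID.
	CreateVolumeGroupSnapshot(ctx context.Context, name string, volumeHandles []string) (groupID string, err error)

	// DeleteVolumeGroupSnapshot removes the group snapshot identified by groupID
	// from the backend array.
	DeleteVolumeGroupSnapshot(ctx context.Context, groupID string) error
}
```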

## Supported CSI Drivers
Currently, in the initial Technical Preview, CSM Volume Group Snapshotter supports creating and deleting volume group
snapshots on the PowerFlex array.
Support for additional arrays is planned for the near future.

## Deploying CSM Volume Group Snapshotter
Install the Volume Group Snapshot alpha CRD in your cluster:
* `wget https://github.com/dell/dell-csi-volumegroup-snapshotter/tree/master/config/crd/bases`
* `kubectl create -f config/crd/bases`

Configure all the helm chart parameters described below before deploying the drivers.

### Helm Chart Installation

These installation instructions apply to the Helm chart in the [PowerFlex CSI Driver](https://github.com/dell/csi-powerflex) repository,
version v1.5.0.
Drivers that support Helm chart deployment allow CSM Volume Group Snapshotter to be _optionally_ deployed
through variables in the chart. There is a _vgsnapshotter_ block in the _values.yaml_ file of the chart
that by default looks similar to the text below:

```yaml
# Volume Group Snapshotter feature is an optional feature under development and tech preview.
# Enable this feature only after contacting support for additional information.
vgsnapshotter:
  enabled: false
  image:
```
To deploy CSM Volume Group Snapshotter with the driver, the following changes are required:
1. Enable CSM Volume Group Snapshotter by setting the `vgsnapshotter.enabled` boolean to true.
2. Specify the Volume Group Snapshotter image to be used in `vgsnapshotter.image`.
3. Install the PowerFlex driver with `csi_install.sh`.

### How to Build Controller Image
```
git clone https://github.com/dell/dell-csi-volumegroup-snapshotter.git
cd dell-csi-volumegroup-snapshotter
make docker-build    # builds the image; alternatively run: make podman-build
```

## Testing Approach
### Unit Tests
To run unit tests, at the top level of the repository, run
```make unit-test```

### Integration Tests
To run integration tests, at the top level of the repository, run
```make int-test```
For more information, consult the [integration test README](test/integration-test/README.md).

### Helm Tests
To run the Helm tests, consult the [Helm test README](test/helm/README.MD).

