From 9db690714dc9c00934f00a78e607ce3ad6d165ea Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Fri, 19 Jul 2019 16:13:49 +0800 Subject: [PATCH 01/34] kubebuilder init --domain azure --license none --- .envrc | 1 + .gitignore | 3 + Dockerfile | 25 ++++ Makefile | 64 +++++++++ PROJECT | 3 + config/certmanager/certificate.yaml | 24 ++++ config/certmanager/kustomization.yaml | 26 ++++ config/certmanager/kustomizeconfig.yaml | 16 +++ config/default/kustomization.yaml | 43 ++++++ config/default/manager_auth_proxy_patch.yaml | 24 ++++ config/default/manager_image_patch.yaml | 12 ++ .../manager_prometheus_metrics_patch.yaml | 19 +++ config/default/manager_webhook_patch.yaml | 23 ++++ config/default/webhookcainjection_patch.yaml | 15 +++ config/manager/kustomization.yaml | 2 + config/manager/manager.yaml | 39 ++++++ config/rbac/auth_proxy_role.yaml | 13 ++ config/rbac/auth_proxy_role_binding.yaml | 12 ++ config/rbac/auth_proxy_service.yaml | 18 +++ config/rbac/kustomization.yaml | 11 ++ config/rbac/leader_election_role.yaml | 26 ++++ config/rbac/leader_election_role_binding.yaml | 12 ++ config/rbac/role_binding.yaml | 12 ++ config/webhook/kustomization.yaml | 6 + config/webhook/kustomizeconfig.yaml | 25 ++++ config/webhook/service.yaml | 12 ++ go.mod | 9 ++ go.sum | 126 ++++++++++++++++++ hack/boilerplate.go.txt | 3 + main.go | 55 ++++++++ 30 files changed, 679 insertions(+) create mode 100644 .envrc create mode 100644 Dockerfile create mode 100644 Makefile create mode 100644 PROJECT create mode 100644 config/certmanager/certificate.yaml create mode 100644 config/certmanager/kustomization.yaml create mode 100644 config/certmanager/kustomizeconfig.yaml create mode 100644 config/default/kustomization.yaml create mode 100644 config/default/manager_auth_proxy_patch.yaml create mode 100644 config/default/manager_image_patch.yaml create mode 100644 config/default/manager_prometheus_metrics_patch.yaml create mode 100644 config/default/manager_webhook_patch.yaml create mode 100644 config/default/webhookcainjection_patch.yaml create mode 100644 config/manager/kustomization.yaml create mode 100644 config/manager/manager.yaml create mode 100644 config/rbac/auth_proxy_role.yaml create mode 100644 config/rbac/auth_proxy_role_binding.yaml create mode 100644 config/rbac/auth_proxy_service.yaml create mode 100644 config/rbac/kustomization.yaml create mode 100644 config/rbac/leader_election_role.yaml create mode 100644 config/rbac/leader_election_role_binding.yaml create mode 100644 config/rbac/role_binding.yaml create mode 100644 config/webhook/kustomization.yaml create mode 100644 config/webhook/kustomizeconfig.yaml create mode 100644 config/webhook/service.yaml create mode 100644 go.mod create mode 100644 go.sum create mode 100644 hack/boilerplate.go.txt create mode 100644 main.go diff --git a/.envrc b/.envrc new file mode 100644 index 00000000000..37b10962dbc --- /dev/null +++ b/.envrc @@ -0,0 +1 @@ +export GO111MODULE=on diff --git a/.gitignore b/.gitignore index f1c181ec9c5..60a853ba4cd 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,6 @@ # Output of the go coverage tool, specifically when used with LiteIDE *.out + +bin/ +vendor/ diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000000..e6c589aebd9 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,25 @@ +# Build the manager binary +FROM golang:1.12.5 as builder + +WORKDIR /workspace +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much 
+# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the go source +COPY main.go main.go +COPY api/ api/ +COPY controllers/ controllers/ + +# Build +RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go + +# Use distroless as minimal base image to package the manager binary +# Refer to https://github.com/GoogleContainerTools/distroless for more details +FROM gcr.io/distroless/static:latest +WORKDIR / +COPY --from=builder /workspace/manager . +ENTRYPOINT ["/manager"] diff --git a/Makefile b/Makefile new file mode 100644 index 00000000000..2d9d3dda15a --- /dev/null +++ b/Makefile @@ -0,0 +1,64 @@ + +# Image URL to use all building/pushing image targets +IMG ?= controller:latest +# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) +CRD_OPTIONS ?= "crd:trivialVersions=true" + +all: manager + +# Run tests +test: generate fmt vet manifests + go test ./api/... ./controllers/... -coverprofile cover.out + +# Build manager binary +manager: generate fmt vet + go build -o bin/manager main.go + +# Run against the configured Kubernetes cluster in ~/.kube/config +run: generate fmt vet + go run ./main.go + +# Install CRDs into a cluster +install: manifests + kubectl apply -f config/crd/bases + +# Deploy controller in the configured Kubernetes cluster in ~/.kube/config +deploy: manifests + kubectl apply -f config/crd/bases + kustomize build config/default | kubectl apply -f - + +# Generate manifests e.g. CRD, RBAC etc. +manifests: controller-gen + $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases + +# Run go fmt against code +fmt: + go fmt ./... + +# Run go vet against code +vet: + go vet ./... + +# Generate code +generate: controller-gen + $(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths=./api/... + +# Build the docker image +docker-build: test + docker build . -t ${IMG} + @echo "updating kustomize image patch file for manager resource" + sed -i'' -e 's@image: .*@image: '"${IMG}"'@' ./config/default/manager_image_patch.yaml + +# Push the docker image +docker-push: + docker push ${IMG} + +# find or download controller-gen +# download controller-gen if necessary +controller-gen: +ifeq (, $(shell which controller-gen)) + go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.2.0-beta.2 +CONTROLLER_GEN=$(shell go env GOPATH)/bin/controller-gen +else +CONTROLLER_GEN=$(shell which controller-gen) +endif diff --git a/PROJECT b/PROJECT new file mode 100644 index 00000000000..17a54e0b035 --- /dev/null +++ b/PROJECT @@ -0,0 +1,3 @@ +version: "2" +domain: azure +repo: github.com/Azure/azure-service-operator diff --git a/config/certmanager/certificate.yaml b/config/certmanager/certificate.yaml new file mode 100644 index 00000000000..9d6bad1e12c --- /dev/null +++ b/config/certmanager/certificate.yaml @@ -0,0 +1,24 @@ +# The following manifests contain a self-signed issuer CR and a certificate CR. 
+# More document can be found at https://docs.cert-manager.io +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Issuer +metadata: + name: selfsigned-issuer + namespace: system +spec: + selfSigned: {} +--- +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml + namespace: system +spec: + # $(SERVICENAME) and $(NAMESPACE) will be substituted by kustomize + commonName: $(SERVICENAME).$(NAMESPACE).svc + dnsNames: + - $(SERVICENAME).$(NAMESPACE).svc.cluster.local + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/config/certmanager/kustomization.yaml b/config/certmanager/kustomization.yaml new file mode 100644 index 00000000000..8181bc3a270 --- /dev/null +++ b/config/certmanager/kustomization.yaml @@ -0,0 +1,26 @@ +resources: +- certificate.yaml + +# the following config is for teaching kustomize how to do var substitution +vars: +- name: NAMESPACE # namespace of the service and the certificate CR + objref: + kind: Service + version: v1 + name: webhook-service + fieldref: + fieldpath: metadata.namespace +- name: CERTIFICATENAME + objref: + kind: Certificate + group: certmanager.k8s.io + version: v1alpha1 + name: serving-cert # this name should match the one in certificate.yaml +- name: SERVICENAME + objref: + kind: Service + version: v1 + name: webhook-service + +configurations: +- kustomizeconfig.yaml diff --git a/config/certmanager/kustomizeconfig.yaml b/config/certmanager/kustomizeconfig.yaml new file mode 100644 index 00000000000..49e0b1e7a46 --- /dev/null +++ b/config/certmanager/kustomizeconfig.yaml @@ -0,0 +1,16 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +nameReference: +- kind: Issuer + group: certmanager.k8s.io + fieldSpecs: + - kind: Certificate + group: certmanager.k8s.io + path: spec/issuerRef/name + +varReference: +- kind: Certificate + group: certmanager.k8s.io + path: spec/commonName +- kind: Certificate + group: certmanager.k8s.io + path: spec/dnsNames diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml new file mode 100644 index 00000000000..f62b8280aa8 --- /dev/null +++ b/config/default/kustomization.yaml @@ -0,0 +1,43 @@ +# Adds namespace to all resources. +namespace: azure-service-operator-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: azure-service-operator- + +# Labels to add to all resources and selectors. +#commonLabels: +# someName: someValue + +bases: +- ../crd +- ../rbac +- ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment next line. 'WEBHOOK' components are required. +#- ../certmanager + +patches: +- manager_image_patch.yaml + # Protect the /metrics endpoint by putting it behind auth. + # Only one of manager_auth_proxy_patch.yaml and + # manager_prometheus_metrics_patch.yaml should be enabled. +- manager_auth_proxy_patch.yaml + # If you want your controller-manager to expose the /metrics + # endpoint w/o any authn/z, uncomment the following line and + # comment manager_auth_proxy_patch.yaml. 
+ # Only one of manager_auth_proxy_patch.yaml and + # manager_prometheus_metrics_patch.yaml should be enabled. +#- manager_prometheus_metrics_patch.yaml + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml +#- manager_webhook_patch.yaml + +# [CAINJECTION] Uncomment next line to enable the CA injection in the admission webhooks. +# Uncomment 'CAINJECTION' in crd/kustomization.yaml to enable the CA injection in the admission webhooks. +# 'CERTMANAGER' needs to be enabled to use ca injection +#- webhookcainjection_patch.yaml diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml new file mode 100644 index 00000000000..d3994fb918f --- /dev/null +++ b/config/default/manager_auth_proxy_patch.yaml @@ -0,0 +1,24 @@ +# This patch inject a sidecar container which is a HTTP proxy for the controller manager, +# it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: kube-rbac-proxy + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0 + args: + - "--secure-listen-address=0.0.0.0:8443" + - "--upstream=http://127.0.0.1:8080/" + - "--logtostderr=true" + - "--v=10" + ports: + - containerPort: 8443 + name: https + - name: manager + args: + - "--metrics-addr=127.0.0.1:8080" diff --git a/config/default/manager_image_patch.yaml b/config/default/manager_image_patch.yaml new file mode 100644 index 00000000000..eb909570e17 --- /dev/null +++ b/config/default/manager_image_patch.yaml @@ -0,0 +1,12 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + # Change the value of image field below to your controller image URL + - image: IMAGE_URL + name: manager diff --git a/config/default/manager_prometheus_metrics_patch.yaml b/config/default/manager_prometheus_metrics_patch.yaml new file mode 100644 index 00000000000..0b96c6813e0 --- /dev/null +++ b/config/default/manager_prometheus_metrics_patch.yaml @@ -0,0 +1,19 @@ +# This patch enables Prometheus scraping for the manager pod. 
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + metadata: + annotations: + prometheus.io/scrape: 'true' + spec: + containers: + # Expose the prometheus metrics on default port + - name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml new file mode 100644 index 00000000000..f2f7157b464 --- /dev/null +++ b/config/default/manager_webhook_patch.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + ports: + - containerPort: 443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: webhook-server-cert diff --git a/config/default/webhookcainjection_patch.yaml b/config/default/webhookcainjection_patch.yaml new file mode 100644 index 00000000000..f6d71cb768f --- /dev/null +++ b/config/default/webhookcainjection_patch.yaml @@ -0,0 +1,15 @@ +# This patch add annotation to admission webhook config and +# the variables $(NAMESPACE) and $(CERTIFICATENAME) will be substituted by kustomize. +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: mutating-webhook-configuration + annotations: + certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME) +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: validating-webhook-configuration + annotations: + certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME) diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml new file mode 100644 index 00000000000..5c5f0b84cba --- /dev/null +++ b/config/manager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- manager.yaml diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml new file mode 100644 index 00000000000..b6c85a52d5f --- /dev/null +++ b/config/manager/manager.yaml @@ -0,0 +1,39 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + labels: + control-plane: controller-manager + spec: + containers: + - command: + - /manager + args: + - --enable-leader-election + image: controller:latest + name: manager + resources: + limits: + cpu: 100m + memory: 30Mi + requests: + cpu: 100m + memory: 20Mi + terminationGracePeriodSeconds: 10 diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml new file mode 100644 index 00000000000..618f5e4177c --- /dev/null +++ b/config/rbac/auth_proxy_role.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: proxy-role +rules: +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: ["create"] +- apiGroups: ["authorization.k8s.io"] + resources: + - subjectaccessreviews + verbs: ["create"] diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml new file mode 100644 index 00000000000..48ed1e4b85c --- /dev/null +++ 
b/config/rbac/auth_proxy_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: proxy-role +subjects: +- kind: ServiceAccount + name: default + namespace: system diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml new file mode 100644 index 00000000000..d61e5469fb5 --- /dev/null +++ b/config/rbac/auth_proxy_service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "8443" + prometheus.io/scheme: https + prometheus.io/scrape: "true" + labels: + control-plane: controller-manager + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + targetPort: https + selector: + control-plane: controller-manager diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml new file mode 100644 index 00000000000..817f1fe6138 --- /dev/null +++ b/config/rbac/kustomization.yaml @@ -0,0 +1,11 @@ +resources: +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# Comment the following 3 lines if you want to disable +# the auth proxy (https://github.com/brancz/kube-rbac-proxy) +# which protects your /metrics endpoint. +- auth_proxy_service.yaml +- auth_proxy_role.yaml +- auth_proxy_role_binding.yaml diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml new file mode 100644 index 00000000000..85093a8c240 --- /dev/null +++ b/config/rbac/leader_election_role.yaml @@ -0,0 +1,26 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - update + - patch diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 00000000000..eed16906f4d --- /dev/null +++ b/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: default + namespace: system diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml new file mode 100644 index 00000000000..8f2658702c8 --- /dev/null +++ b/config/rbac/role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: system diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml new file mode 100644 index 00000000000..9cf26134e4d --- /dev/null +++ b/config/webhook/kustomization.yaml @@ -0,0 +1,6 @@ +resources: +- manifests.yaml +- service.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml new file mode 100644 index 00000000000..25e21e3c963 --- /dev/null +++ b/config/webhook/kustomizeconfig.yaml @@ -0,0 +1,25 @@ +# the following config is for teaching 
kustomize where to look at when substituting vars. +# It requires kustomize v2.1.0 or newer to work properly. +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + - kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + +namespace: +- kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true +- kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true + +varReference: +- path: metadata/annotations diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml new file mode 100644 index 00000000000..b4861025ab4 --- /dev/null +++ b/config/webhook/service.yaml @@ -0,0 +1,12 @@ + +apiVersion: v1 +kind: Service +metadata: + name: webhook-service + namespace: system +spec: + ports: + - port: 443 + targetPort: 443 + selector: + control-plane: controller-manager diff --git a/go.mod b/go.mod new file mode 100644 index 00000000000..2d7d606aa0b --- /dev/null +++ b/go.mod @@ -0,0 +1,9 @@ +module github.com/Azure/azure-service-operator + +go 1.12 + +require ( + k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d + k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible + sigs.k8s.io/controller-runtime v0.2.0-beta.2 +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000000..39d372ab640 --- /dev/null +++ b/go.sum @@ -0,0 +1,126 @@ +cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 h1:Kn3rqvbUFqSepE2OqVu0Pn1CbDw9IuMlONapol0zuwk= +github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc= +github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= +github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/groupcache 
v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE= +github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= +github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw= +github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= +github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs= +github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang 
v0.9.0 h1:tXuTFVHC03mW0D+Ua1Q2d1EAVqLTuggX50V0VLICCzY= +github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac h1:7d7lG9fHOLdL6jZPtnV4LpI41SbohIJ1Atq7U991dMg= +golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +google.golang.org/appengine v1.1.0 
h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b h1:aBGgKJUM9Hk/3AE8WaZIApnTxG35kbuQba2w+SXqezo= +k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 h1:q1Qvjzs/iEdXF6A1a8H3AKVFDzJNcJn3nXMs6R6qFtA= +k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= +k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d h1:Jmdtdt1ZnoGfWWIIik61Z7nKYgO3J+swQJtPYsP9wHA= +k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible h1:U5Bt+dab9K8qaUmXINrkXO135kA11/i5Kg1RUydgaMQ= +k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c h1:3KSCztE7gPitlZmWbNwue/2U0YruD65DqX3INopDAQM= +k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g90ETOiA6rfLV1Y= +k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +sigs.k8s.io/controller-runtime v0.2.0-beta.2 h1:hOWldx1qmGI9TsU+uUsq1xTgVmUV7AZo08VAYX0dwGI= +sigs.k8s.io/controller-runtime v0.2.0-beta.2/go.mod h1:TSH2R0nSz4WAlUUlNnOFcOR/VUhfwBLlmtq2X6AiQCA= +sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs= +sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt new file mode 100644 index 00000000000..571a53fa82b --- /dev/null +++ b/hack/boilerplate.go.txt @@ -0,0 +1,3 @@ +/* +. +*/ \ No newline at end of file diff --git a/main.go b/main.go new file mode 100644 index 00000000000..2ed3155f9e8 --- /dev/null +++ b/main.go @@ -0,0 +1,55 @@ +/* +. 
+*/ + +package main + +import ( + "flag" + "os" + + "k8s.io/apimachinery/pkg/runtime" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + // +kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + + // +kubebuilder:scaffold:scheme +} + +func main() { + var metricsAddr string + var enableLeaderElection bool + flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, + "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") + flag.Parse() + + ctrl.SetLogger(zap.Logger(true)) + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + MetricsBindAddress: metricsAddr, + LeaderElection: enableLeaderElection, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + // +kubebuilder:scaffold:builder + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} From 045d9dd5d513c4b2d4e6efa8aaf9cbb7ae71f346 Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Fri, 19 Jul 2019 16:16:06 +0800 Subject: [PATCH 02/34] kubebuilder create api --group service --version v1alpha1 --kind Storage --- PROJECT | 4 + api/v1alpha1/groupversion_info.go | 24 +++++ api/v1alpha1/storage_types.go | 48 +++++++++ api/v1alpha1/storage_types_test.go | 65 +++++++++++ api/v1alpha1/suite_test.go | 63 +++++++++++ api/v1alpha1/zz_generated.deepcopy.go | 102 ++++++++++++++++++ config/crd/kustomization.yaml | 19 ++++ config/crd/kustomizeconfig.yaml | 17 +++ .../crd/patches/cainjection_in_storages.yaml | 8 ++ config/crd/patches/webhook_in_storages.yaml | 17 +++ config/samples/service_v1alpha1_storage.yaml | 7 ++ controllers/storage_controller.go | 39 +++++++ controllers/suite_test.go | 67 ++++++++++++ go.mod | 4 + main.go | 11 ++ 15 files changed, 495 insertions(+) create mode 100644 api/v1alpha1/groupversion_info.go create mode 100644 api/v1alpha1/storage_types.go create mode 100644 api/v1alpha1/storage_types_test.go create mode 100644 api/v1alpha1/suite_test.go create mode 100644 api/v1alpha1/zz_generated.deepcopy.go create mode 100644 config/crd/kustomization.yaml create mode 100644 config/crd/kustomizeconfig.yaml create mode 100644 config/crd/patches/cainjection_in_storages.yaml create mode 100644 config/crd/patches/webhook_in_storages.yaml create mode 100644 config/samples/service_v1alpha1_storage.yaml create mode 100644 controllers/storage_controller.go create mode 100644 controllers/suite_test.go diff --git a/PROJECT b/PROJECT index 17a54e0b035..7738262759f 100644 --- a/PROJECT +++ b/PROJECT @@ -1,3 +1,7 @@ version: "2" domain: azure repo: github.com/Azure/azure-service-operator +resources: +- group: service + version: v1alpha1 + kind: Storage diff --git a/api/v1alpha1/groupversion_info.go b/api/v1alpha1/groupversion_info.go new file mode 100644 index 00000000000..b4ace7b75ec --- /dev/null +++ b/api/v1alpha1/groupversion_info.go @@ -0,0 +1,24 @@ +/* +. 
+*/ + +// Package v1alpha1 contains API Schema definitions for the service v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=service.azure +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "service.azure", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1alpha1/storage_types.go b/api/v1alpha1/storage_types.go new file mode 100644 index 00000000000..726d839c425 --- /dev/null +++ b/api/v1alpha1/storage_types.go @@ -0,0 +1,48 @@ +/* +. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// StorageSpec defines the desired state of Storage +type StorageSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// StorageStatus defines the observed state of Storage +type StorageStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true + +// Storage is the Schema for the storages API +type Storage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec StorageSpec `json:"spec,omitempty"` + Status StorageStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// StorageList contains a list of Storage +type StorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Storage `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Storage{}, &StorageList{}) +} diff --git a/api/v1alpha1/storage_types_test.go b/api/v1alpha1/storage_types_test.go new file mode 100644 index 00000000000..79523c976c5 --- /dev/null +++ b/api/v1alpha1/storage_types_test.go @@ -0,0 +1,65 @@ +/* +. +*/ + +package v1alpha1 + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "golang.org/x/net/context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// These tests are written in BDD-style using Ginkgo framework. Refer to +// http://onsi.github.io/ginkgo to learn more. + +var _ = Describe("Storage", func() { + var ( + key types.NamespacedName + created, fetched *Storage + ) + + BeforeEach(func() { + // Add any setup steps that needs to be executed before each test + }) + + AfterEach(func() { + // Add any teardown steps that needs to be executed after each test + }) + + // Add Tests for OpenAPI validation (or additonal CRD features) specified in + // your API definition. + // Avoid adding tests for vanilla CRUD operations because they would + // test Kubernetes API server, which isn't the goal here. 
+ Context("Create API", func() { + + It("should create an object successfully", func() { + + key = types.NamespacedName{ + Name: "foo", + Namespace: "default", + } + created = &Storage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }} + + By("creating an API obj") + Expect(k8sClient.Create(context.TODO(), created)).To(Succeed()) + + fetched = &Storage{} + Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed()) + Expect(fetched).To(Equal(created)) + + By("deleting the created object") + Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed()) + Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed()) + }) + + }) + +}) diff --git a/api/v1alpha1/suite_test.go b/api/v1alpha1/suite_test.go new file mode 100644 index 00000000000..aacdc648714 --- /dev/null +++ b/api/v1alpha1/suite_test.go @@ -0,0 +1,63 @@ +/* +. +*/ + +package v1alpha1 + +import ( + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecsWithDefaultAndCustomReporters(t, + "v1alpha1 Suite", + []Reporter{envtest.NewlineReporter{}}) +} + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + } + + err := SchemeBuilder.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + cfg, err = testEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).ToNot(HaveOccurred()) + Expect(k8sClient).ToNot(BeNil()) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..329fda5f96f --- /dev/null +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,102 @@ +// +build !ignore_autogenerated + +/* +. +*/ + +// autogenerated by controller-gen object, do not modify manually + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Storage) DeepCopyInto(out *Storage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. +func (in *Storage) DeepCopy() *Storage { + if in == nil { + return nil + } + out := new(Storage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Storage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageList) DeepCopyInto(out *StorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Storage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageList. +func (in *StorageList) DeepCopy() *StorageList { + if in == nil { + return nil + } + out := new(StorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. +func (in *StorageSpec) DeepCopy() *StorageSpec { + if in == nil { + return nil + } + out := new(StorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageStatus) DeepCopyInto(out *StorageStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStatus. +func (in *StorageStatus) DeepCopy() *StorageStatus { + if in == nil { + return nil + } + out := new(StorageStatus) + in.DeepCopyInto(out) + return out +} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml new file mode 100644 index 00000000000..0fd1192062e --- /dev/null +++ b/config/crd/kustomization.yaml @@ -0,0 +1,19 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/service.azure_storages.yaml +# +kubebuilder:scaffold:crdkustomizeresource + +patches: +# [WEBHOOK] patches here are for enabling the conversion webhook for each CRD +#- patches/webhook_in_storages.yaml +# +kubebuilder:scaffold:crdkustomizewebhookpatch + +# [CAINJECTION] patches here are for enabling the CA injection for each CRD +#- patches/cainjection_in_storages.yaml +# +kubebuilder:scaffold:crdkustomizecainjectionpatch + +# the following config is for teaching kustomize how to do kustomization for CRDs. 
+configurations: +- kustomizeconfig.yaml diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml new file mode 100644 index 00000000000..6f83d9a94bc --- /dev/null +++ b/config/crd/kustomizeconfig.yaml @@ -0,0 +1,17 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + group: apiextensions.k8s.io + path: spec/conversion/webhookClientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + group: apiextensions.k8s.io + path: spec/conversion/webhookClientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/config/crd/patches/cainjection_in_storages.yaml b/config/crd/patches/cainjection_in_storages.yaml new file mode 100644 index 00000000000..e371d696a51 --- /dev/null +++ b/config/crd/patches/cainjection_in_storages.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME) + name: storages.service.azure diff --git a/config/crd/patches/webhook_in_storages.yaml b/config/crd/patches/webhook_in_storages.yaml new file mode 100644 index 00000000000..5811750b289 --- /dev/null +++ b/config/crd/patches/webhook_in_storages.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: storages.service.azure +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/samples/service_v1alpha1_storage.yaml b/config/samples/service_v1alpha1_storage.yaml new file mode 100644 index 00000000000..6ccb130127e --- /dev/null +++ b/config/samples/service_v1alpha1_storage.yaml @@ -0,0 +1,7 @@ +apiVersion: service.azure/v1alpha1 +kind: Storage +metadata: + name: storage-sample +spec: + # Add fields here + foo: bar diff --git a/controllers/storage_controller.go b/controllers/storage_controller.go new file mode 100644 index 00000000000..d1f7e7ab3c5 --- /dev/null +++ b/controllers/storage_controller.go @@ -0,0 +1,39 @@ +/* +. 
+*/ + +package controllers + +import ( + "context" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + servicev1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" +) + +// StorageReconciler reconciles a Storage object +type StorageReconciler struct { + client.Client + Log logr.Logger +} + +// +kubebuilder:rbac:groups=service.azure,resources=storages,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=service.azure,resources=storages/status,verbs=get;update;patch + +func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + _ = context.Background() + _ = r.Log.WithValues("storage", req.NamespacedName) + + // your logic here + + return ctrl.Result{}, nil +} + +func (r *StorageReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&servicev1alpha1.Storage{}). + Complete(r) +} diff --git a/controllers/suite_test.go b/controllers/suite_test.go new file mode 100644 index 00000000000..a9c49ec3068 --- /dev/null +++ b/controllers/suite_test.go @@ -0,0 +1,67 @@ +/* +. +*/ + +package controllers + +import ( + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + servicev1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecsWithDefaultAndCustomReporters(t, + "Controller Suite", + []Reporter{envtest.NewlineReporter{}}) +} + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + } + + cfg, err := testEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + err = servicev1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).ToNot(HaveOccurred()) + Expect(k8sClient).ToNot(BeNil()) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) diff --git a/go.mod b/go.mod index 2d7d606aa0b..2bee5f79233 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,10 @@ module github.com/Azure/azure-service-operator go 1.12 require ( + github.com/go-logr/logr v0.1.0 + github.com/onsi/ginkgo v1.6.0 + github.com/onsi/gomega v1.4.2 + golang.org/x/net v0.0.0-20180906233101-161cd47e91fd k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible sigs.k8s.io/controller-runtime v0.2.0-beta.2 diff --git a/main.go b/main.go index 2ed3155f9e8..78c19a562d8 100644 --- a/main.go +++ b/main.go @@ -8,6 +8,8 @@ import ( "flag" "os" + servicev1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" + 
"github.com/Azure/azure-service-operator/controllers" "k8s.io/apimachinery/pkg/runtime" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" @@ -22,6 +24,7 @@ var ( func init() { + servicev1alpha1.AddToScheme(scheme) // +kubebuilder:scaffold:scheme } @@ -45,6 +48,14 @@ func main() { os.Exit(1) } + err = (&controllers.StorageReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Storage"), + }).SetupWithManager(mgr) + if err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Storage") + os.Exit(1) + } // +kubebuilder:scaffold:builder setupLog.Info("starting manager") From 4e3a7238a22510aa781c715dedb5e95c24ddeb16 Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Fri, 19 Jul 2019 16:16:52 +0800 Subject: [PATCH 03/34] kubebuilder create api --group service --version v1alpha1 --kind CosmosDB --- PROJECT | 3 + api/v1alpha1/cosmosdb_types.go | 48 ++++++++++ api/v1alpha1/cosmosdb_types_test.go | 65 ++++++++++++++ api/v1alpha1/zz_generated.deepcopy.go | 89 +++++++++++++++++++ config/crd/kustomization.yaml | 3 + .../crd/patches/cainjection_in_cosmosdbs.yaml | 8 ++ config/crd/patches/webhook_in_cosmosdbs.yaml | 17 ++++ config/samples/service_v1alpha1_cosmosdb.yaml | 7 ++ controllers/cosmosdb_controller.go | 39 ++++++++ controllers/suite_test.go | 3 + main.go | 8 ++ 11 files changed, 290 insertions(+) create mode 100644 api/v1alpha1/cosmosdb_types.go create mode 100644 api/v1alpha1/cosmosdb_types_test.go create mode 100644 config/crd/patches/cainjection_in_cosmosdbs.yaml create mode 100644 config/crd/patches/webhook_in_cosmosdbs.yaml create mode 100644 config/samples/service_v1alpha1_cosmosdb.yaml create mode 100644 controllers/cosmosdb_controller.go diff --git a/PROJECT b/PROJECT index 7738262759f..908a6362491 100644 --- a/PROJECT +++ b/PROJECT @@ -5,3 +5,6 @@ resources: - group: service version: v1alpha1 kind: Storage +- group: service + version: v1alpha1 + kind: CosmosDB diff --git a/api/v1alpha1/cosmosdb_types.go b/api/v1alpha1/cosmosdb_types.go new file mode 100644 index 00000000000..78bc6b8ed16 --- /dev/null +++ b/api/v1alpha1/cosmosdb_types.go @@ -0,0 +1,48 @@ +/* +. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +// CosmosDBSpec defines the desired state of CosmosDB +type CosmosDBSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// CosmosDBStatus defines the observed state of CosmosDB +type CosmosDBStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true + +// CosmosDB is the Schema for the cosmosdbs API +type CosmosDB struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CosmosDBSpec `json:"spec,omitempty"` + Status CosmosDBStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CosmosDBList contains a list of CosmosDB +type CosmosDBList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CosmosDB `json:"items"` +} + +func init() { + SchemeBuilder.Register(&CosmosDB{}, &CosmosDBList{}) +} diff --git a/api/v1alpha1/cosmosdb_types_test.go b/api/v1alpha1/cosmosdb_types_test.go new file mode 100644 index 00000000000..5cced1f3e77 --- /dev/null +++ b/api/v1alpha1/cosmosdb_types_test.go @@ -0,0 +1,65 @@ +/* +. +*/ + +package v1alpha1 + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "golang.org/x/net/context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// These tests are written in BDD-style using Ginkgo framework. Refer to +// http://onsi.github.io/ginkgo to learn more. + +var _ = Describe("CosmosDB", func() { + var ( + key types.NamespacedName + created, fetched *CosmosDB + ) + + BeforeEach(func() { + // Add any setup steps that needs to be executed before each test + }) + + AfterEach(func() { + // Add any teardown steps that needs to be executed after each test + }) + + // Add Tests for OpenAPI validation (or additonal CRD features) specified in + // your API definition. + // Avoid adding tests for vanilla CRUD operations because they would + // test Kubernetes API server, which isn't the goal here. + Context("Create API", func() { + + It("should create an object successfully", func() { + + key = types.NamespacedName{ + Name: "foo", + Namespace: "default", + } + created = &CosmosDB{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }} + + By("creating an API obj") + Expect(k8sClient.Create(context.TODO(), created)).To(Succeed()) + + fetched = &CosmosDB{} + Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed()) + Expect(fetched).To(Equal(created)) + + By("deleting the created object") + Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed()) + Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed()) + }) + + }) + +}) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 329fda5f96f..0fd637d435f 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -12,6 +12,95 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDB) DeepCopyInto(out *CosmosDB) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDB. 
+func (in *CosmosDB) DeepCopy() *CosmosDB { + if in == nil { + return nil + } + out := new(CosmosDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CosmosDB) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBList) DeepCopyInto(out *CosmosDBList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CosmosDB, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBList. +func (in *CosmosDBList) DeepCopy() *CosmosDBList { + if in == nil { + return nil + } + out := new(CosmosDBList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CosmosDBList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBSpec) DeepCopyInto(out *CosmosDBSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBSpec. +func (in *CosmosDBSpec) DeepCopy() *CosmosDBSpec { + if in == nil { + return nil + } + out := new(CosmosDBSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBStatus) DeepCopyInto(out *CosmosDBStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBStatus. +func (in *CosmosDBStatus) DeepCopy() *CosmosDBStatus { + if in == nil { + return nil + } + out := new(CosmosDBStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Storage) DeepCopyInto(out *Storage) { *out = *in diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 0fd1192062e..0463d4e239b 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -3,15 +3,18 @@ # It should be run by config/default resources: - bases/service.azure_storages.yaml +- bases/service.azure_cosmosdbs.yaml # +kubebuilder:scaffold:crdkustomizeresource patches: # [WEBHOOK] patches here are for enabling the conversion webhook for each CRD #- patches/webhook_in_storages.yaml +#- patches/webhook_in_cosmosdbs.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CAINJECTION] patches here are for enabling the CA injection for each CRD #- patches/cainjection_in_storages.yaml +#- patches/cainjection_in_cosmosdbs.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. 
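The generated functions above give CosmosDB the deep-copy semantics that controller-runtime relies on when it hands objects to reconcilers. A minimal illustrative sketch of what that generated code guarantees (not part of this patch; the label values are made up):

    // DeepCopy duplicates ObjectMeta maps, so mutating the copy leaves the
    // original untouched.
    orig := &CosmosDB{ObjectMeta: metav1.ObjectMeta{
        Name:   "cosmosdb-sample",
        Labels: map[string]string{"tier": "dev"},
    }}
    dup := orig.DeepCopy()
    dup.Labels["tier"] = "prod"
    // orig.Labels["tier"] is still "dev"; the two objects share no state.
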
diff --git a/config/crd/patches/cainjection_in_cosmosdbs.yaml b/config/crd/patches/cainjection_in_cosmosdbs.yaml new file mode 100644 index 00000000000..d14bfce13fd --- /dev/null +++ b/config/crd/patches/cainjection_in_cosmosdbs.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME) + name: cosmosdbs.service.azure diff --git a/config/crd/patches/webhook_in_cosmosdbs.yaml b/config/crd/patches/webhook_in_cosmosdbs.yaml new file mode 100644 index 00000000000..7ecf9d9fe29 --- /dev/null +++ b/config/crd/patches/webhook_in_cosmosdbs.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: cosmosdbs.service.azure +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/samples/service_v1alpha1_cosmosdb.yaml b/config/samples/service_v1alpha1_cosmosdb.yaml new file mode 100644 index 00000000000..f24eec665d9 --- /dev/null +++ b/config/samples/service_v1alpha1_cosmosdb.yaml @@ -0,0 +1,7 @@ +apiVersion: service.azure/v1alpha1 +kind: CosmosDB +metadata: + name: cosmosdb-sample +spec: + # Add fields here + foo: bar diff --git a/controllers/cosmosdb_controller.go b/controllers/cosmosdb_controller.go new file mode 100644 index 00000000000..e82392cc353 --- /dev/null +++ b/controllers/cosmosdb_controller.go @@ -0,0 +1,39 @@ +/* +. +*/ + +package controllers + +import ( + "context" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + servicev1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" +) + +// CosmosDBReconciler reconciles a CosmosDB object +type CosmosDBReconciler struct { + client.Client + Log logr.Logger +} + +// +kubebuilder:rbac:groups=service.azure,resources=cosmosdbs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=service.azure,resources=cosmosdbs/status,verbs=get;update;patch + +func (r *CosmosDBReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + _ = context.Background() + _ = r.Log.WithValues("cosmosdb", req.NamespacedName) + + // your logic here + + return ctrl.Result{}, nil +} + +func (r *CosmosDBReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&servicev1alpha1.CosmosDB{}). 
+ Complete(r) +} diff --git a/controllers/suite_test.go b/controllers/suite_test.go index a9c49ec3068..ecb60b26e05 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -51,6 +51,9 @@ var _ = BeforeSuite(func(done Done) { err = servicev1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) + err = servicev1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + // +kubebuilder:scaffold:scheme k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) diff --git a/main.go b/main.go index 78c19a562d8..421577de2f4 100644 --- a/main.go +++ b/main.go @@ -56,6 +56,14 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "Storage") os.Exit(1) } + err = (&controllers.CosmosDBReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("CosmosDB"), + }).SetupWithManager(mgr) + if err != nil { + setupLog.Error(err, "unable to create controller", "controller", "CosmosDB") + os.Exit(1) + } // +kubebuilder:scaffold:builder setupLog.Info("starting manager") From 57fb60bab7f750a0eb4e3c0e11820dc4dd4a0c24 Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Fri, 19 Jul 2019 16:20:16 +0800 Subject: [PATCH 04/34] Add MIT License --- api/v1alpha1/cosmosdb_types.go | 22 +++++++++++++++++++++- api/v1alpha1/cosmosdb_types_test.go | 22 +++++++++++++++++++++- api/v1alpha1/groupversion_info.go | 22 +++++++++++++++++++++- api/v1alpha1/storage_types.go | 22 +++++++++++++++++++++- api/v1alpha1/storage_types_test.go | 22 +++++++++++++++++++++- api/v1alpha1/suite_test.go | 22 +++++++++++++++++++++- api/v1alpha1/zz_generated.deepcopy.go | 22 +++++++++++++++++++++- controllers/cosmosdb_controller.go | 22 +++++++++++++++++++++- controllers/storage_controller.go | 22 +++++++++++++++++++++- controllers/suite_test.go | 22 +++++++++++++++++++++- hack/boilerplate.go.txt | 24 ++++++++++++++++++++++-- main.go | 22 +++++++++++++++++++++- 12 files changed, 253 insertions(+), 13 deletions(-) diff --git a/api/v1alpha1/cosmosdb_types.go b/api/v1alpha1/cosmosdb_types.go index 78bc6b8ed16..0a5498f3f07 100644 --- a/api/v1alpha1/cosmosdb_types.go +++ b/api/v1alpha1/cosmosdb_types.go @@ -1,5 +1,25 @@ /* -. +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE */ package v1alpha1 diff --git a/api/v1alpha1/cosmosdb_types_test.go b/api/v1alpha1/cosmosdb_types_test.go index 5cced1f3e77..65273803537 100644 --- a/api/v1alpha1/cosmosdb_types_test.go +++ b/api/v1alpha1/cosmosdb_types_test.go @@ -1,5 +1,25 @@ /* -. +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE */ package v1alpha1 diff --git a/api/v1alpha1/groupversion_info.go b/api/v1alpha1/groupversion_info.go index b4ace7b75ec..e76004295af 100644 --- a/api/v1alpha1/groupversion_info.go +++ b/api/v1alpha1/groupversion_info.go @@ -1,5 +1,25 @@ /* -. +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE */ // Package v1alpha1 contains API Schema definitions for the service v1alpha1 API group diff --git a/api/v1alpha1/storage_types.go b/api/v1alpha1/storage_types.go index 726d839c425..dc22a57157f 100644 --- a/api/v1alpha1/storage_types.go +++ b/api/v1alpha1/storage_types.go @@ -1,5 +1,25 @@ /* -. +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE */ package v1alpha1 diff --git a/api/v1alpha1/storage_types_test.go b/api/v1alpha1/storage_types_test.go index 79523c976c5..2f5353a1d8a 100644 --- a/api/v1alpha1/storage_types_test.go +++ b/api/v1alpha1/storage_types_test.go @@ -1,5 +1,25 @@ /* -. +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE */ package v1alpha1 diff --git a/api/v1alpha1/suite_test.go b/api/v1alpha1/suite_test.go index aacdc648714..6c8a9507359 100644 --- a/api/v1alpha1/suite_test.go +++ b/api/v1alpha1/suite_test.go @@ -1,5 +1,25 @@ /* -. +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE */ package v1alpha1 diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 0fd637d435f..83e3c9fe34a 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,27 @@ // +build !ignore_autogenerated /* -. +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE */ // autogenerated by controller-gen object, do not modify manually diff --git a/controllers/cosmosdb_controller.go b/controllers/cosmosdb_controller.go index e82392cc353..25584193218 100644 --- a/controllers/cosmosdb_controller.go +++ b/controllers/cosmosdb_controller.go @@ -1,5 +1,25 @@ /* -. +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE */ package controllers diff --git a/controllers/storage_controller.go b/controllers/storage_controller.go index d1f7e7ab3c5..a54d5164272 100644 --- a/controllers/storage_controller.go +++ b/controllers/storage_controller.go @@ -1,5 +1,25 @@ /* -. +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE */ package controllers diff --git a/controllers/suite_test.go b/controllers/suite_test.go index ecb60b26e05..d50e540e197 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -1,5 +1,25 @@ /* -. +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE */ package controllers diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index 571a53fa82b..a103642e6e9 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,3 +1,23 @@ /* -. -*/ \ No newline at end of file +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE +*/ diff --git a/main.go b/main.go index 421577de2f4..37310630cb2 100644 --- a/main.go +++ b/main.go @@ -1,5 +1,25 @@ /* -. +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE */ package main From f7f701657f182510eb1a35135fcb98c5fefe6f18 Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Fri, 19 Jul 2019 17:05:38 +0800 Subject: [PATCH 05/34] Initial codes to support Azure Storage Account --- api/v1alpha1/storage_types.go | 16 + api/v1alpha1/zz_generated.deepcopy.go | 23 +- config/crd/bases/service.azure_cosmosdbs.yaml | 400 ++++++++++++++++ config/crd/bases/service.azure_storages.yaml | 430 ++++++++++++++++++ config/rbac/role.yaml | 48 ++ config/samples/service_v1alpha1_storage.yaml | 10 +- config/webhook/manifests.yaml | 0 controllers/storage_controller.go | 110 ++++- go.mod | 13 +- go.sum | 149 ++++++ pkg/client/deployment/deployment.go | 49 ++ pkg/client/group/group.go | 40 ++ pkg/config/config.go | 67 +++ pkg/helpers/helpers.go | 21 + pkg/iam/authorizers.go | 58 +++ pkg/storage/storage_template.go | 50 ++ pkg/template/storage.json | 43 ++ 17 files changed, 1518 insertions(+), 9 deletions(-) create mode 100644 config/crd/bases/service.azure_cosmosdbs.yaml create mode 100644 config/crd/bases/service.azure_storages.yaml create mode 100644 config/rbac/role.yaml create mode 100644 config/webhook/manifests.yaml create mode 100644 pkg/client/deployment/deployment.go create mode 100644 pkg/client/group/group.go create mode 100644 pkg/config/config.go create mode 100644 pkg/helpers/helpers.go create mode 100644 pkg/iam/authorizers.go create mode 100644 pkg/storage/storage_template.go create mode 100644 pkg/template/storage.json diff --git a/api/v1alpha1/storage_types.go b/api/v1alpha1/storage_types.go index dc22a57157f..e16f491bbe6 100644 --- a/api/v1alpha1/storage_types.go +++ b/api/v1alpha1/storage_types.go @@ -26,6 +26,8 @@ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + storagesdk "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage" ) // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
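The storagesdk import added above lets the spec reuse the Azure storage management SDK's enum types (SkuName, Kind, AccessTier) instead of redefining them; the hunk that follows adds the fields that use them. A rough sketch of a spec populated with those constants (field names come from the next hunk; the literal values are placeholders that loosely follow the sample manifest added later in this patch):

    // Sketch only: building the extended StorageSpec with SDK enum constants.
    httpsOnly := true
    spec := StorageSpec{
        Location:               "eastus2",
        AccountName:            "examplestorage", // placeholder account name
        Sku:                    Sku{Name: storagesdk.StandardRAGRS},
        Kind:                   storagesdk.StorageV2,
        AccessTier:             storagesdk.Hot,
        EnableHTTPSTrafficOnly: &httpsOnly,
    }
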
@@ -35,15 +37,29 @@ import ( type StorageSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file + Location string `json:"location,omitempty"` + AccountName string `json:"accountName,omitempty"` + Sku Sku `json:"sku,omitempty"` + Kind storagesdk.Kind `json:"kind,omitempty"` + AccessTier storagesdk.AccessTier `json:"accessTier,omitempty"` + EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` +} + +// Sku the SKU of the storage account. +type Sku struct { + // Name - The SKU name. Required for account creation; optional for update. Possible values include: 'StandardLRS', 'StandardGRS', 'StandardRAGRS', 'StandardZRS', 'PremiumLRS', 'PremiumZRS', 'StandardGZRS', 'StandardRAGZRS' + Name storagesdk.SkuName `json:"name,omitempty"` } // StorageStatus defines the observed state of Storage type StorageStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file + ResourceGroupName string `json:"resourceGroupName,omitempty"` } // +kubebuilder:object:root=true +// +kubebuilder:subresource:status // Storage is the Schema for the storages API type Storage struct { diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 83e3c9fe34a..237ca5105b0 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -121,12 +121,27 @@ func (in *CosmosDBStatus) DeepCopy() *CosmosDBStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Sku) DeepCopyInto(out *Sku) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sku. +func (in *Sku) DeepCopy() *Sku { + if in == nil { + return nil + } + out := new(Sku) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Storage) DeepCopyInto(out *Storage) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) out.Status = in.Status } @@ -183,6 +198,12 @@ func (in *StorageList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { *out = *in + out.Sku = in.Sku + if in.EnableHTTPSTrafficOnly != nil { + in, out := &in.EnableHTTPSTrafficOnly, &out.EnableHTTPSTrafficOnly + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. diff --git a/config/crd/bases/service.azure_cosmosdbs.yaml b/config/crd/bases/service.azure_cosmosdbs.yaml new file mode 100644 index 00000000000..af8e7da57a8 --- /dev/null +++ b/config/crd/bases/service.azure_cosmosdbs.yaml @@ -0,0 +1,400 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: cosmosdbs.service.azure +spec: + group: service.azure + names: + kind: CosmosDB + plural: cosmosdbs + scope: "" + validation: + openAPIV3Schema: + description: CosmosDB is the Schema for the cosmosdbs API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with + a resource that may be set by external tools to store and retrieve + arbitrary metadata. They are not queryable and should be preserved + when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + clusterName: + description: The name of the cluster which the object belongs to. This + is used to distinguish resources with same name and namespace in different + clusters. This field is not set anywhere right now and apiserver is + going to ignore it if set in create or update request. + type: string + creationTimestamp: + description: "CreationTimestamp is a timestamp representing the server + time when this object was created. It is not guaranteed to be set + in happens-before order across separate operations. Clients may not + set this value. It is represented in RFC3339 form and is in UTC. \n + Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" + format: date-time + type: string + deletionGracePeriodSeconds: + description: Number of seconds allowed for this object to gracefully + terminate before it will be removed from the system. Only set when + deletionTimestamp is also set. May only be shortened. Read-only. + format: int64 + type: integer + deletionTimestamp: + description: "DeletionTimestamp is RFC 3339 date and time at which this + resource will be deleted. This field is set by the server when a graceful + deletion is requested by the user, and is not directly settable by + a client. The resource is expected to be deleted (no longer visible + from resource lists, and not reachable by name) after the time in + this field, once the finalizers list is empty. As long as the finalizers + list contains items, deletion is blocked. Once the deletionTimestamp + is set, this value may not be unset or be set further into the future, + although it may be shortened or the resource may be deleted prior + to this time. For example, a user may request that a pod is deleted + in 30 seconds. The Kubelet will react by sending a graceful termination + signal to the containers in the pod. After that 30 seconds, the Kubelet + will send a hard termination signal (SIGKILL) to the container and + after cleanup, remove the pod from the API. In the presence of network + partitions, this object may still exist after this timestamp, until + an administrator or automated process can determine the resource is + fully terminated. If not set, graceful deletion of the object has + not been requested. \n Populated by the system when a graceful deletion + is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" + format: date-time + type: string + finalizers: + description: Must be empty before the object is deleted from the registry. 
+ Each entry is an identifier for the responsible component that will + remove the entry from the list. If the deletionTimestamp of the object + is non-nil, entries in this list can only be removed. + items: + type: string + type: array + generateName: + description: "GenerateName is an optional prefix, used by the server, + to generate a unique name ONLY IF the Name field has not been provided. + If this field is used, the name returned to the client will be different + than the name passed. This value will also be combined with a unique + suffix. The provided value has the same validation rules as the Name + field, and may be truncated by the length of the suffix required to + make the value unique on the server. \n If this field is specified + and the generated name exists, the server will NOT return a 409 - + instead, it will either return 201 Created or 500 with Reason ServerTimeout + indicating a unique name could not be found in the time allotted, + and the client should retry (optionally after the time indicated in + the Retry-After header). \n Applied only if Name is not specified. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" + type: string + generation: + description: A sequence number representing a specific generation of + the desired state. Populated by the system. Read-only. + format: int64 + type: integer + initializers: + description: "An initializer is a controller which enforces some system + invariant at object creation time. This field is a list of initializers + that have not yet acted on this object. If nil or empty, this object + has been completely initialized. Otherwise, the object is considered + uninitialized and is hidden (in list/watch and get calls) from clients + that haven't explicitly asked to observe uninitialized objects. \n + When an object is created, the system will populate this list with + the current set of initializers. Only privileged users may set or + modify this list. Once it is empty, it may not be modified further + by any user. \n DEPRECATED - initializers are an alpha field and will + be removed in v1.15." + properties: + pending: + description: Pending is a list of initializers that must execute + in order before this object is visible. When the last pending + initializer is removed, and no failing result is set, the initializers + struct will be set to nil and the object is considered as initialized + and visible to all clients. + items: + properties: + name: + description: name of the process that is responsible for initializing + this object. + type: string + required: + - name + type: object + type: array + result: + description: If result is set with the Failure field, the object + will be persisted to storage and then deleted, ensuring that other + clients can observe the deletion. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + code: + description: Suggested HTTP return code for this status, 0 if + not set. + format: int32 + type: integer + details: + description: Extended data associated with the reason. Each + reason may define its own extended details. This field is + optional and the data returned is not guaranteed to conform + to any schema except that defined by the reason type. 
+ properties: + causes: + description: The Causes array includes more details associated + with the StatusReason failure. Not all StatusReasons may + provide detailed causes. + items: + properties: + field: + description: "The field of the resource that has caused + this error, as named by its JSON serialization. + May include dot and postfix notation for nested + attributes. Arrays are zero-indexed. Fields may + appear more than once in an array of causes due + to fields having multiple errors. Optional. \n Examples: + \ \"name\" - the field \"name\" on the current + resource \"items[0].name\" - the field \"name\" + on the first array entry in \"items\"" + type: string + message: + description: A human-readable description of the cause + of the error. This field may be presented as-is + to a reader. + type: string + reason: + description: A machine-readable description of the + cause of the error. If this value is empty there + is no information available. + type: string + type: object + type: array + group: + description: The group attribute of the resource associated + with the status StatusReason. + type: string + kind: + description: 'The kind attribute of the resource associated + with the status StatusReason. On some operations may differ + from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: The name attribute of the resource associated + with the status StatusReason (when there is a single name + which can be described). + type: string + retryAfterSeconds: + description: If specified, the time in seconds before the + operation should be retried. Some errors may indicate + the client must take an alternate action - for those errors + this field may indicate how long to wait before taking + the alternate action. + format: int32 + type: integer + uid: + description: 'UID of the resource. (when there is a single + resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + type: object + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + message: + description: A human-readable description of the status of this + operation. + type: string + metadata: + description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + properties: + continue: + description: continue may be set if the user set a limit + on the number of items returned, and indicates that the + server has more data available. The value is opaque and + may be used to issue another request to the endpoint that + served this list to retrieve the next set of available + objects. Continuing a consistent list may not be possible + if the server configuration has changed or more than a + few minutes have passed. The resourceVersion field returned + when using this continue value will be identical to the + value in the first response, unless you have received + this token from an error message. + type: string + resourceVersion: + description: 'String that identifies the server''s internal + version of this object that can be used by clients to + determine when objects have changed. 
Value must be treated + as opaque by clients and passed unmodified back to the + server. Populated by the system. Read-only. More info: + https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + selfLink: + description: selfLink is a URL representing this object. + Populated by the system. Read-only. + type: string + type: object + reason: + description: A machine-readable description of why this operation + is in the "Failure" status. If this value is empty there is + no information available. A Reason clarifies an HTTP status + code but does not override it. + type: string + status: + description: 'Status of the operation. One of: "Success" or + "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' + type: string + type: object + required: + - pending + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize + and categorize (scope and select) objects. May match selectors of + replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + managedFields: + description: "ManagedFields maps workflow-id and version to the set + of fields that are managed by that workflow. This is mostly for internal + housekeeping, and users typically shouldn't need to set or understand + this field. A workflow can be the user's name, a controller's name, + or the name of a specific apply path like \"ci-cd\". The set of fields + is always in the version that the workflow used when modifying the + object. \n This field is alpha and can be changed or removed without + notice." + items: + properties: + apiVersion: + description: APIVersion defines the version of this resource that + this field set applies to. The format is "group/version" just + like the top-level APIVersion field. It is necessary to track + the version of a field set because it cannot be automatically + converted. + type: string + fields: + additionalProperties: true + description: Fields identifies a set of fields. + type: object + manager: + description: Manager is an identifier of the workflow managing + these fields. + type: string + operation: + description: Operation is the type of operation which lead to + this ManagedFieldsEntry being created. The only valid values + for this field are 'Apply' and 'Update'. + type: string + time: + description: Time is timestamp of when these fields were set. + It should always be empty if Operation is 'Apply' + format: date-time + type: string + type: object + type: array + name: + description: 'Name must be unique within a namespace. Is required when + creating resources, although some resources may allow a client to + request the generation of an appropriate name automatically. Name + is primarily intended for creation idempotence and configuration definition. + Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. + An empty namespace is equivalent to the \"default\" namespace, but + \"default\" is the canonical representation. Not all objects are required + to be scoped to a namespace - the value of this field for those objects + will be empty. \n Must be a DNS_LABEL. Cannot be updated. 
More info: + http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects + in the list have been deleted, this object will be garbage collected. + If this object is managed by a controller, then an entry in this list + will point to this controller, with the controller field set to true. + There cannot be more than one managing controller. + items: + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from the key-value + store until this reference is removed. Defaults to false. To + set this field, a user needs "delete" permission of the owner, + otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + resourceVersion: + description: "An opaque value that represents the internal version of + this object that can be used by clients to determine when objects + have changed. May be used for optimistic concurrency, change detection, + and the watch operation on a resource or set of resources. Clients + must treat these values as opaque and passed unmodified back to the + server. They may only be valid for a particular resource or set of + resources. \n Populated by the system. Read-only. Value must be treated + as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency" + type: string + selfLink: + description: SelfLink is a URL representing this object. Populated by + the system. Read-only. + type: string + uid: + description: "UID is the unique in time and space value for this object. + It is typically generated by the server on successful creation of + a resource and is not allowed to change on PUT operations. \n Populated + by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + type: string + type: object + spec: + type: object + status: + type: object + type: object + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/service.azure_storages.yaml b/config/crd/bases/service.azure_storages.yaml new file mode 100644 index 00000000000..208373a5ee1 --- /dev/null +++ b/config/crd/bases/service.azure_storages.yaml @@ -0,0 +1,430 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: storages.service.azure +spec: + group: service.azure + names: + kind: Storage + plural: storages + scope: "" + subresources: + status: {} + validation: + openAPIV3Schema: + description: Storage is the Schema for the storages API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with + a resource that may be set by external tools to store and retrieve + arbitrary metadata. They are not queryable and should be preserved + when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + clusterName: + description: The name of the cluster which the object belongs to. This + is used to distinguish resources with same name and namespace in different + clusters. This field is not set anywhere right now and apiserver is + going to ignore it if set in create or update request. + type: string + creationTimestamp: + description: "CreationTimestamp is a timestamp representing the server + time when this object was created. It is not guaranteed to be set + in happens-before order across separate operations. Clients may not + set this value. It is represented in RFC3339 form and is in UTC. \n + Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" + format: date-time + type: string + deletionGracePeriodSeconds: + description: Number of seconds allowed for this object to gracefully + terminate before it will be removed from the system. Only set when + deletionTimestamp is also set. May only be shortened. Read-only. + format: int64 + type: integer + deletionTimestamp: + description: "DeletionTimestamp is RFC 3339 date and time at which this + resource will be deleted. This field is set by the server when a graceful + deletion is requested by the user, and is not directly settable by + a client. The resource is expected to be deleted (no longer visible + from resource lists, and not reachable by name) after the time in + this field, once the finalizers list is empty. As long as the finalizers + list contains items, deletion is blocked. 
Once the deletionTimestamp + is set, this value may not be unset or be set further into the future, + although it may be shortened or the resource may be deleted prior + to this time. For example, a user may request that a pod is deleted + in 30 seconds. The Kubelet will react by sending a graceful termination + signal to the containers in the pod. After that 30 seconds, the Kubelet + will send a hard termination signal (SIGKILL) to the container and + after cleanup, remove the pod from the API. In the presence of network + partitions, this object may still exist after this timestamp, until + an administrator or automated process can determine the resource is + fully terminated. If not set, graceful deletion of the object has + not been requested. \n Populated by the system when a graceful deletion + is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" + format: date-time + type: string + finalizers: + description: Must be empty before the object is deleted from the registry. + Each entry is an identifier for the responsible component that will + remove the entry from the list. If the deletionTimestamp of the object + is non-nil, entries in this list can only be removed. + items: + type: string + type: array + generateName: + description: "GenerateName is an optional prefix, used by the server, + to generate a unique name ONLY IF the Name field has not been provided. + If this field is used, the name returned to the client will be different + than the name passed. This value will also be combined with a unique + suffix. The provided value has the same validation rules as the Name + field, and may be truncated by the length of the suffix required to + make the value unique on the server. \n If this field is specified + and the generated name exists, the server will NOT return a 409 - + instead, it will either return 201 Created or 500 with Reason ServerTimeout + indicating a unique name could not be found in the time allotted, + and the client should retry (optionally after the time indicated in + the Retry-After header). \n Applied only if Name is not specified. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" + type: string + generation: + description: A sequence number representing a specific generation of + the desired state. Populated by the system. Read-only. + format: int64 + type: integer + initializers: + description: "An initializer is a controller which enforces some system + invariant at object creation time. This field is a list of initializers + that have not yet acted on this object. If nil or empty, this object + has been completely initialized. Otherwise, the object is considered + uninitialized and is hidden (in list/watch and get calls) from clients + that haven't explicitly asked to observe uninitialized objects. \n + When an object is created, the system will populate this list with + the current set of initializers. Only privileged users may set or + modify this list. Once it is empty, it may not be modified further + by any user. \n DEPRECATED - initializers are an alpha field and will + be removed in v1.15." + properties: + pending: + description: Pending is a list of initializers that must execute + in order before this object is visible. When the last pending + initializer is removed, and no failing result is set, the initializers + struct will be set to nil and the object is considered as initialized + and visible to all clients. 
+ items: + properties: + name: + description: name of the process that is responsible for initializing + this object. + type: string + required: + - name + type: object + type: array + result: + description: If result is set with the Failure field, the object + will be persisted to storage and then deleted, ensuring that other + clients can observe the deletion. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + code: + description: Suggested HTTP return code for this status, 0 if + not set. + format: int32 + type: integer + details: + description: Extended data associated with the reason. Each + reason may define its own extended details. This field is + optional and the data returned is not guaranteed to conform + to any schema except that defined by the reason type. + properties: + causes: + description: The Causes array includes more details associated + with the StatusReason failure. Not all StatusReasons may + provide detailed causes. + items: + properties: + field: + description: "The field of the resource that has caused + this error, as named by its JSON serialization. + May include dot and postfix notation for nested + attributes. Arrays are zero-indexed. Fields may + appear more than once in an array of causes due + to fields having multiple errors. Optional. \n Examples: + \ \"name\" - the field \"name\" on the current + resource \"items[0].name\" - the field \"name\" + on the first array entry in \"items\"" + type: string + message: + description: A human-readable description of the cause + of the error. This field may be presented as-is + to a reader. + type: string + reason: + description: A machine-readable description of the + cause of the error. If this value is empty there + is no information available. + type: string + type: object + type: array + group: + description: The group attribute of the resource associated + with the status StatusReason. + type: string + kind: + description: 'The kind attribute of the resource associated + with the status StatusReason. On some operations may differ + from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: The name attribute of the resource associated + with the status StatusReason (when there is a single name + which can be described). + type: string + retryAfterSeconds: + description: If specified, the time in seconds before the + operation should be retried. Some errors may indicate + the client must take an alternate action - for those errors + this field may indicate how long to wait before taking + the alternate action. + format: int32 + type: integer + uid: + description: 'UID of the resource. (when there is a single + resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + type: object + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + message: + description: A human-readable description of the status of this + operation. + type: string + metadata: + description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + properties: + continue: + description: continue may be set if the user set a limit + on the number of items returned, and indicates that the + server has more data available. The value is opaque and + may be used to issue another request to the endpoint that + served this list to retrieve the next set of available + objects. Continuing a consistent list may not be possible + if the server configuration has changed or more than a + few minutes have passed. The resourceVersion field returned + when using this continue value will be identical to the + value in the first response, unless you have received + this token from an error message. + type: string + resourceVersion: + description: 'String that identifies the server''s internal + version of this object that can be used by clients to + determine when objects have changed. Value must be treated + as opaque by clients and passed unmodified back to the + server. Populated by the system. Read-only. More info: + https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + selfLink: + description: selfLink is a URL representing this object. + Populated by the system. Read-only. + type: string + type: object + reason: + description: A machine-readable description of why this operation + is in the "Failure" status. If this value is empty there is + no information available. A Reason clarifies an HTTP status + code but does not override it. + type: string + status: + description: 'Status of the operation. One of: "Success" or + "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' + type: string + type: object + required: + - pending + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize + and categorize (scope and select) objects. May match selectors of + replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + managedFields: + description: "ManagedFields maps workflow-id and version to the set + of fields that are managed by that workflow. This is mostly for internal + housekeeping, and users typically shouldn't need to set or understand + this field. A workflow can be the user's name, a controller's name, + or the name of a specific apply path like \"ci-cd\". The set of fields + is always in the version that the workflow used when modifying the + object. \n This field is alpha and can be changed or removed without + notice." + items: + properties: + apiVersion: + description: APIVersion defines the version of this resource that + this field set applies to. The format is "group/version" just + like the top-level APIVersion field. It is necessary to track + the version of a field set because it cannot be automatically + converted. + type: string + fields: + additionalProperties: true + description: Fields identifies a set of fields. + type: object + manager: + description: Manager is an identifier of the workflow managing + these fields. 
+ type: string + operation: + description: Operation is the type of operation which lead to + this ManagedFieldsEntry being created. The only valid values + for this field are 'Apply' and 'Update'. + type: string + time: + description: Time is timestamp of when these fields were set. + It should always be empty if Operation is 'Apply' + format: date-time + type: string + type: object + type: array + name: + description: 'Name must be unique within a namespace. Is required when + creating resources, although some resources may allow a client to + request the generation of an appropriate name automatically. Name + is primarily intended for creation idempotence and configuration definition. + Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. + An empty namespace is equivalent to the \"default\" namespace, but + \"default\" is the canonical representation. Not all objects are required + to be scoped to a namespace - the value of this field for those objects + will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: + http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects + in the list have been deleted, this object will be garbage collected. + If this object is managed by a controller, then an entry in this list + will point to this controller, with the controller field set to true. + There cannot be more than one managing controller. + items: + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from the key-value + store until this reference is removed. Defaults to false. To + set this field, a user needs "delete" permission of the owner, + otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + resourceVersion: + description: "An opaque value that represents the internal version of + this object that can be used by clients to determine when objects + have changed. May be used for optimistic concurrency, change detection, + and the watch operation on a resource or set of resources. Clients + must treat these values as opaque and passed unmodified back to the + server. They may only be valid for a particular resource or set of + resources. \n Populated by the system. Read-only. Value must be treated + as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency" + type: string + selfLink: + description: SelfLink is a URL representing this object. Populated by + the system. Read-only. + type: string + uid: + description: "UID is the unique in time and space value for this object. 
+ It is typically generated by the server on successful creation of + a resource and is not allowed to change on PUT operations. \n Populated + by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + type: string + type: object + spec: + properties: + accessTier: + type: string + accountName: + type: string + kind: + type: string + location: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + type: string + sku: + properties: + name: + description: 'Name - The SKU name. Required for account creation; + optional for update. Possible values include: ''StandardLRS'', + ''StandardGRS'', ''StandardRAGRS'', ''StandardZRS'', ''PremiumLRS'', + ''PremiumZRS'', ''StandardGZRS'', ''StandardRAGZRS''' + type: string + type: object + supportsHttpsTrafficOnly: + type: boolean + type: object + status: + properties: + resourceGroupName: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + type: string + type: object + type: object + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml new file mode 100644 index 00000000000..d6630859e19 --- /dev/null +++ b/config/rbac/role.yaml @@ -0,0 +1,48 @@ + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: manager-role +rules: +- apiGroups: + - service.azure + resources: + - cosmosdbs + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - service.azure + resources: + - cosmosdbs/status + verbs: + - get + - update + - patch +- apiGroups: + - service.azure + resources: + - storages + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - service.azure + resources: + - storages/status + verbs: + - get + - update + - patch diff --git a/config/samples/service_v1alpha1_storage.yaml b/config/samples/service_v1alpha1_storage.yaml index 6ccb130127e..58c2f29d9f7 100644 --- a/config/samples/service_v1alpha1_storage.yaml +++ b/config/samples/service_v1alpha1_storage.yaml @@ -3,5 +3,11 @@ kind: Storage metadata: name: storage-sample spec: - # Add fields here - foo: bar + accountName: binxi071905 + location: eastus2 + sku: + name: Standard_RAGRS + tier: Standard + kind: StorageV2 + accessTier: Hot + supportsHttpsTrafficOnly: true diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/controllers/storage_controller.go b/controllers/storage_controller.go index a54d5164272..1441ce7ddf9 100644 --- a/controllers/storage_controller.go +++ b/controllers/storage_controller.go @@ -28,10 +28,14 @@ import ( "context" "github.com/go-logr/logr" + uuid "github.com/satori/go.uuid" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" servicev1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" + "github.com/Azure/azure-service-operator/pkg/client/group" + "github.com/Azure/azure-service-operator/pkg/helpers" + storagetemplate "github.com/Azure/azure-service-operator/pkg/storage" ) // StorageReconciler reconciles a Storage object @@ -44,11 +48,75 @@ type StorageReconciler struct { // 
+kubebuilder:rbac:groups=service.azure,resources=storages/status,verbs=get;update;patch
 func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
-	_ = context.Background()
-	_ = r.Log.WithValues("storage", req.NamespacedName)
+	ctx := context.Background()
+	log := r.Log.WithValues("storage", req.NamespacedName)
 
-	// your logic here
+	// Fetch the Storage instance
+	instance := &servicev1alpha1.Storage{}
+	err := r.Get(ctx, req.NamespacedName, instance)
+	if err != nil {
+		log.Error(err, "unable to fetch Storage")
+		// we'll ignore not-found errors, since they can't be fixed by an immediate
+		// requeue (we'll need to wait for a new notification), and we can get them
+		// on deleted requests.
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+	storageFinalizerName := "storage.finalizers.azure"
+
+	// examine DeletionTimestamp to determine if the object is under deletion
+	if instance.ObjectMeta.DeletionTimestamp.IsZero() {
+		// The object is not being deleted, so if it does not have our finalizer,
+		// then let's add the finalizer and update the object. This is equivalent
+		// to registering our finalizer.
+		if !helpers.ContainsString(instance.ObjectMeta.Finalizers, storageFinalizerName) {
+			instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, storageFinalizerName)
+			if err := r.Update(ctx, instance); err != nil {
+				return ctrl.Result{}, err
+			}
+		}
+	} else {
+		// The object is being deleted
+		if helpers.ContainsString(instance.ObjectMeta.Finalizers, storageFinalizerName) {
+			// our finalizer is present, so let's handle any external dependency
+			if err := r.deleteExternalResources(instance); err != nil {
+				// if we fail to delete the external dependency here, return with the error
+				// so that it can be retried
+				return ctrl.Result{}, err
+			}
+
+			// remove our finalizer from the list and update it.
+			instance.ObjectMeta.Finalizers = helpers.RemoveString(instance.ObjectMeta.Finalizers, storageFinalizerName)
+			if err := r.Update(ctx, instance); err != nil {
+				return ctrl.Result{}, err
+			}
+		}
+
+		return ctrl.Result{}, err
+	}
+
+	var resourceGroupName string
+	if instance.Status.ResourceGroupName != "" {
+		resourceGroupName = instance.Status.ResourceGroupName
+	} else {
+		resourceGroupName = uuid.NewV4().String()
+		log.Info("Creating a new resource group", "ResourceGroupName", resourceGroupName)
+		group.CreateGroup(ctx, resourceGroupName)
+		_, err = r.updateStatus(req, resourceGroupName)
+		if err != nil {
+			return ctrl.Result{}, err
+		}
+	}
+
+	log.Info("Reconciling Storage", "Storage.Namespace", instance.Namespace, "Storage.Name", instance.Name)
+	template := storagetemplate.New(instance)
+	_, err = template.CreateDeployment(ctx, resourceGroupName)
+	if err != nil {
+		log.Error(err, "Failed to reconcile Storage")
+		return ctrl.Result{}, err
+	}
+
+	// Storage created successfully - don't requeue
 	return ctrl.Result{}, nil
 }
 
@@ -57,3 +125,39 @@ func (r *StorageReconciler) SetupWithManager(mgr ctrl.Manager) error {
 		For(&servicev1alpha1.Storage{}).
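 		// Completing the builder below registers this reconciler with the manager,
 		// so Reconcile is invoked for every create, update, and delete event
 		// observed on Storage objects (descriptive note; not part of the original patch logic).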
Complete(r) } + +func (r *StorageReconciler) updateStatus(req ctrl.Request, resourceGroupName string) (*servicev1alpha1.Storage, error) { + ctx := context.Background() + log := r.Log.WithValues("storage", req.NamespacedName) + + resource := &servicev1alpha1.Storage{} + r.Get(ctx, req.NamespacedName, resource) + + resourceCopy := resource.DeepCopy() + resourceCopy.Status.ResourceGroupName = resourceGroupName + log.Info("Getting Storage Account", "Storage.Namespace", resourceCopy.Namespace, "Storage.Name", resourceCopy.Name, "Storage.Status.ResourceGroupName", resourceCopy.Status.ResourceGroupName) + + if err := r.Status().Update(ctx, resourceCopy); err != nil { + log.Error(err, "unable to update Storage status") + return nil, err + } + + return resourceCopy, nil +} + +func (r *StorageReconciler) deleteExternalResources(instance *servicev1alpha1.Storage) error { + // + // delete any external resources associated with the storage + // + // Ensure that delete implementation is idempotent and safe to invoke + // multiple types for same object. + ctx := context.Background() + log := r.Log.WithValues("Storage.Namespace", instance.Namespace, "Storage.Name", instance.Name) + + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. + // Return and don't requeue + log.Info("Deleting Storage Account", "Storage.Status.ResourceGroupName", instance.Status.ResourceGroupName) + _, err := group.DeleteGroup(ctx, instance.Status.ResourceGroupName) + return err +} diff --git a/go.mod b/go.mod index 2bee5f79233..2fede21bef8 100644 --- a/go.mod +++ b/go.mod @@ -3,10 +3,17 @@ module github.com/Azure/azure-service-operator go 1.12 require ( + github.com/Azure/azure-sdk-for-go v31.1.0+incompatible + github.com/Azure/go-autorest/autorest v0.5.0 + github.com/Azure/go-autorest/autorest/adal v0.2.0 + github.com/Azure/go-autorest/autorest/azure/auth v0.1.0 + github.com/Azure/go-autorest/autorest/to v0.2.0 + github.com/Azure/go-autorest/autorest/validation v0.1.0 // indirect github.com/go-logr/logr v0.1.0 - github.com/onsi/ginkgo v1.6.0 - github.com/onsi/gomega v1.4.2 - golang.org/x/net v0.0.0-20180906233101-161cd47e91fd + github.com/onsi/ginkgo v1.7.0 + github.com/onsi/gomega v1.4.3 + github.com/satori/go.uuid v1.2.0 + golang.org/x/net v0.0.0-20190311183353-d8887717615a k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible sigs.k8s.io/controller-runtime v0.2.0-beta.2 diff --git a/go.sum b/go.sum index 39d372ab640..36a5d41e1c5 100644 --- a/go.sum +++ b/go.sum @@ -1,39 +1,107 @@ cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= +contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +github.com/Azure/azure-sdk-for-go v31.1.0+incompatible h1:5SzgnfAvUBdBwNTN23WLfZoCt/rGhLvd7QdCAaFXgY4= +github.com/Azure/azure-sdk-for-go v31.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-autorest/autorest v0.1.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg= 
+github.com/Azure/go-autorest/autorest v0.5.0 h1:Mlm9qy2fpQ9MvfyI41G2Zf5B4CsgjjNbLOWszfK6KrY= +github.com/Azure/go-autorest/autorest v0.5.0/go.mod h1:9HLKlQjVBH6U3oDfsXOeVc56THsLPw1L03yban4xThw= +github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= +github.com/Azure/go-autorest/autorest/adal v0.2.0 h1:7IBDu1jgh+ADHXnEYExkV9RE/ztOOlxdACkkPRthGKw= +github.com/Azure/go-autorest/autorest/adal v0.2.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= +github.com/Azure/go-autorest/autorest/azure/auth v0.1.0 h1:YgO/vSnJEc76NLw2ecIXvXa8bDWiqf1pOJzARAoZsYU= +github.com/Azure/go-autorest/autorest/azure/auth v0.1.0/go.mod h1:Gf7/i2FUpyb/sGBLIFxTBzrNzBo7aPXXE3ZVeDRwdpM= +github.com/Azure/go-autorest/autorest/azure/cli v0.1.0 h1:YTtBrcb6mhA+PoSW8WxFDoIIyjp13XqJeX80ssQtri4= +github.com/Azure/go-autorest/autorest/azure/cli v0.1.0/go.mod h1:Dk8CUAt/b/PzkfeRsWzVG9Yj3ps8mS8ECztu43rdU8U= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/to v0.2.0 h1:nQOZzFCudTh+TvquAtCRjM01VEYx85e9qbwt5ncW4L8= +github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= +github.com/Azure/go-autorest/autorest/validation v0.1.0 h1:ISSNzGUh+ZSzizJWOWzs8bwpXIePbGLW4z/AmUFGH5A= +github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0SubHbpV3p4/Vc= +github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 h1:Kn3rqvbUFqSepE2OqVu0Pn1CbDw9IuMlONapol0zuwk= github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc= github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE= github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp 
v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw= github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -41,35 +109,64 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod 
h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs= github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.0 h1:tXuTFVHC03mW0D+Ua1Q2d1EAVqLTuggX50V0VLICCzY= github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= @@ -78,21 +175,69 @@ go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac h1:7d7lG9fHOLdL6jZPtnV4LpI41SbohIJ1Atq7U991dMg= golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 h1:O5YqonU5IWby+w98jVUG9h7zlCWCcH4RHyPVReBmhzk= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -100,10 +245,14 @@ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b h1:aBGgKJUM9Hk/3AE8WaZIApnTxG35kbuQba2w+SXqezo= k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 h1:q1Qvjzs/iEdXF6A1a8H3AKVFDzJNcJn3nXMs6R6qFtA= diff --git a/pkg/client/deployment/deployment.go b/pkg/client/deployment/deployment.go new file mode 100644 index 00000000000..f5ae77a067d --- /dev/null +++ b/pkg/client/deployment/deployment.go @@ -0,0 +1,49 @@ +package deployment + +import ( + "context" + "fmt" + + 
"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources" + + "github.com/Azure/azure-service-operator/pkg/config" + "github.com/Azure/azure-service-operator/pkg/iam" +) + +func getDeploymentsClient() resources.DeploymentsClient { + deployClient := resources.NewDeploymentsClient(config.SubscriptionID()) + a, _ := iam.GetResourceManagementAuthorizer() + deployClient.Authorizer = a + return deployClient +} + +// CreateDeployment creates a template deployment using the +// referenced JSON files for the template and its parameters +func CreateDeployment(ctx context.Context, resourceGroupName, deploymentName, templateUri string, params *map[string]interface{}) (de resources.DeploymentExtended, err error) { + deployClient := getDeploymentsClient() + templateLink := resources.TemplateLink{ + URI: &templateUri, + } + future, err := deployClient.CreateOrUpdate( + ctx, + resourceGroupName, + deploymentName, + resources.Deployment{ + Properties: &resources.DeploymentProperties{ + TemplateLink: &templateLink, + Parameters: params, + Mode: resources.Incremental, + }, + }, + ) + if err != nil { + return de, fmt.Errorf("cannot create deployment: %v", err) + } + + err = future.WaitForCompletionRef(ctx, deployClient.Client) + if err != nil { + return de, fmt.Errorf("cannot get the create deployment future respone: %v", err) + } + + return future.Result(deployClient) +} diff --git a/pkg/client/group/group.go b/pkg/client/group/group.go new file mode 100644 index 00000000000..fda5e02d2ac --- /dev/null +++ b/pkg/client/group/group.go @@ -0,0 +1,40 @@ +package group + +import ( + "context" + "log" + + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources" + + "github.com/Azure/go-autorest/autorest/to" + + "github.com/Azure/azure-service-operator/pkg/config" + "github.com/Azure/azure-service-operator/pkg/iam" +) + +func getGroupsClient() resources.GroupsClient { + groupsClient := resources.NewGroupsClient(config.SubscriptionID()) + a, err := iam.GetResourceManagementAuthorizer() + if err != nil { + log.Fatalf("failed to initialize authorizer: %v\n", err) + } + groupsClient.Authorizer = a + return groupsClient +} + +// CreateGroup creates a new resource group named by env var +func CreateGroup(ctx context.Context, groupName string) (resources.Group, error) { + groupsClient := getGroupsClient() + return groupsClient.CreateOrUpdate( + ctx, + groupName, + resources.Group{ + Location: to.StringPtr("eastus2"), + }) +} + +// DeleteGroup removes the resource group named by env var +func DeleteGroup(ctx context.Context, groupName string) (result resources.GroupsDeleteFuture, err error) { + groupsClient := getGroupsClient() + return groupsClient.Delete(ctx, groupName) +} diff --git a/pkg/config/config.go b/pkg/config/config.go new file mode 100644 index 00000000000..464d6827cba --- /dev/null +++ b/pkg/config/config.go @@ -0,0 +1,67 @@ +package config + +import ( + "fmt" + "os" + + "github.com/Azure/go-autorest/autorest/azure" +) + +const ( + CloudNameEnvVar = "CLOUD_NAME" + SubscriptionIDEnvVar = "SUBSCRIPTION_ID" + ClientIDEnvVar = "CLIENT_ID" + ClientSecretEnvVar = "CLIENT_SECRET" + TenantIDEnvVar = "TENANT_ID" + UseAADPodIdentityEnvVar = "USE_AAD_POD_IDENTITY" +) + +// GetEnvVar returns the value of the environment variable +func GetEnvVar(envVarName string) string { + v, found := os.LookupEnv(envVarName) + if !found { + panic(fmt.Sprintf("%s must be set", envVarName)) + } + return v +} + +// CloudName returns the cloud name +func CloudName() string { + return 
GetEnvVar(CloudNameEnvVar) +} + +// GetSubscriptionID returns the subscription ID +func SubscriptionID() string { + return GetEnvVar(SubscriptionIDEnvVar) +} + +// GetClientID returns the client ID +func ClientID() string { + return GetEnvVar(ClientIDEnvVar) +} + +// GetClientSecret returns the client secret +func ClientSecret() string { + return GetEnvVar(ClientSecretEnvVar) +} + +// GetTenantID returns the tenant ID +func TenantID() string { + return GetEnvVar(TenantIDEnvVar) +} + +// UseAADPodIdentity returns whether AAD Pod Identity is used +func UseAADPodIdentity() bool { + return GetEnvVar(UseAADPodIdentityEnvVar) == "true" +} + +// Environment() returns an `azure.Environment{...}` for the current cloud. +func Environment() azure.Environment { + cloudName := CloudName() + env, err := azure.EnvironmentFromName(cloudName) + if err != nil { + panic(fmt.Sprintf( + "invalid cloud name '%s' specified, cannot continue\n", cloudName)) + } + return env +} diff --git a/pkg/helpers/helpers.go b/pkg/helpers/helpers.go new file mode 100644 index 00000000000..404fd84df81 --- /dev/null +++ b/pkg/helpers/helpers.go @@ -0,0 +1,21 @@ +package helpers + +// Helper functions to check and remove string from a slice of strings. +func ContainsString(slice []string, s string) bool { + for _, item := range slice { + if item == s { + return true + } + } + return false +} + +func RemoveString(slice []string, s string) (result []string) { + for _, item := range slice { + if item == s { + continue + } + result = append(result, item) + } + return +} diff --git a/pkg/iam/authorizers.go b/pkg/iam/authorizers.go new file mode 100644 index 00000000000..1f6025e4b12 --- /dev/null +++ b/pkg/iam/authorizers.go @@ -0,0 +1,58 @@ +package iam + +import ( + "github.com/Azure/azure-service-operator/pkg/config" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure/auth" +) + +var ( + armAuthorizer autorest.Authorizer +) + +// GetResourceManagementAuthorizer gets an OAuthTokenAuthorizer for Azure Resource Manager +func GetResourceManagementAuthorizer() (autorest.Authorizer, error) { + if armAuthorizer != nil { + return armAuthorizer, nil + } + + var a autorest.Authorizer + var err error + + if config.UseAADPodIdentity() { + a, err = auth.NewAuthorizerFromEnvironment() + } else { + a, err = getAuthorizerForResource(config.Environment().ResourceManagerEndpoint) + } + if err == nil { + // cache + armAuthorizer = a + } else { + // clear cache + armAuthorizer = nil + } + + return armAuthorizer, err +} + +func getAuthorizerForResource(resource string) (autorest.Authorizer, error) { + var a autorest.Authorizer + var err error + + oauthConfig, err := adal.NewOAuthConfig( + config.Environment().ActiveDirectoryEndpoint, config.TenantID()) + if err != nil { + return nil, err + } + + token, err := adal.NewServicePrincipalToken( + *oauthConfig, config.ClientID(), config.ClientSecret(), resource) + if err != nil { + return nil, err + } + a = autorest.NewBearerAuthorizer(token) + + return a, err +} diff --git a/pkg/storage/storage_template.go b/pkg/storage/storage_template.go new file mode 100644 index 00000000000..e237ed2fb7e --- /dev/null +++ b/pkg/storage/storage_template.go @@ -0,0 +1,50 @@ +package storage + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources" + uuid "github.com/satori/go.uuid" + + azureV1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" + 
"github.com/Azure/azure-service-operator/pkg/client/deployment" +) + +// New generates a new object +func New(storage *azureV1alpha1.Storage) *Template { + return &Template{ + Storage: storage, + } +} + +// Template defines the dynamodb cfts +type Template struct { + Storage *azureV1alpha1.Storage +} + +func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (resources.DeploymentExtended, error) { + deploymentName := uuid.NewV4().String() + templateURI := "https://azureserviceoperator.blob.core.windows.net/templates/storage.json" + params := map[string]interface{}{ + "location": map[string]interface{}{ + "value": t.Storage.Spec.Location, + }, + "storageAccountName": map[string]interface{}{ + "value": t.Storage.Spec.AccountName, + }, + "accountType": map[string]interface{}{ + "value": t.Storage.Spec.Sku.Name, + }, + "kind": map[string]interface{}{ + "value": t.Storage.Spec.Kind, + }, + "accessTier": map[string]interface{}{ + "value": t.Storage.Spec.AccessTier, + }, + "supportsHttpsTrafficOnly": map[string]interface{}{ + "value": *t.Storage.Spec.EnableHTTPSTrafficOnly, + }, + } + + return deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, templateURI, ¶ms) +} diff --git a/pkg/template/storage.json b/pkg/template/storage.json new file mode 100644 index 00000000000..80d2595020e --- /dev/null +++ b/pkg/template/storage.json @@ -0,0 +1,43 @@ +{ + "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "location": { + "type": "String" + }, + "storageAccountName": { + "type": "String" + }, + "accountType": { + "type": "String" + }, + "kind": { + "type": "String" + }, + "accessTier": { + "type": "String" + }, + "supportsHttpsTrafficOnly": { + "type": "Bool" + } + }, + "variables": {}, + "resources": [ + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "2018-07-01", + "name": "[parameters('storageAccountName')]", + "location": "[parameters('location')]", + "dependsOn": [], + "sku": { + "name": "[parameters('accountType')]" + }, + "kind": "[parameters('kind')]", + "properties": { + "accessTier": "[parameters('accessTier')]", + "supportsHttpsTrafficOnly": "[parameters('supportsHttpsTrafficOnly')]" + } + } + ], + "outputs": {} +} From 66dc93d1d019395235976b0515ce7c1974c04ae2 Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Sat, 20 Jul 2019 00:15:18 +0800 Subject: [PATCH 06/34] Add development docs --- docs/development.md | 72 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 docs/development.md diff --git a/docs/development.md b/docs/development.md new file mode 100644 index 00000000000..00ef40cfe94 --- /dev/null +++ b/docs/development.md @@ -0,0 +1,72 @@ +# Development + +## Prerequisites + +To get started you will need: + +* a Kubernetes cluster to run against. You can use [KIND](https://sigs.k8s.io/kind) to get a local cluster for testing, or run against a remote cluster, e.g. [Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough). 
+* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) + +## Test It Out Locally + +Install the CRDs into the cluster: + +``` +make install +``` + +Setup the environment variables: + +``` +export USE_AAD_POD_IDENTITY=false +export CLOUD_NAME=AzurePublicCloud +export TENANT_ID= +export SUBSCRIPTION_ID= +export CLIENT_ID= +export CLIENT_SECRET= +``` + +Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running): + +``` +make run +``` + +Refer to [kubebuilder's doc](https://book.kubebuilder.io/quick-start.html#test-it-out-locally). + +## Create a Custom Resource + +Create your CR (make sure to edit them first to specify the fields). Example: + +``` +kubectl apply -f config/samples/service_v1alpha1_storage.yaml +``` + +## Add a New Custom Resource + +### 1. Add a New API + +``` +kubebuilder create api --group service --version v1alpha1 --kind +``` + +Refer to [kubebuilder's doc](https://book.kubebuilder.io/cronjob-tutorial/new-api.html) + +### 2. Design an API + +1. Try to create the specific Azure service, and download the template in the `Review+Create` step. +2. Upload the template to a storage account. For now, we can use the storage account `azureserviceoperator`. +3. Based on the template, we can figure out what the `Spec` should be like. +4. The `Status` should contain the resource group name, which can be used to delete the resource. + +Refer to [kubebuilder's doc](https://book.kubebuilder.io/cronjob-tutorial/api-design.html) + +Note: + +* Don't forget to add `// +kubebuilder:subresource:status` if we want a status subresource. + +* Run `make manifests` if you find the property you add doesn't work. + +### 3. Delete external resource + +[Using Finalizers](https://book.kubebuilder.io/reference/using-finalizers.html) From 110fa99144da411e805d881d00eadf5663823d54 Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Sat, 20 Jul 2019 19:16:56 +0800 Subject: [PATCH 07/34] Remove the storage account name from the spec --- api/v1alpha1/storage_types.go | 1 - config/crd/bases/service.azure_storages.yaml | 2 -- config/samples/service_v1alpha1_storage.yaml | 1 - pkg/helpers/helpers.go | 17 ++++++++++++++++- pkg/storage/storage_template.go | 3 ++- 5 files changed, 18 insertions(+), 6 deletions(-) diff --git a/api/v1alpha1/storage_types.go b/api/v1alpha1/storage_types.go index e16f491bbe6..8702433c1fa 100644 --- a/api/v1alpha1/storage_types.go +++ b/api/v1alpha1/storage_types.go @@ -38,7 +38,6 @@ type StorageSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file Location string `json:"location,omitempty"` - AccountName string `json:"accountName,omitempty"` Sku Sku `json:"sku,omitempty"` Kind storagesdk.Kind `json:"kind,omitempty"` AccessTier storagesdk.AccessTier `json:"accessTier,omitempty"` diff --git a/config/crd/bases/service.azure_storages.yaml b/config/crd/bases/service.azure_storages.yaml index 208373a5ee1..43ace0985bb 100644 --- a/config/crd/bases/service.azure_storages.yaml +++ b/config/crd/bases/service.azure_storages.yaml @@ -389,8 +389,6 @@ spec: properties: accessTier: type: string - accountName: - type: string kind: type: string location: diff --git a/config/samples/service_v1alpha1_storage.yaml b/config/samples/service_v1alpha1_storage.yaml index 58c2f29d9f7..4bd1c65ab35 100644 --- a/config/samples/service_v1alpha1_storage.yaml +++ b/config/samples/service_v1alpha1_storage.yaml @@ -3,7 +3,6 @@ kind: Storage metadata: name: 
storage-sample spec: - accountName: binxi071905 location: eastus2 sku: name: Standard_RAGRS diff --git a/pkg/helpers/helpers.go b/pkg/helpers/helpers.go index 404fd84df81..2d6c60d0762 100644 --- a/pkg/helpers/helpers.go +++ b/pkg/helpers/helpers.go @@ -1,6 +1,12 @@ package helpers -// Helper functions to check and remove string from a slice of strings. +import ( + "fmt" + "strings" + + uuid "github.com/satori/go.uuid" +) + func ContainsString(slice []string, s string) bool { for _, item := range slice { if item == s { @@ -19,3 +25,12 @@ func RemoveString(slice []string, s string) (result []string) { } return } + +func AzureResourceName(kind string) string { + var name string + switch kind { + case "Storage": + name = fmt.Sprintf("aso%s", strings.ReplaceAll(uuid.NewV4().String(), "-", ""))[:24] + } + return name +} diff --git a/pkg/storage/storage_template.go b/pkg/storage/storage_template.go index e237ed2fb7e..b73e7458b2f 100644 --- a/pkg/storage/storage_template.go +++ b/pkg/storage/storage_template.go @@ -8,6 +8,7 @@ import ( azureV1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" "github.com/Azure/azure-service-operator/pkg/client/deployment" + "github.com/Azure/azure-service-operator/pkg/helpers" ) // New generates a new object @@ -30,7 +31,7 @@ func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName strin "value": t.Storage.Spec.Location, }, "storageAccountName": map[string]interface{}{ - "value": t.Storage.Spec.AccountName, + "value": helpers.AzureResourceName(t.Storage.Kind), }, "accountType": map[string]interface{}{ "value": t.Storage.Spec.Sku.Name, From 78e9d7bfb6855f2e9f66b4f648591b876b139e28 Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Sun, 21 Jul 2019 02:15:04 +0800 Subject: [PATCH 08/34] Sync additional resources for Azure storage account 1. Create a secret based on storage account credentials 2. 
Add the global config --- Makefile | 8 +- api/v1alpha1/storage_types.go | 90 +++++++++++++--- api/v1alpha1/zz_generated.deepcopy.go | 37 +++++++ config/crd/bases/service.azure_storages.yaml | 43 +++++++- controllers/storage_controller.go | 108 ++++++++++++++----- go.mod | 2 + main.go | 63 +++++++++-- pkg/client/deployment/deployment.go | 2 +- pkg/client/group/group.go | 11 +- pkg/config/config.go | 71 ++++++------ pkg/helpers/deployment.go | 36 +++++++ pkg/helpers/helpers.go | 29 +++-- pkg/helpers/secret.go | 51 +++++++++ pkg/helpers/service.go | 57 ++++++++++ pkg/helpers/types.go | 6 ++ pkg/iam/authorizers.go | 6 +- pkg/storage/storage_template.go | 4 - pkg/template/storage.json | 43 -------- template/storage.json | 63 +++++++++++ 19 files changed, 577 insertions(+), 153 deletions(-) create mode 100644 pkg/helpers/deployment.go create mode 100644 pkg/helpers/secret.go create mode 100644 pkg/helpers/service.go create mode 100644 pkg/helpers/types.go delete mode 100644 pkg/template/storage.json create mode 100644 template/storage.json diff --git a/Makefile b/Makefile index 2d9d3dda15a..a32868b3a4f 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,13 @@ manager: generate fmt vet # Run against the configured Kubernetes cluster in ~/.kube/config run: generate fmt vet - go run ./main.go + go build -o bin/manager main.go + bin/manager \ + --kubeconfig=${KUBECONFIG} \ + --tenant-id=${TENANT_ID} \ + --subscription-id=${SUBSCRIPTION_ID} \ + --client-id=${CLIENT_ID} \ + --client-secret=${CLIENT_SECRET} # Install CRDs into a cluster install: manifests diff --git a/api/v1alpha1/storage_types.go b/api/v1alpha1/storage_types.go index 8702433c1fa..6a91a2f9a72 100644 --- a/api/v1alpha1/storage_types.go +++ b/api/v1alpha1/storage_types.go @@ -26,8 +26,6 @@ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - storagesdk "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage" ) // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! @@ -37,24 +35,90 @@ import ( type StorageSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file - Location string `json:"location,omitempty"` - Sku Sku `json:"sku,omitempty"` - Kind storagesdk.Kind `json:"kind,omitempty"` - AccessTier storagesdk.AccessTier `json:"accessTier,omitempty"` - EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` + + // +kubebuilder:validation:MinLength=0 + + Location string `json:"location,omitempty"` + + Sku Sku `json:"sku,omitempty"` + + Kind Kind `json:"kind,omitempty"` + + AccessTier AccessTier `json:"accessTier,omitempty"` + + EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` } // Sku the SKU of the storage account. type Sku struct { - // Name - The SKU name. Required for account creation; optional for update. Possible values include: 'StandardLRS', 'StandardGRS', 'StandardRAGRS', 'StandardZRS', 'PremiumLRS', 'PremiumZRS', 'StandardGZRS', 'StandardRAGZRS' - Name storagesdk.SkuName `json:"name,omitempty"` + // Name - The SKU name. Required for account creation; optional for update. + // Possible values include: 'StandardLRS', 'StandardGRS', 'StandardRAGRS', 'StandardZRS', 'PremiumLRS', 'PremiumZRS', 'StandardGZRS', 'StandardRAGZRS' + Name SkuName `json:"name,omitempty"` } +// SkuName enumerates the values for sku name. +// Only one of the following sku names may be specified. +// If none of the following sku names is specified, the default one +// is StorageV2. 
+// +kubebuilder:validation:Enum=Premium_LRS;Premium_ZRS;Standard_GRS;Standard_GZRS;Standard_LRS;Standard_RAGRS;Standard_RAGZRS;Standard_ZRS +type SkuName string + +const ( + PremiumLRS SkuName = "Premium_LRS" + PremiumZRS SkuName = "Premium_ZRS" + StandardGRS SkuName = "Standard_GRS" + StandardGZRS SkuName = "Standard_GZRS" + StandardLRS SkuName = "Standard_LRS" + StandardRAGRS SkuName = "Standard_RAGRS" + StandardRAGZRS SkuName = "Standard_RAGZRS" + StandardZRS SkuName = "Standard_ZRS" +) + +// Kind enumerates the values for kind. +// Only one of the following kinds may be specified. +// If none of the following kinds is specified, the default one +// is StorageV2. +// +kubebuilder:validation:Enum=BlobStorage;BlockBlobStorage;FileStorage;Storage;StorageV2 +type Kind string + +const ( + BlobStorage Kind = "BlobStorage" + BlockBlobStorage Kind = "BlockBlobStorage" + FileStorage Kind = "FileStorage" + StorageV1 Kind = "Storage" + StorageV2 Kind = "StorageV2" +) + +// AccessTier enumerates the values for access tier. +// Only one of the following access tiers may be specified. +// If none of the following access tiers is specified, the default one +// is Hot. +// +kubebuilder:validation:Enum=Cool;Hot +type AccessTier string + +const ( + Cool AccessTier = "Cool" + Hot AccessTier = "Hot" +) + // StorageStatus defines the observed state of Storage type StorageStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file - ResourceGroupName string `json:"resourceGroupName,omitempty"` + ProvisioningState string `json:"provisioningState,omitempty"` +} + +type StorageOutput struct { + StorageAccountName string `json:"storageAccountName,omitempty"` + Key1 string `json:"key1,omitempty"` + Key2 string `json:"key2,omitempty"` + ConnectionString1 string `json:"connectionString1,omitempty"` + ConnectionString2 string `json:"connectionString2,omitempty"` +} + +// StorageAdditionalResources holds the additional resources +type StorageAdditionalResources struct { + Secrets []string `json:"secrets,omitempty"` } // +kubebuilder:object:root=true @@ -65,8 +129,10 @@ type Storage struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec StorageSpec `json:"spec,omitempty"` - Status StorageStatus `json:"status,omitempty"` + Spec StorageSpec `json:"spec,omitempty"` + Status StorageStatus `json:"status,omitempty"` + Output StorageOutput `json:"output,omitempty"` + AdditionalResources StorageAdditionalResources `json:"additionalResources,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 237ca5105b0..05212d787cd 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -143,6 +143,8 @@ func (in *Storage) DeepCopyInto(out *Storage) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) out.Status = in.Status + out.Output = in.Output + in.AdditionalResources.DeepCopyInto(&out.AdditionalResources) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. @@ -163,6 +165,26 @@ func (in *Storage) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageAdditionalResources) DeepCopyInto(out *StorageAdditionalResources) { + *out = *in + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAdditionalResources. +func (in *StorageAdditionalResources) DeepCopy() *StorageAdditionalResources { + if in == nil { + return nil + } + out := new(StorageAdditionalResources) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageList) DeepCopyInto(out *StorageList) { *out = *in @@ -195,6 +217,21 @@ func (in *StorageList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageOutput) DeepCopyInto(out *StorageOutput) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOutput. +func (in *StorageOutput) DeepCopy() *StorageOutput { + if in == nil { + return nil + } + out := new(StorageOutput) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { *out = *in diff --git a/config/crd/bases/service.azure_storages.yaml b/config/crd/bases/service.azure_storages.yaml index 43ace0985bb..321f7fd942d 100644 --- a/config/crd/bases/service.azure_storages.yaml +++ b/config/crd/bases/service.azure_storages.yaml @@ -17,6 +17,13 @@ spec: openAPIV3Schema: description: Storage is the Schema for the storages API properties: + additionalResources: + properties: + secrets: + items: + type: string + type: array + type: object apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest @@ -385,15 +392,36 @@ spec: by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" type: string type: object + output: + properties: + connectionString1: + type: string + connectionString2: + type: string + key1: + type: string + key2: + type: string + storageAccountName: + type: string + type: object spec: properties: accessTier: + enum: + - Cool + - Hot type: string kind: + enum: + - BlobStorage + - BlockBlobStorage + - FileStorage + - Storage + - StorageV2 type: string location: - description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - Important: Run "make" to regenerate code after modifying this file' + minLength: 0 type: string sku: properties: @@ -402,6 +430,15 @@ spec: optional for update. 
Possible values include: ''StandardLRS'', ''StandardGRS'', ''StandardRAGRS'', ''StandardZRS'', ''PremiumLRS'', ''PremiumZRS'', ''StandardGZRS'', ''StandardRAGZRS''' + enum: + - Premium_LRS + - Premium_ZRS + - Standard_GRS + - Standard_GZRS + - Standard_LRS + - Standard_RAGRS + - Standard_RAGZRS + - Standard_ZRS type: string type: object supportsHttpsTrafficOnly: @@ -409,7 +446,7 @@ spec: type: object status: properties: - resourceGroupName: + provisioningState: description: 'INSERT ADDITIONAL STATUS FIELD - define observed state of cluster Important: Run "make" to regenerate code after modifying this file' diff --git a/controllers/storage_controller.go b/controllers/storage_controller.go index 1441ce7ddf9..2b56f91ca0c 100644 --- a/controllers/storage_controller.go +++ b/controllers/storage_controller.go @@ -28,14 +28,15 @@ import ( "context" "github.com/go-logr/logr" - uuid "github.com/satori/go.uuid" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" servicev1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" "github.com/Azure/azure-service-operator/pkg/client/group" + "github.com/Azure/azure-service-operator/pkg/config" "github.com/Azure/azure-service-operator/pkg/helpers" storagetemplate "github.com/Azure/azure-service-operator/pkg/storage" + "github.com/Azure/go-autorest/autorest/to" ) // StorageReconciler reconciles a Storage object @@ -61,9 +62,10 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { // on deleted requests. return ctrl.Result{}, client.IgnoreNotFound(err) } + log.Info("Getting Storage Account", "Storage.Namespace", instance.Namespace, "Storage.Name", instance.Name) + log.V(1).Info("Describing Storage Account", "Storage", instance) storageFinalizerName := "storage.finalizers.azure" - // examine DeletionTimestamp to determine if object is under deletion if instance.ObjectMeta.DeletionTimestamp.IsZero() { // The object is not being deleted, so if it does not have our finalizer, @@ -95,27 +97,28 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { return ctrl.Result{}, err } - var resourceGroupName string - if instance.Status.ResourceGroupName != "" { - resourceGroupName = instance.Status.ResourceGroupName - } else { - resourceGroupName = uuid.NewV4().String() - log.Info("Creating a new resource group", "ResourceGroupName", resourceGroupName) - group.CreateGroup(ctx, resourceGroupName) - _, err = r.updateStatus(req, resourceGroupName) - if err != nil { - return ctrl.Result{}, err - } + resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "storage", instance.Name, instance.Namespace) + tags := map[string]*string{ + "name": to.StringPtr(instance.Name), + "namespace": to.StringPtr(instance.Namespace), + "kind": to.StringPtr("storage"), } + log.Info("Creating a new resource group", "ResourceGroupName", resourceGroupName) + group.CreateGroup(ctx, resourceGroupName, instance.Spec.Location, tags) log.Info("Reconciling Storage", "Storage.Namespace", instance.Namespace, "Storage.Name", instance.Name) template := storagetemplate.New(instance) - _, err = template.CreateDeployment(ctx, resourceGroupName) + de, err := template.CreateDeployment(ctx, resourceGroupName) if err != nil { log.Error(err, "Failed to reconcile Storage") return ctrl.Result{}, err } + _, err = r.updateStatus(req, resourceGroupName, *de.Properties.ProvisioningState, de.Properties.Outputs) + if err != nil { + return ctrl.Result{}, err + } + // Storage 
created successfully - don't requeue return ctrl.Result{}, nil } @@ -126,21 +129,41 @@ func (r *StorageReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *StorageReconciler) updateStatus(req ctrl.Request, resourceGroupName string) (*servicev1alpha1.Storage, error) { +func (r *StorageReconciler) updateStatus(req ctrl.Request, resourceGroupName, provisioningState string, outputs interface{}) (*servicev1alpha1.Storage, error) { ctx := context.Background() log := r.Log.WithValues("storage", req.NamespacedName) resource := &servicev1alpha1.Storage{} r.Get(ctx, req.NamespacedName, resource) + log.Info("Getting Storage Account", "Storage.Namespace", resource.Namespace, "Storage.Name", resource.Name) resourceCopy := resource.DeepCopy() - resourceCopy.Status.ResourceGroupName = resourceGroupName - log.Info("Getting Storage Account", "Storage.Namespace", resourceCopy.Namespace, "Storage.Name", resourceCopy.Name, "Storage.Status.ResourceGroupName", resourceCopy.Status.ResourceGroupName) + resourceCopy.Status.ProvisioningState = provisioningState + if helpers.IsDeploymentComplete(provisioningState) { + if outputs != nil { + resourceCopy.Output.StorageAccountName = helpers.GetOutput(outputs, "storageAccountName") + resourceCopy.Output.Key1 = helpers.GetOutput(outputs, "key1") + resourceCopy.Output.Key2 = helpers.GetOutput(outputs, "key2") + resourceCopy.Output.ConnectionString1 = helpers.GetOutput(outputs, "connectionString1") + resourceCopy.Output.ConnectionString2 = helpers.GetOutput(outputs, "connectionString2") + } + } - if err := r.Status().Update(ctx, resourceCopy); err != nil { + err := r.Status().Update(ctx, resourceCopy) + if err != nil { log.Error(err, "unable to update Storage status") return nil, err } + log.Info("Updated Status", "Storage.Namespace", resourceCopy.Namespace, "Storage.Name", resourceCopy.Name, "Storage.Status", resourceCopy.Status, "Storage.Output", resourceCopy.Output) + + if helpers.IsDeploymentComplete(provisioningState) { + err := r.syncAdditionalResources(req, resourceCopy) + if err != nil { + log.Error(err, "error syncing resources") + return nil, err + } + log.Info("Updated additional resources", "Storage.Namespace", resourceCopy.Namespace, "Storage.Name", resourceCopy.Name, "Storage.AdditionalResources", resourceCopy.AdditionalResources) + } return resourceCopy, nil } @@ -154,10 +177,47 @@ func (r *StorageReconciler) deleteExternalResources(instance *servicev1alpha1.St ctx := context.Background() log := r.Log.WithValues("Storage.Namespace", instance.Namespace, "Storage.Name", instance.Name) - // Request object not found, could have been deleted after reconcile request. - // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. 
- // Return and don't requeue - log.Info("Deleting Storage Account", "Storage.Status.ResourceGroupName", instance.Status.ResourceGroupName) - _, err := group.DeleteGroup(ctx, instance.Status.ResourceGroupName) - return err + resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "storage", instance.Name, instance.Namespace) + log.Info("Deleting Storage Account", "ResourceGroupName", resourceGroupName) + _, err := group.DeleteGroup(ctx, resourceGroupName) + if err != nil { + return err + } + + err = helpers.DeleteSecret(instance.Name, instance.Namespace) + if err != nil { + return err + } + + return nil +} + +func (r *StorageReconciler) syncAdditionalResources(req ctrl.Request, s *servicev1alpha1.Storage) (err error) { + ctx := context.Background() + log := r.Log.WithValues("storage", req.NamespacedName) + + resource := &servicev1alpha1.Storage{} + r.Get(ctx, req.NamespacedName, resource) + + secrets := []string{} + secretData := map[string]string{ + "storageAccountName": "{{.Obj.Output.StorageAccountName}}", + "key1": "{{.Obj.Output.Key1}}", + "key2": "{{.Obj.Output.Key2}}", + "connectionString1": "{{.Obj.Output.ConnectionString1}}", + "connectionString2": "{{.Obj.Output.ConnectionString2}}", + } + secret := helpers.CreateSecret(s, s.Name, s.Namespace, secretData) + secrets = append(secrets, secret) + + resourceCopy := resource.DeepCopy() + resourceCopy.AdditionalResources.Secrets = secrets + + err = r.Update(ctx, resourceCopy) + if err != nil { + log.Error(err, "unable to update Storage status") + return err + } + + return nil } diff --git a/go.mod b/go.mod index 2fede21bef8..4d09e20f0b3 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,9 @@ require ( github.com/onsi/ginkgo v1.7.0 github.com/onsi/gomega v1.4.3 github.com/satori/go.uuid v1.2.0 + github.com/spf13/pflag v1.0.2 golang.org/x/net v0.0.0-20190311183353-d8887717615a + k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible sigs.k8s.io/controller-runtime v0.2.0-beta.2 diff --git a/main.go b/main.go index 37310630cb2..6c4719219fe 100644 --- a/main.go +++ b/main.go @@ -25,11 +25,14 @@ SOFTWARE package main import ( - "flag" "os" + "strings" + + "github.com/spf13/pflag" servicev1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" "github.com/Azure/azure-service-operator/controllers" + "github.com/Azure/azure-service-operator/pkg/config" "k8s.io/apimachinery/pkg/runtime" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" @@ -38,8 +41,11 @@ import ( ) var ( - scheme = runtime.NewScheme() - setupLog = ctrl.Log.WithName("setup") + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") + masterURL, kubeconfig, resources, clusterName string + cloudName, tenantID, subscriptionID, clientID, clientSecret string + useAADPodIdentity bool ) func init() { @@ -51,13 +57,30 @@ func init() { func main() { var metricsAddr string var enableLeaderElection bool - flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") - flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, - "Enable leader election for controller manager. 
Enabling this will ensure there is only one active controller manager.") - flag.Parse() + pflag.StringVarP(&metricsAddr, "metrics-addr", "", ":8080", "The address the metric endpoint binds to.") + pflag.BoolVarP(&enableLeaderElection, "enable-leader-election", "", false, "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") + pflag.StringVarP(&masterURL, "master-url", "", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig.") + pflag.StringVarP(&kubeconfig, "kubeconfig", "k", "", "Path to local kubeconfig file (mainly used for development)") + pflag.StringVarP(&resources, "resources", "", "storage,cosmosdb", "Comma delimited list of CRDs to deploy") + pflag.StringVarP(&clusterName, "cluster-name", "i", "azure-operator", "Cluster name for the Application to run as, used to avoid conflict") + pflag.StringVarP(&cloudName, "cloud-name", "c", "AzurePublicCloud", "The cloud name") + pflag.StringVarP(&tenantID, "tenant-id", "t", "", "The AAD tenant, must provide when using service principals") + pflag.StringVarP(&subscriptionID, "subscription-id", "s", "", "The subscription ID") + pflag.StringVarP(&clientID, "client-id", "u", "", "The service principal client ID") + pflag.StringVarP(&clientSecret, "client-secret", "p", "", "The service principal client secret") + pflag.BoolVarP(&useAADPodIdentity, "use-aad-pod-identity", "", false, "whether use AAD pod identity") + pflag.Parse() ctrl.SetLogger(zap.Logger(true)) + cfg := config.Config{} + cfg, err := getConfig() + if err != nil { + setupLog.Error(err, "unable to get config") + os.Exit(1) + } + config.Instance = &cfg + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, MetricsBindAddress: metricsAddr, @@ -92,3 +115,29 @@ func main() { os.Exit(1) } } + +func getConfig() (c config.Config, err error) { + resourcesMap := map[string]bool{} + for _, r := range strings.Split(resources, ",") { + resourcesMap[r] = true + } + + kubeclientset, err := config.CreateKubeClientset(masterURL, kubeconfig) + if err != nil { + return c, err + } + + c = config.Config{ + KubeClientset: kubeclientset, + Resources: resourcesMap, + ClusterName: clusterName, + CloudName: cloudName, + TenantID: tenantID, + SubscriptionID: subscriptionID, + ClientID: clientID, + ClientSecret: clientSecret, + UseAADPodIdentity: useAADPodIdentity, + } + + return c, nil +} diff --git a/pkg/client/deployment/deployment.go b/pkg/client/deployment/deployment.go index f5ae77a067d..ee9ff1eb506 100644 --- a/pkg/client/deployment/deployment.go +++ b/pkg/client/deployment/deployment.go @@ -11,7 +11,7 @@ import ( ) func getDeploymentsClient() resources.DeploymentsClient { - deployClient := resources.NewDeploymentsClient(config.SubscriptionID()) + deployClient := resources.NewDeploymentsClient(config.Instance.SubscriptionID) a, _ := iam.GetResourceManagementAuthorizer() deployClient.Authorizer = a return deployClient diff --git a/pkg/client/group/group.go b/pkg/client/group/group.go index fda5e02d2ac..c9dd8742f42 100644 --- a/pkg/client/group/group.go +++ b/pkg/client/group/group.go @@ -5,15 +5,13 @@ import ( "log" "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources" - - "github.com/Azure/go-autorest/autorest/to" - "github.com/Azure/azure-service-operator/pkg/config" "github.com/Azure/azure-service-operator/pkg/iam" + "github.com/Azure/go-autorest/autorest/to" ) func getGroupsClient() resources.GroupsClient { - groupsClient := 
resources.NewGroupsClient(config.SubscriptionID()) + groupsClient := resources.NewGroupsClient(config.Instance.SubscriptionID) a, err := iam.GetResourceManagementAuthorizer() if err != nil { log.Fatalf("failed to initialize authorizer: %v\n", err) @@ -23,13 +21,14 @@ func getGroupsClient() resources.GroupsClient { } // CreateGroup creates a new resource group named by env var -func CreateGroup(ctx context.Context, groupName string) (resources.Group, error) { +func CreateGroup(ctx context.Context, groupName, location string, tags map[string]*string) (resources.Group, error) { groupsClient := getGroupsClient() return groupsClient.CreateOrUpdate( ctx, groupName, resources.Group{ - Location: to.StringPtr("eastus2"), + Location: to.StringPtr(location), + Tags: tags, }) } diff --git a/pkg/config/config.go b/pkg/config/config.go index 464d6827cba..cd1ff39795c 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -2,62 +2,51 @@ package config import ( "fmt" - "os" "github.com/Azure/go-autorest/autorest/azure" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" ) -const ( - CloudNameEnvVar = "CLOUD_NAME" - SubscriptionIDEnvVar = "SUBSCRIPTION_ID" - ClientIDEnvVar = "CLIENT_ID" - ClientSecretEnvVar = "CLIENT_SECRET" - TenantIDEnvVar = "TENANT_ID" - UseAADPodIdentityEnvVar = "USE_AAD_POD_IDENTITY" -) - -// GetEnvVar returns the value of the environment variable -func GetEnvVar(envVarName string) string { - v, found := os.LookupEnv(envVarName) - if !found { - panic(fmt.Sprintf("%s must be set", envVarName)) - } - return v -} +var Instance *Config -// CloudName returns the cloud name -func CloudName() string { - return GetEnvVar(CloudNameEnvVar) +type Config struct { + KubeClientset kubernetes.Interface + Resources map[string]bool `json:"resources"` + ClusterName string `json:"clusterName"` + CloudName string `json:"cloudName"` + TenantID string `json:"tenantID"` + SubscriptionID string `json:"subscriptionID"` + ClientID string `json:"clientID"` + ClientSecret string `json:"clientSecret"` + UseAADPodIdentity bool `json:"useAADPodIdentity"` } -// GetSubscriptionID returns the subscription ID -func SubscriptionID() string { - return GetEnvVar(SubscriptionIDEnvVar) -} - -// GetClientID returns the client ID -func ClientID() string { - return GetEnvVar(ClientIDEnvVar) +func getKubeconfig(masterURL, kubeconfig string) (*rest.Config, error) { + if kubeconfig != "" { + return clientcmd.BuildConfigFromFlags(masterURL, kubeconfig) + } + return rest.InClusterConfig() } -// GetClientSecret returns the client secret -func ClientSecret() string { - return GetEnvVar(ClientSecretEnvVar) -} +func CreateKubeClientset(masterURL, kubeconfig string) (kubernetes.Interface, error) { + config, err := getKubeconfig(masterURL, kubeconfig) + if err != nil { + return nil, fmt.Errorf("failed to get k8s config. %+v", err) + } -// GetTenantID returns the tenant ID -func TenantID() string { - return GetEnvVar(TenantIDEnvVar) -} + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("failed to get k8s client. %+v", err) + } -// UseAADPodIdentity returns whether AAD Pod Identity is used -func UseAADPodIdentity() bool { - return GetEnvVar(UseAADPodIdentityEnvVar) == "true" + return clientset, nil } // Environment() returns an `azure.Environment{...}` for the current cloud. 
func Environment() azure.Environment { - cloudName := CloudName() + cloudName := Instance.CloudName env, err := azure.EnvironmentFromName(cloudName) if err != nil { panic(fmt.Sprintf( diff --git a/pkg/helpers/deployment.go b/pkg/helpers/deployment.go new file mode 100644 index 00000000000..46bde127113 --- /dev/null +++ b/pkg/helpers/deployment.go @@ -0,0 +1,36 @@ +package helpers + +import ( + "bytes" + "text/template" +) + +// IsDeploymentComplete will determine if the deployment is complete +func IsDeploymentComplete(status string) bool { + switch status { + case "Succeeded": + return true + case "Failed": + return true + case "Canceled": + return true + } + return false +} + +// Templatize renders the given template string using the provided data +func Templatize(tempStr string, data interface{}) (resp string, err error) { + t := template.New("templating") + t, err = t.Parse(string(tempStr)) + if err != nil { + return + } + + var tpl bytes.Buffer + err = t.Execute(&tpl, data) + return tpl.String(), err +} + +func GetOutput(outputs interface{}, key string) string { + return outputs.(map[string]interface{})[key].(map[string]interface{})["value"].(string) +} diff --git a/pkg/helpers/helpers.go b/pkg/helpers/helpers.go index 2d6c60d0762..0dde07e8aab 100644 --- a/pkg/helpers/helpers.go +++ b/pkg/helpers/helpers.go @@ -1,10 +1,17 @@ package helpers import ( + "crypto/md5" "fmt" + "io" + "regexp" "strings" - uuid "github.com/satori/go.uuid" + ctrl "sigs.k8s.io/controller-runtime" +) + +var ( + log = ctrl.Log.WithName("helpers") ) func ContainsString(slice []string, s string) bool { @@ -26,11 +33,17 @@ func RemoveString(slice []string, s string) (result []string) { return } -func AzureResourceName(kind string) string { - var name string - switch kind { - case "Storage": - name = fmt.Sprintf("aso%s", strings.ReplaceAll(uuid.NewV4().String(), "-", ""))[:24] - } - return name +// KubernetesResourceName sanitizes a name for use as a Kubernetes resource name +func KubernetesResourceName(name string) string { + reg, _ := regexp.Compile("[^a-zA-Z0-9_-]+") + return reg.ReplaceAllString(name, "-") +} + +func AzrueResourceGroupName(subscriptionID, clusterName, resourceType, name, namespace string) string { + nameParts := []string{subscriptionID, clusterName, resourceType, name, namespace} + nameString := strings.Join(nameParts, "-") + log.V(1).Info("Getting Azure Resource Group Name", "nameString", nameString) + hash := md5.New() + io.WriteString(hash, nameString) + return fmt.Sprintf("aso-%x", hash.Sum(nil)) } diff --git a/pkg/helpers/secret.go b/pkg/helpers/secret.go new file mode 100644 index 00000000000..eb1e24d151e --- /dev/null +++ b/pkg/helpers/secret.go @@ -0,0 +1,51 @@ +package helpers + +import ( + "github.com/Azure/azure-service-operator/pkg/config" + apiv1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func CreateSecret(resource interface{}, svcName, svcNamespace string, secretTemplate map[string]string) string { + data := map[string]string{} + for key, value := range secretTemplate { + tempValue, err := Templatize(value, Data{Obj: resource}) + if err != nil { + log.Error(err, "error parsing secret template") + return "" + } + data[key] = tempValue + } + + secretName := KubernetesResourceName(svcName) + secretObj := &apiv1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: svcNamespace, + }, + StringData: data, + } + + _, err := config.Instance.KubeClientset.CoreV1().Secrets(svcNamespace).Get(secretName,
metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + log.Info("Creating Secret", "Secret.Name", secretName) + _, err := config.Instance.KubeClientset.CoreV1().Secrets(svcNamespace).Create(secretObj) + if err != nil { + log.Error(err, "error creating Secret") + } + } else { + log.Info("Updating Secret", "Secret.Name", secretName) + _, err := config.Instance.KubeClientset.CoreV1().Secrets(svcNamespace).Update(secretObj) + if err != nil { + log.Error(err, "error updating Secret") + } + } + + return secretName +} + +func DeleteSecret(svcName, svcNamespace string) error { + secretName := KubernetesResourceName(svcName) + return config.Instance.KubeClientset.CoreV1().Secrets(svcNamespace).Delete(secretName, &metav1.DeleteOptions{}) +} diff --git a/pkg/helpers/service.go b/pkg/helpers/service.go new file mode 100644 index 00000000000..2cebc8518f8 --- /dev/null +++ b/pkg/helpers/service.go @@ -0,0 +1,57 @@ +package helpers + +import ( + "strconv" + "strings" + + "github.com/Azure/azure-service-operator/pkg/config" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CreateExternalNameService will create a Kubernetes Service using the ExternalName type +func CreateExternalNameService(resource interface{}, svcName string, svcNamespace string, externalNameTemplate string, svcPortTemplate string) string { + externalName, err := Templatize(externalNameTemplate, Data{Obj: resource}) + if err != nil { + log.Error(err, "error parsing external name template") + return "" + } + + svcPortString, err := Templatize(svcPortTemplate, Data{Obj: resource}) + if err != nil { + log.Error(err, "error parsing service port template") + return "" + } + + svcPortStripSlash := strings.Replace(svcPortString, "\\", "", -1) + + svcPortInt64, err := strconv.ParseInt(svcPortStripSlash, 0, 16) + if err != nil { + log.Error(err, "error converting service port template string to int") + return "" + } + + // ParseInt only returns an int64, must convert to int32 for apiv1.ServicePort field + svcPort := int32(svcPortInt64) + + service := &apiv1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: KubernetesResourceName(svcName), + }, + Spec: apiv1.ServiceSpec{ + Type: apiv1.ServiceTypeExternalName, + ExternalName: externalName, + Ports: []apiv1.ServicePort{ + apiv1.ServicePort{ + Port: svcPort, + }, + }, + }, + } + + newService, err := config.Instance.KubeClientset.CoreV1().Services(svcNamespace).Create(service) + if err != nil { + log.Error(err, "error creating service") + } + return newService.Name +} diff --git a/pkg/helpers/types.go b/pkg/helpers/types.go new file mode 100644 index 00000000000..ee69776d1c6 --- /dev/null +++ b/pkg/helpers/types.go @@ -0,0 +1,6 @@ +package helpers + +// Data wraps the object that is needed for the services +type Data struct { + Obj interface{} +} diff --git a/pkg/iam/authorizers.go b/pkg/iam/authorizers.go index 1f6025e4b12..3561073ec58 100644 --- a/pkg/iam/authorizers.go +++ b/pkg/iam/authorizers.go @@ -21,7 +21,7 @@ func GetResourceManagementAuthorizer() (autorest.Authorizer, error) { var a autorest.Authorizer var err error - if config.UseAADPodIdentity() { + if config.Instance.UseAADPodIdentity { a, err = auth.NewAuthorizerFromEnvironment() } else { a, err = getAuthorizerForResource(config.Environment().ResourceManagerEndpoint) @@ -42,13 +42,13 @@ func getAuthorizerForResource(resource string) (autorest.Authorizer, error) { var err error oauthConfig, err := adal.NewOAuthConfig( - config.Environment().ActiveDirectoryEndpoint, config.TenantID()) +
config.Environment().ActiveDirectoryEndpoint, config.Instance.TenantID) if err != nil { return nil, err } token, err := adal.NewServicePrincipalToken( - *oauthConfig, config.ClientID(), config.ClientSecret(), resource) + *oauthConfig, config.Instance.ClientID, config.Instance.ClientSecret, resource) if err != nil { return nil, err } diff --git a/pkg/storage/storage_template.go b/pkg/storage/storage_template.go index b73e7458b2f..85d58fb9f43 100644 --- a/pkg/storage/storage_template.go +++ b/pkg/storage/storage_template.go @@ -8,7 +8,6 @@ import ( azureV1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" "github.com/Azure/azure-service-operator/pkg/client/deployment" - "github.com/Azure/azure-service-operator/pkg/helpers" ) // New generates a new object @@ -30,9 +29,6 @@ func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName strin "location": map[string]interface{}{ "value": t.Storage.Spec.Location, }, - "storageAccountName": map[string]interface{}{ - "value": helpers.AzureResourceName(t.Storage.Kind), - }, "accountType": map[string]interface{}{ "value": t.Storage.Spec.Sku.Name, }, diff --git a/pkg/template/storage.json b/pkg/template/storage.json deleted file mode 100644 index 80d2595020e..00000000000 --- a/pkg/template/storage.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - "location": { - "type": "String" - }, - "storageAccountName": { - "type": "String" - }, - "accountType": { - "type": "String" - }, - "kind": { - "type": "String" - }, - "accessTier": { - "type": "String" - }, - "supportsHttpsTrafficOnly": { - "type": "Bool" - } - }, - "variables": {}, - "resources": [ - { - "type": "Microsoft.Storage/storageAccounts", - "apiVersion": "2018-07-01", - "name": "[parameters('storageAccountName')]", - "location": "[parameters('location')]", - "dependsOn": [], - "sku": { - "name": "[parameters('accountType')]" - }, - "kind": "[parameters('kind')]", - "properties": { - "accessTier": "[parameters('accessTier')]", - "supportsHttpsTrafficOnly": "[parameters('supportsHttpsTrafficOnly')]" - } - } - ], - "outputs": {} -} diff --git a/template/storage.json b/template/storage.json new file mode 100644 index 00000000000..893f47ae8e7 --- /dev/null +++ b/template/storage.json @@ -0,0 +1,63 @@ +{ + "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "location": { + "type": "String" + }, + "accountType": { + "type": "String" + }, + "kind": { + "type": "String" + }, + "accessTier": { + "type": "String" + }, + "supportsHttpsTrafficOnly": { + "type": "Bool" + } + }, + "variables": { + "storageAccountName": "[concat('aso', uniqueString(resourceGroup().id))]" + }, + "resources": [ + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "2018-07-01", + "name": "[variables('storageAccountName')]", + "location": "[parameters('location')]", + "dependsOn": [], + "sku": { + "name": "[parameters('accountType')]" + }, + "kind": "[parameters('kind')]", + "properties": { + "accessTier": "[parameters('accessTier')]", + "supportsHttpsTrafficOnly": "[parameters('supportsHttpsTrafficOnly')]" + } + } + ], + "outputs": { + "storageAccountName": { + "type": "string", + "value": "[variables('storageAccountName')]" + }, + "key1": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountName')), 
'2018-07-01').keys[0].value]" + }, + "key2": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountName')), '2018-07-01').keys[1].value]" + }, + "connectionString1": { + "type": "string", + "value": "[concat('DefaultEndpointsProtocol=https;AccountName=',variables('storageAccountName'),';AccountKey=',listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountName')), '2018-07-01').keys[0].value,';EndpointSuffix=core.windows.net')]" + }, + "connectionString2": { + "type": "string", + "value": "[concat('DefaultEndpointsProtocol=https;AccountName=',variables('storageAccountName'),';AccountKey=',listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountName')), '2018-07-01').keys[1].value,';EndpointSuffix=core.windows.net')]" + } + } +} From b29b4c0cd87dc94158e0a199d08f33798b232fb4 Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Mon, 22 Jul 2019 01:45:36 +0800 Subject: [PATCH 09/34] Upgrade kubebuilder to 2.0.0-beta.0 and controller-runtime to v0.2.0-beta.4 --- Makefile | 9 ++++++++- go.mod | 2 +- go.sum | 13 ++++++------- main.go | 2 +- 4 files changed, 16 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index a32868b3a4f..c6eb78b300c 100644 --- a/Makefile +++ b/Makefile @@ -4,6 +4,13 @@ IMG ?= controller:latest # Produce CRDs that work back to Kubernetes 1.11 (no version conversion) CRD_OPTIONS ?= "crd:trivialVersions=true" +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + all: manager # Run tests @@ -63,7 +70,7 @@ docker-push: # download controller-gen if necessary controller-gen: ifeq (, $(shell which controller-gen)) - go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.2.0-beta.2 + go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.2.0-beta.4 CONTROLLER_GEN=$(shell go env GOPATH)/bin/controller-gen else CONTROLLER_GEN=$(shell which controller-gen) diff --git a/go.mod b/go.mod index 4d09e20f0b3..36b886d0a45 100644 --- a/go.mod +++ b/go.mod @@ -18,5 +18,5 @@ require ( k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible - sigs.k8s.io/controller-runtime v0.2.0-beta.2 + sigs.k8s.io/controller-runtime v0.2.0-beta.4 ) diff --git a/go.sum b/go.sum index 36a5d41e1c5..672576a0b80 100644 --- a/go.sum +++ b/go.sum @@ -34,8 +34,6 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 h1:Kn3rqvbUFqSepE2OqVu0Pn1CbDw9IuMlONapol0zuwk= -github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= @@ -51,9 +49,8 @@ 
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc= -github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -224,6 +221,8 @@ golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +gomodules.xyz/jsonpatch/v2 v2.0.0 h1:OyHbl+7IOECpPKfVK42oFr6N7+Y2dR+Jsb/IiDV3hOo= +gomodules.xyz/jsonpatch/v2 v2.0.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= @@ -267,8 +266,8 @@ k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c h1:3KSCztE7gPitlZmWbNwue/ k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g90ETOiA6rfLV1Y= k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -sigs.k8s.io/controller-runtime v0.2.0-beta.2 h1:hOWldx1qmGI9TsU+uUsq1xTgVmUV7AZo08VAYX0dwGI= -sigs.k8s.io/controller-runtime v0.2.0-beta.2/go.mod h1:TSH2R0nSz4WAlUUlNnOFcOR/VUhfwBLlmtq2X6AiQCA= +sigs.k8s.io/controller-runtime v0.2.0-beta.4 h1:S1XVfRWR1MuIXZdkYx3jN8JDw+bbQxmWZroy0i87z/A= +sigs.k8s.io/controller-runtime v0.2.0-beta.4/go.mod h1:HweyYKQ8fBuzdu2bdaeBJvsFgAi/OqBBnrVGXcqKhME= sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs= sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= diff --git a/main.go b/main.go index 6c4719219fe..714e66b9b34 100644 --- a/main.go +++ b/main.go @@ -50,7 +50,7 @@ var ( func init() { - servicev1alpha1.AddToScheme(scheme) + _ = servicev1alpha1.AddToScheme(scheme) // +kubebuilder:scaffold:scheme } From 4e5abd45fb85befd87fe6254753d629171669202 Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Mon, 22 Jul 2019 02:51:50 +0800 Subject: [PATCH 10/34] Copy pkg in Dockerfile --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile 
b/Dockerfile index e6c589aebd9..4deb2a325e6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -13,6 +13,7 @@ RUN go mod download COPY main.go main.go COPY api/ api/ COPY controllers/ controllers/ +COPY pkg/ pkg/ # Build RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go From ca57fb60c43c9c5a59dd782110e0dd8afa413404 Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Mon, 22 Jul 2019 12:58:55 +0800 Subject: [PATCH 11/34] Update controller-gen and make manifests --- config/crd/bases/service.azure_cosmosdbs.yaml | 358 +---------------- config/crd/bases/service.azure_storages.yaml | 367 +----------------- config/rbac/role.yaml | 20 +- go.mod | 9 +- go.sum | 39 ++ 5 files changed, 66 insertions(+), 727 deletions(-) diff --git a/config/crd/bases/service.azure_cosmosdbs.yaml b/config/crd/bases/service.azure_cosmosdbs.yaml index af8e7da57a8..b4d852bb61e 100644 --- a/config/crd/bases/service.azure_cosmosdbs.yaml +++ b/config/crd/bases/service.azure_cosmosdbs.yaml @@ -26,366 +26,12 @@ spec: submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' type: string metadata: - properties: - annotations: - additionalProperties: - type: string - description: 'Annotations is an unstructured key value map stored with - a resource that may be set by external tools to store and retrieve - arbitrary metadata. They are not queryable and should be preserved - when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' - type: object - clusterName: - description: The name of the cluster which the object belongs to. This - is used to distinguish resources with same name and namespace in different - clusters. This field is not set anywhere right now and apiserver is - going to ignore it if set in create or update request. - type: string - creationTimestamp: - description: "CreationTimestamp is a timestamp representing the server - time when this object was created. It is not guaranteed to be set - in happens-before order across separate operations. Clients may not - set this value. It is represented in RFC3339 form and is in UTC. \n - Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" - format: date-time - type: string - deletionGracePeriodSeconds: - description: Number of seconds allowed for this object to gracefully - terminate before it will be removed from the system. Only set when - deletionTimestamp is also set. May only be shortened. Read-only. - format: int64 - type: integer - deletionTimestamp: - description: "DeletionTimestamp is RFC 3339 date and time at which this - resource will be deleted. This field is set by the server when a graceful - deletion is requested by the user, and is not directly settable by - a client. The resource is expected to be deleted (no longer visible - from resource lists, and not reachable by name) after the time in - this field, once the finalizers list is empty. As long as the finalizers - list contains items, deletion is blocked. Once the deletionTimestamp - is set, this value may not be unset or be set further into the future, - although it may be shortened or the resource may be deleted prior - to this time. For example, a user may request that a pod is deleted - in 30 seconds. The Kubelet will react by sending a graceful termination - signal to the containers in the pod. 
After that 30 seconds, the Kubelet - will send a hard termination signal (SIGKILL) to the container and - after cleanup, remove the pod from the API. In the presence of network - partitions, this object may still exist after this timestamp, until - an administrator or automated process can determine the resource is - fully terminated. If not set, graceful deletion of the object has - not been requested. \n Populated by the system when a graceful deletion - is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" - format: date-time - type: string - finalizers: - description: Must be empty before the object is deleted from the registry. - Each entry is an identifier for the responsible component that will - remove the entry from the list. If the deletionTimestamp of the object - is non-nil, entries in this list can only be removed. - items: - type: string - type: array - generateName: - description: "GenerateName is an optional prefix, used by the server, - to generate a unique name ONLY IF the Name field has not been provided. - If this field is used, the name returned to the client will be different - than the name passed. This value will also be combined with a unique - suffix. The provided value has the same validation rules as the Name - field, and may be truncated by the length of the suffix required to - make the value unique on the server. \n If this field is specified - and the generated name exists, the server will NOT return a 409 - - instead, it will either return 201 Created or 500 with Reason ServerTimeout - indicating a unique name could not be found in the time allotted, - and the client should retry (optionally after the time indicated in - the Retry-After header). \n Applied only if Name is not specified. - More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" - type: string - generation: - description: A sequence number representing a specific generation of - the desired state. Populated by the system. Read-only. - format: int64 - type: integer - initializers: - description: "An initializer is a controller which enforces some system - invariant at object creation time. This field is a list of initializers - that have not yet acted on this object. If nil or empty, this object - has been completely initialized. Otherwise, the object is considered - uninitialized and is hidden (in list/watch and get calls) from clients - that haven't explicitly asked to observe uninitialized objects. \n - When an object is created, the system will populate this list with - the current set of initializers. Only privileged users may set or - modify this list. Once it is empty, it may not be modified further - by any user. \n DEPRECATED - initializers are an alpha field and will - be removed in v1.15." - properties: - pending: - description: Pending is a list of initializers that must execute - in order before this object is visible. When the last pending - initializer is removed, and no failing result is set, the initializers - struct will be set to nil and the object is considered as initialized - and visible to all clients. - items: - properties: - name: - description: name of the process that is responsible for initializing - this object. - type: string - required: - - name - type: object - type: array - result: - description: If result is set with the Failure field, the object - will be persisted to storage and then deleted, ensuring that other - clients can observe the deletion. 
- properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - code: - description: Suggested HTTP return code for this status, 0 if - not set. - format: int32 - type: integer - details: - description: Extended data associated with the reason. Each - reason may define its own extended details. This field is - optional and the data returned is not guaranteed to conform - to any schema except that defined by the reason type. - properties: - causes: - description: The Causes array includes more details associated - with the StatusReason failure. Not all StatusReasons may - provide detailed causes. - items: - properties: - field: - description: "The field of the resource that has caused - this error, as named by its JSON serialization. - May include dot and postfix notation for nested - attributes. Arrays are zero-indexed. Fields may - appear more than once in an array of causes due - to fields having multiple errors. Optional. \n Examples: - \ \"name\" - the field \"name\" on the current - resource \"items[0].name\" - the field \"name\" - on the first array entry in \"items\"" - type: string - message: - description: A human-readable description of the cause - of the error. This field may be presented as-is - to a reader. - type: string - reason: - description: A machine-readable description of the - cause of the error. If this value is empty there - is no information available. - type: string - type: object - type: array - group: - description: The group attribute of the resource associated - with the status StatusReason. - type: string - kind: - description: 'The kind attribute of the resource associated - with the status StatusReason. On some operations may differ - from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - name: - description: The name attribute of the resource associated - with the status StatusReason (when there is a single name - which can be described). - type: string - retryAfterSeconds: - description: If specified, the time in seconds before the - operation should be retried. Some errors may indicate - the client must take an alternate action - for those errors - this field may indicate how long to wait before taking - the alternate action. - format: int32 - type: integer - uid: - description: 'UID of the resource. (when there is a single - resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids' - type: string - type: object - kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - message: - description: A human-readable description of the status of this - operation. - type: string - metadata: - description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - properties: - continue: - description: continue may be set if the user set a limit - on the number of items returned, and indicates that the - server has more data available. 
The value is opaque and - may be used to issue another request to the endpoint that - served this list to retrieve the next set of available - objects. Continuing a consistent list may not be possible - if the server configuration has changed or more than a - few minutes have passed. The resourceVersion field returned - when using this continue value will be identical to the - value in the first response, unless you have received - this token from an error message. - type: string - resourceVersion: - description: 'String that identifies the server''s internal - version of this object that can be used by clients to - determine when objects have changed. Value must be treated - as opaque by clients and passed unmodified back to the - server. Populated by the system. Read-only. More info: - https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' - type: string - selfLink: - description: selfLink is a URL representing this object. - Populated by the system. Read-only. - type: string - type: object - reason: - description: A machine-readable description of why this operation - is in the "Failure" status. If this value is empty there is - no information available. A Reason clarifies an HTTP status - code but does not override it. - type: string - status: - description: 'Status of the operation. One of: "Success" or - "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' - type: string - type: object - required: - - pending - type: object - labels: - additionalProperties: - type: string - description: 'Map of string keys and values that can be used to organize - and categorize (scope and select) objects. May match selectors of - replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' - type: object - managedFields: - description: "ManagedFields maps workflow-id and version to the set - of fields that are managed by that workflow. This is mostly for internal - housekeeping, and users typically shouldn't need to set or understand - this field. A workflow can be the user's name, a controller's name, - or the name of a specific apply path like \"ci-cd\". The set of fields - is always in the version that the workflow used when modifying the - object. \n This field is alpha and can be changed or removed without - notice." - items: - properties: - apiVersion: - description: APIVersion defines the version of this resource that - this field set applies to. The format is "group/version" just - like the top-level APIVersion field. It is necessary to track - the version of a field set because it cannot be automatically - converted. - type: string - fields: - additionalProperties: true - description: Fields identifies a set of fields. - type: object - manager: - description: Manager is an identifier of the workflow managing - these fields. - type: string - operation: - description: Operation is the type of operation which lead to - this ManagedFieldsEntry being created. The only valid values - for this field are 'Apply' and 'Update'. - type: string - time: - description: Time is timestamp of when these fields were set. - It should always be empty if Operation is 'Apply' - format: date-time - type: string - type: object - type: array - name: - description: 'Name must be unique within a namespace. Is required when - creating resources, although some resources may allow a client to - request the generation of an appropriate name automatically. 
Name - is primarily intended for creation idempotence and configuration definition. - Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' - type: string - namespace: - description: "Namespace defines the space within each name must be unique. - An empty namespace is equivalent to the \"default\" namespace, but - \"default\" is the canonical representation. Not all objects are required - to be scoped to a namespace - the value of this field for those objects - will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: - http://kubernetes.io/docs/user-guide/namespaces" - type: string - ownerReferences: - description: List of objects depended by this object. If ALL objects - in the list have been deleted, this object will be garbage collected. - If this object is managed by a controller, then an entry in this list - will point to this controller, with the controller field set to true. - There cannot be more than one managing controller. - items: - properties: - apiVersion: - description: API version of the referent. - type: string - blockOwnerDeletion: - description: If true, AND if the owner has the "foregroundDeletion" - finalizer, then the owner cannot be deleted from the key-value - store until this reference is removed. Defaults to false. To - set this field, a user needs "delete" permission of the owner, - otherwise 422 (Unprocessable Entity) will be returned. - type: boolean - controller: - description: If true, this reference points to the managing controller. - type: boolean - kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - name: - description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' - type: string - uid: - description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' - type: string - required: - - apiVersion - - kind - - name - - uid - type: object - type: array - resourceVersion: - description: "An opaque value that represents the internal version of - this object that can be used by clients to determine when objects - have changed. May be used for optimistic concurrency, change detection, - and the watch operation on a resource or set of resources. Clients - must treat these values as opaque and passed unmodified back to the - server. They may only be valid for a particular resource or set of - resources. \n Populated by the system. Read-only. Value must be treated - as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency" - type: string - selfLink: - description: SelfLink is a URL representing this object. Populated by - the system. Read-only. - type: string - uid: - description: "UID is the unique in time and space value for this object. - It is typically generated by the server on successful creation of - a resource and is not allowed to change on PUT operations. \n Populated - by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids" - type: string type: object spec: + description: CosmosDBSpec defines the desired state of CosmosDB type: object status: + description: CosmosDBStatus defines the observed state of CosmosDB type: object type: object versions: diff --git a/config/crd/bases/service.azure_storages.yaml b/config/crd/bases/service.azure_storages.yaml index 321f7fd942d..91a866eb1bb 100644 --- a/config/crd/bases/service.azure_storages.yaml +++ b/config/crd/bases/service.azure_storages.yaml @@ -18,6 +18,7 @@ spec: description: Storage is the Schema for the storages API properties: additionalResources: + description: StorageAdditionalResources holds the additional resources properties: secrets: items: @@ -35,362 +36,6 @@ spec: submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' type: string metadata: - properties: - annotations: - additionalProperties: - type: string - description: 'Annotations is an unstructured key value map stored with - a resource that may be set by external tools to store and retrieve - arbitrary metadata. They are not queryable and should be preserved - when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' - type: object - clusterName: - description: The name of the cluster which the object belongs to. This - is used to distinguish resources with same name and namespace in different - clusters. This field is not set anywhere right now and apiserver is - going to ignore it if set in create or update request. - type: string - creationTimestamp: - description: "CreationTimestamp is a timestamp representing the server - time when this object was created. It is not guaranteed to be set - in happens-before order across separate operations. Clients may not - set this value. It is represented in RFC3339 form and is in UTC. \n - Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" - format: date-time - type: string - deletionGracePeriodSeconds: - description: Number of seconds allowed for this object to gracefully - terminate before it will be removed from the system. Only set when - deletionTimestamp is also set. May only be shortened. Read-only. - format: int64 - type: integer - deletionTimestamp: - description: "DeletionTimestamp is RFC 3339 date and time at which this - resource will be deleted. This field is set by the server when a graceful - deletion is requested by the user, and is not directly settable by - a client. The resource is expected to be deleted (no longer visible - from resource lists, and not reachable by name) after the time in - this field, once the finalizers list is empty. As long as the finalizers - list contains items, deletion is blocked. Once the deletionTimestamp - is set, this value may not be unset or be set further into the future, - although it may be shortened or the resource may be deleted prior - to this time. For example, a user may request that a pod is deleted - in 30 seconds. The Kubelet will react by sending a graceful termination - signal to the containers in the pod. After that 30 seconds, the Kubelet - will send a hard termination signal (SIGKILL) to the container and - after cleanup, remove the pod from the API. 
In the presence of network - partitions, this object may still exist after this timestamp, until - an administrator or automated process can determine the resource is - fully terminated. If not set, graceful deletion of the object has - not been requested. \n Populated by the system when a graceful deletion - is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" - format: date-time - type: string - finalizers: - description: Must be empty before the object is deleted from the registry. - Each entry is an identifier for the responsible component that will - remove the entry from the list. If the deletionTimestamp of the object - is non-nil, entries in this list can only be removed. - items: - type: string - type: array - generateName: - description: "GenerateName is an optional prefix, used by the server, - to generate a unique name ONLY IF the Name field has not been provided. - If this field is used, the name returned to the client will be different - than the name passed. This value will also be combined with a unique - suffix. The provided value has the same validation rules as the Name - field, and may be truncated by the length of the suffix required to - make the value unique on the server. \n If this field is specified - and the generated name exists, the server will NOT return a 409 - - instead, it will either return 201 Created or 500 with Reason ServerTimeout - indicating a unique name could not be found in the time allotted, - and the client should retry (optionally after the time indicated in - the Retry-After header). \n Applied only if Name is not specified. - More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" - type: string - generation: - description: A sequence number representing a specific generation of - the desired state. Populated by the system. Read-only. - format: int64 - type: integer - initializers: - description: "An initializer is a controller which enforces some system - invariant at object creation time. This field is a list of initializers - that have not yet acted on this object. If nil or empty, this object - has been completely initialized. Otherwise, the object is considered - uninitialized and is hidden (in list/watch and get calls) from clients - that haven't explicitly asked to observe uninitialized objects. \n - When an object is created, the system will populate this list with - the current set of initializers. Only privileged users may set or - modify this list. Once it is empty, it may not be modified further - by any user. \n DEPRECATED - initializers are an alpha field and will - be removed in v1.15." - properties: - pending: - description: Pending is a list of initializers that must execute - in order before this object is visible. When the last pending - initializer is removed, and no failing result is set, the initializers - struct will be set to nil and the object is considered as initialized - and visible to all clients. - items: - properties: - name: - description: name of the process that is responsible for initializing - this object. - type: string - required: - - name - type: object - type: array - result: - description: If result is set with the Failure field, the object - will be persisted to storage and then deleted, ensuring that other - clients can observe the deletion. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. 
Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - code: - description: Suggested HTTP return code for this status, 0 if - not set. - format: int32 - type: integer - details: - description: Extended data associated with the reason. Each - reason may define its own extended details. This field is - optional and the data returned is not guaranteed to conform - to any schema except that defined by the reason type. - properties: - causes: - description: The Causes array includes more details associated - with the StatusReason failure. Not all StatusReasons may - provide detailed causes. - items: - properties: - field: - description: "The field of the resource that has caused - this error, as named by its JSON serialization. - May include dot and postfix notation for nested - attributes. Arrays are zero-indexed. Fields may - appear more than once in an array of causes due - to fields having multiple errors. Optional. \n Examples: - \ \"name\" - the field \"name\" on the current - resource \"items[0].name\" - the field \"name\" - on the first array entry in \"items\"" - type: string - message: - description: A human-readable description of the cause - of the error. This field may be presented as-is - to a reader. - type: string - reason: - description: A machine-readable description of the - cause of the error. If this value is empty there - is no information available. - type: string - type: object - type: array - group: - description: The group attribute of the resource associated - with the status StatusReason. - type: string - kind: - description: 'The kind attribute of the resource associated - with the status StatusReason. On some operations may differ - from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - name: - description: The name attribute of the resource associated - with the status StatusReason (when there is a single name - which can be described). - type: string - retryAfterSeconds: - description: If specified, the time in seconds before the - operation should be retried. Some errors may indicate - the client must take an alternate action - for those errors - this field may indicate how long to wait before taking - the alternate action. - format: int32 - type: integer - uid: - description: 'UID of the resource. (when there is a single - resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids' - type: string - type: object - kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - message: - description: A human-readable description of the status of this - operation. - type: string - metadata: - description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - properties: - continue: - description: continue may be set if the user set a limit - on the number of items returned, and indicates that the - server has more data available. 
The value is opaque and - may be used to issue another request to the endpoint that - served this list to retrieve the next set of available - objects. Continuing a consistent list may not be possible - if the server configuration has changed or more than a - few minutes have passed. The resourceVersion field returned - when using this continue value will be identical to the - value in the first response, unless you have received - this token from an error message. - type: string - resourceVersion: - description: 'String that identifies the server''s internal - version of this object that can be used by clients to - determine when objects have changed. Value must be treated - as opaque by clients and passed unmodified back to the - server. Populated by the system. Read-only. More info: - https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' - type: string - selfLink: - description: selfLink is a URL representing this object. - Populated by the system. Read-only. - type: string - type: object - reason: - description: A machine-readable description of why this operation - is in the "Failure" status. If this value is empty there is - no information available. A Reason clarifies an HTTP status - code but does not override it. - type: string - status: - description: 'Status of the operation. One of: "Success" or - "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' - type: string - type: object - required: - - pending - type: object - labels: - additionalProperties: - type: string - description: 'Map of string keys and values that can be used to organize - and categorize (scope and select) objects. May match selectors of - replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' - type: object - managedFields: - description: "ManagedFields maps workflow-id and version to the set - of fields that are managed by that workflow. This is mostly for internal - housekeeping, and users typically shouldn't need to set or understand - this field. A workflow can be the user's name, a controller's name, - or the name of a specific apply path like \"ci-cd\". The set of fields - is always in the version that the workflow used when modifying the - object. \n This field is alpha and can be changed or removed without - notice." - items: - properties: - apiVersion: - description: APIVersion defines the version of this resource that - this field set applies to. The format is "group/version" just - like the top-level APIVersion field. It is necessary to track - the version of a field set because it cannot be automatically - converted. - type: string - fields: - additionalProperties: true - description: Fields identifies a set of fields. - type: object - manager: - description: Manager is an identifier of the workflow managing - these fields. - type: string - operation: - description: Operation is the type of operation which lead to - this ManagedFieldsEntry being created. The only valid values - for this field are 'Apply' and 'Update'. - type: string - time: - description: Time is timestamp of when these fields were set. - It should always be empty if Operation is 'Apply' - format: date-time - type: string - type: object - type: array - name: - description: 'Name must be unique within a namespace. Is required when - creating resources, although some resources may allow a client to - request the generation of an appropriate name automatically. 
Name - is primarily intended for creation idempotence and configuration definition. - Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' - type: string - namespace: - description: "Namespace defines the space within each name must be unique. - An empty namespace is equivalent to the \"default\" namespace, but - \"default\" is the canonical representation. Not all objects are required - to be scoped to a namespace - the value of this field for those objects - will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: - http://kubernetes.io/docs/user-guide/namespaces" - type: string - ownerReferences: - description: List of objects depended by this object. If ALL objects - in the list have been deleted, this object will be garbage collected. - If this object is managed by a controller, then an entry in this list - will point to this controller, with the controller field set to true. - There cannot be more than one managing controller. - items: - properties: - apiVersion: - description: API version of the referent. - type: string - blockOwnerDeletion: - description: If true, AND if the owner has the "foregroundDeletion" - finalizer, then the owner cannot be deleted from the key-value - store until this reference is removed. Defaults to false. To - set this field, a user needs "delete" permission of the owner, - otherwise 422 (Unprocessable Entity) will be returned. - type: boolean - controller: - description: If true, this reference points to the managing controller. - type: boolean - kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - name: - description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' - type: string - uid: - description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' - type: string - required: - - apiVersion - - kind - - name - - uid - type: object - type: array - resourceVersion: - description: "An opaque value that represents the internal version of - this object that can be used by clients to determine when objects - have changed. May be used for optimistic concurrency, change detection, - and the watch operation on a resource or set of resources. Clients - must treat these values as opaque and passed unmodified back to the - server. They may only be valid for a particular resource or set of - resources. \n Populated by the system. Read-only. Value must be treated - as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency" - type: string - selfLink: - description: SelfLink is a URL representing this object. Populated by - the system. Read-only. - type: string - uid: - description: "UID is the unique in time and space value for this object. - It is typically generated by the server on successful creation of - a resource and is not allowed to change on PUT operations. \n Populated - by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" - type: string type: object output: properties: @@ -406,13 +51,20 @@ spec: type: string type: object spec: + description: StorageSpec defines the desired state of Storage properties: accessTier: + description: AccessTier enumerates the values for access tier. Only + one of the following access tiers may be specified. If none of the + following access tiers is specified, the default one is Hot. 
enum: - Cool - Hot type: string kind: + description: Kind enumerates the values for kind. Only one of the following + kinds may be specified. If none of the following kinds is specified, + the default one is StorageV2. enum: - BlobStorage - BlockBlobStorage @@ -421,9 +73,9 @@ spec: - StorageV2 type: string location: - minLength: 0 type: string sku: + description: Sku the SKU of the storage account. properties: name: description: 'Name - The SKU name. Required for account creation; @@ -445,6 +97,7 @@ spec: type: boolean type: object status: + description: StorageStatus defines the observed state of Storage properties: provisioningState: description: 'INSERT ADDITIONAL STATUS FIELD - define observed state diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index d6630859e19..373e0a55c17 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -11,38 +11,38 @@ rules: resources: - cosmosdbs verbs: + - create + - delete - get - list - - watch - - create - - update - patch - - delete + - update + - watch - apiGroups: - service.azure resources: - cosmosdbs/status verbs: - get - - update - patch + - update - apiGroups: - service.azure resources: - storages verbs: + - create + - delete - get - list - - watch - - create - - update - patch - - delete + - update + - watch - apiGroups: - service.azure resources: - storages/status verbs: - get - - update - patch + - update diff --git a/go.mod b/go.mod index 36b886d0a45..d7b34961ee9 100644 --- a/go.mod +++ b/go.mod @@ -10,13 +10,14 @@ require ( github.com/Azure/go-autorest/autorest/to v0.2.0 github.com/Azure/go-autorest/autorest/validation v0.1.0 // indirect github.com/go-logr/logr v0.1.0 - github.com/onsi/ginkgo v1.7.0 - github.com/onsi/gomega v1.4.3 + github.com/onsi/ginkgo v1.8.0 + github.com/onsi/gomega v1.5.0 github.com/satori/go.uuid v1.2.0 - github.com/spf13/pflag v1.0.2 - golang.org/x/net v0.0.0-20190311183353-d8887717615a + github.com/spf13/pflag v1.0.3 + golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible sigs.k8s.io/controller-runtime v0.2.0-beta.4 + sigs.k8s.io/controller-tools v0.2.0-beta.4 // indirect ) diff --git a/go.sum b/go.sum index 672576a0b80..47b8e9048d7 100644 --- a/go.sum +++ b/go.sum @@ -51,6 +51,8 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -61,10 +63,14 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= 
+github.com/gobuffalo/flect v0.1.5 h1:xpKq9ap8MbYfhuPCF0dBH854Gp9CxZjr/IocxELFflo= +github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE= @@ -77,6 +83,7 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= @@ -93,9 +100,13 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -104,6 +115,10 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod 
h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -112,15 +127,19 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs= github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= @@ -154,8 +173,12 @@ github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -192,6 
+215,8 @@ golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 h1:KaQtG+aDELoNmXYas3TVkGNYRuq8JQ1aa7LJt8EXVyo= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= @@ -211,16 +236,25 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872 h1:cGjJzUd8RgBw428LXP65YXni0aiGNA4Bl+ls8SmLOm8= +golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190501045030-23463209683d h1:D7DVZUZEUgsSIDTivnUtVeGfN5AvhDIKtdIZAqx0ieE= +golang.org/x/tools v0.0.0-20190501045030-23463209683d/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= gomodules.xyz/jsonpatch/v2 v2.0.0 h1:OyHbl+7IOECpPKfVK42oFr6N7+Y2dR+Jsb/IiDV3hOo= gomodules.xyz/jsonpatch/v2 v2.0.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= @@ -250,6 +284,8 @@ gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b h1:aBGgKJUM9Hk/3AE8WaZIApnTxG35kbuQba2w+SXqezo= @@ -260,6 +296,7 @@ k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d h1:Jmdtdt1ZnoGfWWIIik61Z7 k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible h1:U5Bt+dab9K8qaUmXINrkXO135kA11/i5Kg1RUydgaMQ= k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c h1:3KSCztE7gPitlZmWbNwue/2U0YruD65DqX3INopDAQM= @@ -268,6 +305,8 @@ k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= sigs.k8s.io/controller-runtime v0.2.0-beta.4 h1:S1XVfRWR1MuIXZdkYx3jN8JDw+bbQxmWZroy0i87z/A= sigs.k8s.io/controller-runtime v0.2.0-beta.4/go.mod h1:HweyYKQ8fBuzdu2bdaeBJvsFgAi/OqBBnrVGXcqKhME= +sigs.k8s.io/controller-tools v0.2.0-beta.4 h1:W+coTe+nkVNclQrikwlRp6GJKwgcrHzvIQZ9kCaak5A= +sigs.k8s.io/controller-tools v0.2.0-beta.4/go.mod h1:8t/X+FVWvk6TaBcsa+UKUBbn7GMtvyBKX30SGl4em6Y= sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs= sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= From 56e410c37e0c1331a34ce30400398c3e1cbd1bda Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Mon, 22 Jul 2019 13:41:08 +0800 Subject: [PATCH 12/34] Add prefix "Storage" for storage_types --- api/v1alpha1/storage_types.go | 44 +++++--------------- api/v1alpha1/zz_generated.deepcopy.go | 30 ++++++------- config/crd/bases/service.azure_storages.yaml | 6 +-- config/samples/service_v1alpha1_storage.yaml | 1 - docs/development.md | 15 +++++-- go.sum | 2 + 6 files changed, 42 insertions(+), 56 deletions(-) diff --git a/api/v1alpha1/storage_types.go b/api/v1alpha1/storage_types.go index 6a91a2f9a72..1046171ceb1 100644 --- a/api/v1alpha1/storage_types.go +++ b/api/v1alpha1/storage_types.go @@ -40,66 +40,42 @@ type StorageSpec struct { Location string `json:"location,omitempty"` - Sku Sku `json:"sku,omitempty"` + Sku StorageSku `json:"sku,omitempty"` - Kind Kind `json:"kind,omitempty"` + Kind StorageKind `json:"kind,omitempty"` - AccessTier AccessTier `json:"accessTier,omitempty"` + AccessTier StorageAccessTier `json:"accessTier,omitempty"` EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` } // Sku the SKU of the storage account. 
-type Sku struct { +type StorageSku struct { // Name - The SKU name. Required for account creation; optional for update. // Possible values include: 'StandardLRS', 'StandardGRS', 'StandardRAGRS', 'StandardZRS', 'PremiumLRS', 'PremiumZRS', 'StandardGZRS', 'StandardRAGZRS' - Name SkuName `json:"name,omitempty"` + Name StorageSkuName `json:"name,omitempty"` } -// SkuName enumerates the values for sku name. +// StorageSkuName enumerates the values for sku name. // Only one of the following sku names may be specified. // If none of the following sku names is specified, the default one // is StorageV2. // +kubebuilder:validation:Enum=Premium_LRS;Premium_ZRS;Standard_GRS;Standard_GZRS;Standard_LRS;Standard_RAGRS;Standard_RAGZRS;Standard_ZRS -type SkuName string - -const ( - PremiumLRS SkuName = "Premium_LRS" - PremiumZRS SkuName = "Premium_ZRS" - StandardGRS SkuName = "Standard_GRS" - StandardGZRS SkuName = "Standard_GZRS" - StandardLRS SkuName = "Standard_LRS" - StandardRAGRS SkuName = "Standard_RAGRS" - StandardRAGZRS SkuName = "Standard_RAGZRS" - StandardZRS SkuName = "Standard_ZRS" -) +type StorageSkuName string -// Kind enumerates the values for kind. +// StorageKind enumerates the values for kind. // Only one of the following kinds may be specified. // If none of the following kinds is specified, the default one // is StorageV2. // +kubebuilder:validation:Enum=BlobStorage;BlockBlobStorage;FileStorage;Storage;StorageV2 -type Kind string - -const ( - BlobStorage Kind = "BlobStorage" - BlockBlobStorage Kind = "BlockBlobStorage" - FileStorage Kind = "FileStorage" - StorageV1 Kind = "Storage" - StorageV2 Kind = "StorageV2" -) +type StorageKind string // AccessTier enumerates the values for access tier. // Only one of the following access tiers may be specified. // If none of the following access tiers is specified, the default one // is Hot. // +kubebuilder:validation:Enum=Cool;Hot -type AccessTier string - -const ( - Cool AccessTier = "Cool" - Hot AccessTier = "Hot" -) +type StorageAccessTier string // StorageStatus defines the observed state of Storage type StorageStatus struct { diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 05212d787cd..0c4c445c975 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -121,21 +121,6 @@ func (in *CosmosDBStatus) DeepCopy() *CosmosDBStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Sku) DeepCopyInto(out *Sku) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sku. -func (in *Sku) DeepCopy() *Sku { - if in == nil { - return nil - } - out := new(Sku) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Storage) DeepCopyInto(out *Storage) { *out = *in @@ -232,6 +217,21 @@ func (in *StorageOutput) DeepCopy() *StorageOutput { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageSku) DeepCopyInto(out *StorageSku) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSku. 
+func (in *StorageSku) DeepCopy() *StorageSku { + if in == nil { + return nil + } + out := new(StorageSku) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { *out = *in diff --git a/config/crd/bases/service.azure_storages.yaml b/config/crd/bases/service.azure_storages.yaml index 91a866eb1bb..8268ba34d83 100644 --- a/config/crd/bases/service.azure_storages.yaml +++ b/config/crd/bases/service.azure_storages.yaml @@ -62,9 +62,9 @@ spec: - Hot type: string kind: - description: Kind enumerates the values for kind. Only one of the following - kinds may be specified. If none of the following kinds is specified, - the default one is StorageV2. + description: StorageKind enumerates the values for kind. Only one of + the following kinds may be specified. If none of the following kinds + is specified, the default one is StorageV2. enum: - BlobStorage - BlockBlobStorage diff --git a/config/samples/service_v1alpha1_storage.yaml b/config/samples/service_v1alpha1_storage.yaml index 4bd1c65ab35..c7ceb59e84d 100644 --- a/config/samples/service_v1alpha1_storage.yaml +++ b/config/samples/service_v1alpha1_storage.yaml @@ -6,7 +6,6 @@ spec: location: eastus2 sku: name: Standard_RAGRS - tier: Standard kind: StorageV2 accessTier: Hot supportsHttpsTrafficOnly: true diff --git a/docs/development.md b/docs/development.md index 00ef40cfe94..0c4ad2a2434 100644 --- a/docs/development.md +++ b/docs/development.md @@ -5,20 +5,29 @@ To get started you will need: * a Kubernetes cluster to run against. You can use [KIND](https://sigs.k8s.io/kind) to get a local cluster for testing, or run against a remote cluster, e.g. [Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough). +* [kubebuilder](https://book.kubebuilder.io/quick-start.html#installation) * [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -## Test It Out Locally +## Deploy Operator on a Local Cluster -Install the CRDs into the cluster: +### 1. Create Cluster + +``` +kind create cluster +export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" +``` + +### 2. Install CRDs ``` make install ``` +### 3. 
Run Controller + Setup the environment variables: ``` -export USE_AAD_POD_IDENTITY=false export CLOUD_NAME=AzurePublicCloud export TENANT_ID= export SUBSCRIPTION_ID= diff --git a/go.sum b/go.sum index 47b8e9048d7..bd42b12c09c 100644 --- a/go.sum +++ b/go.sum @@ -134,11 +134,13 @@ github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs= From 96ef76ac23ab978bd723561b3b2569248be3579d Mon Sep 17 00:00:00 2001 From: ZeroMagic Date: Mon, 22 Jul 2019 14:42:31 +0000 Subject: [PATCH 13/34] feature: add redis cache service --- PROJECT | 3 + api/v1alpha1/rediscache_types.go | 123 ++++++++++ api/v1alpha1/zz_generated.deepcopy.go | 158 +++++++++++++ .../crd/bases/service.azure_rediscaches.yaml | 91 ++++++++ config/crd/kustomization.yaml | 3 + .../patches/cainjection_in_rediscaches.yaml | 8 + .../crd/patches/webhook_in_rediscaches.yaml | 17 ++ config/rbac/role.yaml | 20 ++ .../samples/service_v1alpha1_rediscache.yaml | 12 + controllers/rediscache_controller.go | 219 ++++++++++++++++++ controllers/suite_test.go | 3 + main.go | 7 + pkg/rediscache/rediscache_template.go | 47 ++++ template/rediscache.json | 55 +++++ 14 files changed, 766 insertions(+) create mode 100644 api/v1alpha1/rediscache_types.go create mode 100644 config/crd/bases/service.azure_rediscaches.yaml create mode 100644 config/crd/patches/cainjection_in_rediscaches.yaml create mode 100644 config/crd/patches/webhook_in_rediscaches.yaml create mode 100644 config/samples/service_v1alpha1_rediscache.yaml create mode 100644 controllers/rediscache_controller.go create mode 100644 pkg/rediscache/rediscache_template.go create mode 100644 template/rediscache.json diff --git a/PROJECT b/PROJECT index 908a6362491..0e56c922b14 100644 --- a/PROJECT +++ b/PROJECT @@ -8,3 +8,6 @@ resources: - group: service version: v1alpha1 kind: CosmosDB +- group: service + version: v1alpha1 + kind: RedisCache diff --git a/api/v1alpha1/rediscache_types.go b/api/v1alpha1/rediscache_types.go new file mode 100644 index 00000000000..8d1468434e0 --- /dev/null +++ b/api/v1alpha1/rediscache_types.go @@ -0,0 +1,123 @@ +/* +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// RedisCacheSpec defines the desired state of RedisCache +type RedisCacheSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // +kubebuilder:validation:MinLength=0 + + Location string `json:"location,omitempty"` + + Properties RedisCacheProperties `json:"properties,omitempty"` +} + +// RedisCacheProperties the properties of the Redis Cache. +type RedisCacheProperties struct { + Sku RedisCacheSku `json:"sku,omitempty"` + + EnableNonSslPort bool `json:"enableNonSslPort,omitempty"` +} + +// RedisCacheSku the SKU of the Redis Cache. +type RedisCacheSku struct { + // Name - The SKU name. Required for account creation; optional for update. 
+ // Possible values include: 'Basic', 'Standard', 'Premium' + Name RedisCacheSkuName `json:"name,omitempty"` + + Family RedisCacheSkuFamily `json:"family,omitempty"` + + Capacity int `json:"capacity,omitempty"` +} + +type RedisCacheSkuName string + +const ( + Basic RedisCacheSkuName = "Basic" + Premium RedisCacheSkuName = "Premium" + Standard RedisCacheSkuName = "Standard" +) + +type RedisCacheSkuFamily string + +const ( + C RedisCacheSkuFamily = "C" + P RedisCacheSkuFamily = "P" +) + +// RedisCacheStatus defines the observed state of RedisCache +type RedisCacheStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + ProvisioningState string `json:"provisioningState,omitempty"` +} + +type RedisCacheOutput struct { + RedisCacheName string `json:"redisCacheName,omitempty"` + PrimaryKey string `json:"primaryKey,omitempty"` + SecondaryKey string `json:"secondaryKey,omitempty"` +} + +// RedisCacheAdditionalResources holds the additional resources +type RedisCacheAdditionalResources struct { + Secrets []string `json:"secrets,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// RedisCache is the Schema for the rediscaches API +type RedisCache struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec RedisCacheSpec `json:"spec,omitempty"` + Status RedisCacheStatus `json:"status,omitempty"` + Output RedisCacheOutput `json:"output,omitempty"` + AdditionalResources RedisCacheAdditionalResources `json:"additionalResources,omitempty"` +} + +// +kubebuilder:object:root=true + +// RedisCacheList contains a list of RedisCache +type RedisCacheList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RedisCache `json:"items"` +} + +func init() { + SchemeBuilder.Register(&RedisCache{}, &RedisCacheList{}) +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 0c4c445c975..0103da7fdad 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -121,6 +121,164 @@ func (in *CosmosDBStatus) DeepCopy() *CosmosDBStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCache) DeepCopyInto(out *RedisCache) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + out.Output = in.Output + in.AdditionalResources.DeepCopyInto(&out.AdditionalResources) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCache. +func (in *RedisCache) DeepCopy() *RedisCache { + if in == nil { + return nil + } + out := new(RedisCache) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RedisCache) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisCacheAdditionalResources) DeepCopyInto(out *RedisCacheAdditionalResources) { + *out = *in + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheAdditionalResources. +func (in *RedisCacheAdditionalResources) DeepCopy() *RedisCacheAdditionalResources { + if in == nil { + return nil + } + out := new(RedisCacheAdditionalResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheList) DeepCopyInto(out *RedisCacheList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RedisCache, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheList. +func (in *RedisCacheList) DeepCopy() *RedisCacheList { + if in == nil { + return nil + } + out := new(RedisCacheList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RedisCacheList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheOutput) DeepCopyInto(out *RedisCacheOutput) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheOutput. +func (in *RedisCacheOutput) DeepCopy() *RedisCacheOutput { + if in == nil { + return nil + } + out := new(RedisCacheOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheProperties) DeepCopyInto(out *RedisCacheProperties) { + *out = *in + out.Sku = in.Sku +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheProperties. +func (in *RedisCacheProperties) DeepCopy() *RedisCacheProperties { + if in == nil { + return nil + } + out := new(RedisCacheProperties) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheSku) DeepCopyInto(out *RedisCacheSku) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheSku. +func (in *RedisCacheSku) DeepCopy() *RedisCacheSku { + if in == nil { + return nil + } + out := new(RedisCacheSku) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheSpec) DeepCopyInto(out *RedisCacheSpec) { + *out = *in + out.Properties = in.Properties +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheSpec. +func (in *RedisCacheSpec) DeepCopy() *RedisCacheSpec { + if in == nil { + return nil + } + out := new(RedisCacheSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedisCacheStatus) DeepCopyInto(out *RedisCacheStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheStatus. +func (in *RedisCacheStatus) DeepCopy() *RedisCacheStatus { + if in == nil { + return nil + } + out := new(RedisCacheStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Storage) DeepCopyInto(out *Storage) { *out = *in diff --git a/config/crd/bases/service.azure_rediscaches.yaml b/config/crd/bases/service.azure_rediscaches.yaml new file mode 100644 index 00000000000..3f4e2950da9 --- /dev/null +++ b/config/crd/bases/service.azure_rediscaches.yaml @@ -0,0 +1,91 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: rediscaches.service.azure +spec: + group: service.azure + names: + kind: RedisCache + plural: rediscaches + scope: "" + subresources: + status: {} + validation: + openAPIV3Schema: + description: RedisCache is the Schema for the rediscaches API + properties: + additionalResources: + description: StorageAdditionalResources holds the additional resources + properties: + secrets: + items: + type: string + type: array + type: object + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + output: + properties: + primaryKey: + type: string + redisCacheName: + type: string + secondaryKey: + type: string + type: object + spec: + description: RedisCacheSpec defines the desired state of RedisCache + properties: + location: + type: string + properties: + description: RedisCacheProperties the properties of the Redis Cache. + properties: + enableNonSslPort: + type: boolean + sku: + description: RedisCacheSku the SKU of the Redis Cache. + properties: + capacity: + type: integer + family: + type: string + name: + description: 'Name - The SKU name. Required for account creation; + optional for update. 
Possible values include: ''StandardLRS'', + ''StandardGRS'', ''StandardRAGRS'', ''StandardZRS'', ''PremiumLRS'', + ''PremiumZRS'', ''StandardGZRS'', ''StandardRAGZRS''' + type: string + type: object + type: object + type: object + status: + description: RedisCacheStatus defines the observed state of RedisCache + properties: + provisioningState: + type: string + type: object + type: object + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 0463d4e239b..a279f6d4dee 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -4,17 +4,20 @@ resources: - bases/service.azure_storages.yaml - bases/service.azure_cosmosdbs.yaml +- bases/service.azure_rediscaches.yaml # +kubebuilder:scaffold:crdkustomizeresource patches: # [WEBHOOK] patches here are for enabling the conversion webhook for each CRD #- patches/webhook_in_storages.yaml #- patches/webhook_in_cosmosdbs.yaml +#- patches/webhook_in_rediscaches.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CAINJECTION] patches here are for enabling the CA injection for each CRD #- patches/cainjection_in_storages.yaml #- patches/cainjection_in_cosmosdbs.yaml +#- patches/cainjection_in_rediscaches.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. diff --git a/config/crd/patches/cainjection_in_rediscaches.yaml b/config/crd/patches/cainjection_in_rediscaches.yaml new file mode 100644 index 00000000000..56a5e83784e --- /dev/null +++ b/config/crd/patches/cainjection_in_rediscaches.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: rediscaches.service.azure diff --git a/config/crd/patches/webhook_in_rediscaches.yaml b/config/crd/patches/webhook_in_rediscaches.yaml new file mode 100644 index 00000000000..3a0e04368dc --- /dev/null +++ b/config/crd/patches/webhook_in_rediscaches.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: rediscaches.service.azure +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 373e0a55c17..5b1782b738d 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -26,6 +26,26 @@ rules: - get - patch - update +- apiGroups: + - service.azure + resources: + - rediscaches + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - service.azure + resources: + - rediscaches/status + verbs: + - get + - patch + - update - apiGroups: - service.azure resources: diff --git a/config/samples/service_v1alpha1_rediscache.yaml b/config/samples/service_v1alpha1_rediscache.yaml new file mode 100644 index 00000000000..9dc728d775e --- /dev/null +++ b/config/samples/service_v1alpha1_rediscache.yaml @@ -0,0 +1,12 @@ +apiVersion: service.azure/v1alpha1 +kind: RedisCache +metadata: + name: rediscache-sample +spec: + location: eastus2 + properties: + sku: + name: Basic + family: C + capacity: 1 + enableNonSslPort: true diff --git a/controllers/rediscache_controller.go b/controllers/rediscache_controller.go new file mode 100644 index 00000000000..5e81bf4224e --- /dev/null +++ b/controllers/rediscache_controller.go @@ -0,0 +1,219 @@ +/* +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE +*/ + +package controllers + +import ( + "context" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + servicev1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" + "github.com/Azure/azure-service-operator/pkg/client/group" + "github.com/Azure/azure-service-operator/pkg/config" + "github.com/Azure/azure-service-operator/pkg/helpers" + redisCacheTemplate "github.com/Azure/azure-service-operator/pkg/rediscache" + "github.com/Azure/go-autorest/autorest/to" +) + +// RedisCacheReconciler reconciles a RedisCache object +type RedisCacheReconciler struct { + client.Client + Log logr.Logger +} + +// +kubebuilder:rbac:groups=service.azure,resources=rediscaches,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=service.azure,resources=rediscaches/status,verbs=get;update;patch + +func (r *RedisCacheReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + ctx := context.Background() + log := r.Log.WithValues("rediscache", req.NamespacedName) + + // Fetch the Redis Cache instance + instance := &servicev1alpha1.RedisCache{} + err := r.Get(ctx, req.NamespacedName, instance) + if err != nil { + log.Error(err, "unable to fetch RedisCache") + // we'll ignore not-found errors, since they can't be fixed by an immediate + // requeue (we'll need to wait for a new notification), and we can get them + // on deleted requests. + return ctrl.Result{}, client.IgnoreNotFound(err) + } + log.Info("Getting Redis Cache", "RedisCache.Namespace", instance.Namespace, "RedisCache.Name", instance.Name) + log.V(1).Info("Describing Redis Cache", "RedisCache", instance) + + redisCacheFinalizerName := "redisCache.finalizers.azure" + // examine DeletionTimestamp to determine if object is under deletion + if instance.ObjectMeta.DeletionTimestamp.IsZero() { + // The object is not being deleted, so if it does not have our finalizer, + // then lets add the finalizer and update the object. This is equivalent + // registering our finalizer. + if !helpers.ContainsString(instance.ObjectMeta.Finalizers, redisCacheFinalizerName) { + instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, redisCacheFinalizerName) + if err := r.Update(ctx, instance); err != nil { + return ctrl.Result{}, err + } + } + } else { + // The object is being deleted + if helpers.ContainsString(instance.ObjectMeta.Finalizers, redisCacheFinalizerName) { + // our finalizer is present, so lets handle any external dependency + if err := r.deleteExternalResources(instance); err != nil { + // if fail to delete the external dependency here, return with error + // so that it can be retried + return ctrl.Result{}, err + } + + // remove our finalizer from the list and update it. 
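+ // Once the finalizer is gone and the update succeeds, the API server is
+ // free to garbage-collect the RedisCache object.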
+ instance.ObjectMeta.Finalizers = helpers.RemoveString(instance.ObjectMeta.Finalizers, redisCacheFinalizerName) + if err := r.Update(ctx, instance); err != nil { + return ctrl.Result{}, err + } + } + + return ctrl.Result{}, err + } + + resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "redisCache", instance.Name, instance.Namespace) + tags := map[string]*string{ + "name": to.StringPtr(instance.Name), + "namespace": to.StringPtr(instance.Namespace), + "kind": to.StringPtr("redisCache"), + } + log.Info("Creating a new resource group", "ResourceGroupName", resourceGroupName) + group.CreateGroup(ctx, resourceGroupName, instance.Spec.Location, tags) + + log.Info("Reconciling Redis Cache", "RedisCache.Namespace", instance.Namespace, "RedisCache.Name", instance.Name) + template := redisCacheTemplate.New(instance) + de, err := template.CreateDeployment(ctx, resourceGroupName) + if err != nil { + log.Error(err, "Failed to reconcile Redis Cache") + return ctrl.Result{}, err + } + + _, err = r.updateStatus(req, resourceGroupName, *de.Properties.ProvisioningState, de.Properties.Outputs) + if err != nil { + return ctrl.Result{}, err + } + + // Redis Cache created successfully - don't requeue + return ctrl.Result{}, nil +} + +func (r *RedisCacheReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&servicev1alpha1.RedisCache{}). + Complete(r) +} + +func (r *RedisCacheReconciler) updateStatus(req ctrl.Request, resourceGroupName, provisioningState string, outputs interface{}) (*servicev1alpha1.RedisCache, error) { + ctx := context.Background() + log := r.Log.WithValues("Redis Cache", req.NamespacedName) + + resource := &servicev1alpha1.RedisCache{} + r.Get(ctx, req.NamespacedName, resource) + log.Info("Getting Redis Cache", "RedisCache.Namespace", resource.Namespace, "RedisCache.Name", resource.Name) + + resourceCopy := resource.DeepCopy() + resourceCopy.Status.ProvisioningState = provisioningState + if helpers.IsDeploymentComplete(provisioningState) { + if outputs != nil { + resourceCopy.Output.RedisCacheName = helpers.GetOutput(outputs, "redisCacheName") + resourceCopy.Output.PrimaryKey = helpers.GetOutput(outputs, "primaryKey") + resourceCopy.Output.SecondaryKey = helpers.GetOutput(outputs, "secondaryKey") + } + } + + err := r.Status().Update(ctx, resourceCopy) + if err != nil { + log.Error(err, "unable to update Redis Cache status") + return nil, err + } + log.Info("Updated Status", "Redis Cache.Namespace", resourceCopy.Namespace, "RedisCache.Name", resourceCopy.Name, "RedisCache.Status", resourceCopy.Status, "RedisCache.Output", resourceCopy.Output) + + if helpers.IsDeploymentComplete(provisioningState) { + err := r.syncAdditionalResources(req, resourceCopy) + if err != nil { + log.Error(err, "error syncing resources") + return nil, err + } + log.Info("Updated additional resources", "Storage.Namespace", resourceCopy.Namespace, "RedisCache.Name", resourceCopy.Name, "RedisCache.AdditionalResources", resourceCopy.AdditionalResources) + } + + return resourceCopy, nil +} + +func (r *RedisCacheReconciler) deleteExternalResources(instance *servicev1alpha1.RedisCache) error { + // + // delete any external resources associated with the storage + // + // Ensure that delete implementation is idempotent and safe to invoke + // multiple types for same object. 
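+ // "External resources" here are the per-instance Azure resource group
+ // (deleting it removes the Redis Cache as well) and the Kubernetes secret
+ // that holds its access keys.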
+ ctx := context.Background() + log := r.Log.WithValues("RedisCache.Namespace", instance.Namespace, "RedisCache.Name", instance.Name) + + resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "redisCache", instance.Name, instance.Namespace) + log.Info("Deleting Redis Cache", "ResourceGroupName", resourceGroupName) + _, err := group.DeleteGroup(ctx, resourceGroupName) + if err != nil { + return err + } + + err = helpers.DeleteSecret(instance.Name, instance.Namespace) + if err != nil { + return err + } + + return nil +} + +func (r *RedisCacheReconciler) syncAdditionalResources(req ctrl.Request, s *servicev1alpha1.RedisCache) (err error) { + ctx := context.Background() + log := r.Log.WithValues("redisCache", req.NamespacedName) + + resource := &servicev1alpha1.RedisCache{} + r.Get(ctx, req.NamespacedName, resource) + + secrets := []string{} + secretData := map[string]string{ + "redisCacheName": "{{.Obj.Output.RedisCacheName}}", + "primaryKey": "{{.Obj.Output.PrimaryKey}}", + "secondaryKey": "{{.Obj.Output.SecondaryKey}}", + } + secret := helpers.CreateSecret(s, s.Name, s.Namespace, secretData) + secrets = append(secrets, secret) + + resourceCopy := resource.DeepCopy() + resourceCopy.AdditionalResources.Secrets = secrets + + err = r.Update(ctx, resourceCopy) + if err != nil { + log.Error(err, "unable to update Redis Cache status") + return err + } + + return nil +} diff --git a/controllers/suite_test.go b/controllers/suite_test.go index d50e540e197..6d398c65a8e 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -74,6 +74,9 @@ var _ = BeforeSuite(func(done Done) { err = servicev1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) + err = servicev1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + // +kubebuilder:scaffold:scheme k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) diff --git a/main.go b/main.go index 714e66b9b34..a2b4781bae2 100644 --- a/main.go +++ b/main.go @@ -107,6 +107,13 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "CosmosDB") os.Exit(1) } + if err = (&controllers.RedisCacheReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("RedisCache"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "RedisCache") + os.Exit(1) + } // +kubebuilder:scaffold:builder setupLog.Info("starting manager") diff --git a/pkg/rediscache/rediscache_template.go b/pkg/rediscache/rediscache_template.go new file mode 100644 index 00000000000..109df3c6bab --- /dev/null +++ b/pkg/rediscache/rediscache_template.go @@ -0,0 +1,47 @@ +package rediscache + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources" + uuid "github.com/satori/go.uuid" + + azureV1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" + "github.com/Azure/azure-service-operator/pkg/client/deployment" +) + +// New generates a new object +func New(redisCache *azureV1alpha1.RedisCache) *Template { + return &Template{ + RedisCache: redisCache, + } +} + +// Template defines the dynamodb cfts +type Template struct { + RedisCache *azureV1alpha1.RedisCache +} + +func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (resources.DeploymentExtended, error) { + deploymentName := uuid.NewV4().String() + templateURI := "https://azureserviceoperator.blob.core.windows.net/templates/rediscache.json" + 
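+ // Parameter names below must match the "parameters" section of
+ // template/rediscache.json; each value is wrapped in the {"value": ...}
+ // shape that ARM template deployments expect.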
params := map[string]interface{}{ + "location": map[string]interface{}{ + "value": t.RedisCache.Spec.Location, + }, + "properties.sku.name": map[string]interface{}{ + "value": t.RedisCache.Spec.Properties.Sku.Name, + }, + "properties.sku.family": map[string]interface{}{ + "value": t.RedisCache.Spec.Properties.Sku.Family, + }, + "properties.sku.capacity": map[string]interface{}{ + "value": t.RedisCache.Spec.Properties.Sku.Capacity, + }, + "properties.enableNonSslPort": map[string]interface{}{ + "value": t.RedisCache.Spec.Properties.EnableNonSslPort, + }, + } + + return deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, templateURI, ¶ms) +} diff --git a/template/rediscache.json b/template/rediscache.json new file mode 100644 index 00000000000..79b77e0a1e7 --- /dev/null +++ b/template/rediscache.json @@ -0,0 +1,55 @@ +{ + "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "location": { + "type": "string" + }, + "properties.sku.name": { + "type": "string" + }, + "properties.sku.family": { + "type": "string" + }, + "properties.sku.capacity": { + "type": "int" + }, + "properties.enableNonSslPort": { + "type": "bool" + } + }, + "variables": { + "redisCacheName": "[concat('aso', uniqueString(resourceGroup().id))]" + }, + "resources": [ + { + "type": "Microsoft.Cache/Redis", + "apiVersion": "2018-03-01", + "name": "[variables('redisCacheName')]", + "location": "[parameters('location')]", + "properties": { + "sku": { + "name": "[parameters('properties.sku.name')]", + "family": "[parameters('properties.sku.family')]", + "capacity": 0 + }, + "enableNonSslPort": "[parameters('properties.enableNonSslPort')]", + "redisConfiguration": {} + } + } + ], + "outputs": { + "redisCacheName": { + "type": "string", + "value": "[variables('redisCacheName')]" + }, + "primaryKey": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Cache/Redis', variables('redisCacheName')), '2018-03-01').primaryKey]" + }, + "secondaryKey": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Cache/Redis', variables('redisCacheName')), '2018-03-01').secondaryKey]" + } + } +} From cd6308926288a2bfffa39be83b74f94647f9c8c8 Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Tue, 23 Jul 2019 11:51:49 +0800 Subject: [PATCH 14/34] Ignore the NotFound error when deleting resources --- controllers/rediscache_controller.go | 6 +++--- controllers/storage_controller.go | 7 +++---- pkg/helpers/helpers.go | 13 +++++++++++++ 3 files changed, 19 insertions(+), 7 deletions(-) diff --git a/controllers/rediscache_controller.go b/controllers/rediscache_controller.go index 5e81bf4224e..599ad3f0d4a 100644 --- a/controllers/rediscache_controller.go +++ b/controllers/rediscache_controller.go @@ -60,7 +60,7 @@ func (r *RedisCacheReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) // we'll ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them // on deleted requests. 
- return ctrl.Result{}, client.IgnoreNotFound(err) + return ctrl.Result{}, helpers.IgnoreKubernetesResourceNotFound(err) } log.Info("Getting Redis Cache", "RedisCache.Namespace", instance.Namespace, "RedisCache.Name", instance.Name) log.V(1).Info("Describing Redis Cache", "RedisCache", instance) @@ -178,12 +178,12 @@ func (r *RedisCacheReconciler) deleteExternalResources(instance *servicev1alpha1 resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "redisCache", instance.Name, instance.Namespace) log.Info("Deleting Redis Cache", "ResourceGroupName", resourceGroupName) _, err := group.DeleteGroup(ctx, resourceGroupName) - if err != nil { + if err != nil && helpers.IgnoreAzureResourceNotFound(err) != nil { return err } err = helpers.DeleteSecret(instance.Name, instance.Namespace) - if err != nil { + if err != nil && helpers.IgnoreKubernetesResourceNotFound(err) != nil { return err } diff --git a/controllers/storage_controller.go b/controllers/storage_controller.go index 2b56f91ca0c..b11dde2ad31 100644 --- a/controllers/storage_controller.go +++ b/controllers/storage_controller.go @@ -60,7 +60,7 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { // we'll ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them // on deleted requests. - return ctrl.Result{}, client.IgnoreNotFound(err) + return ctrl.Result{}, helpers.IgnoreKubernetesResourceNotFound(err) } log.Info("Getting Storage Account", "Storage.Namespace", instance.Namespace, "Storage.Name", instance.Name) log.V(1).Info("Describing Storage Account", "Storage", instance) @@ -103,7 +103,6 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { "namespace": to.StringPtr(instance.Namespace), "kind": to.StringPtr("storage"), } - log.Info("Creating a new resource group", "ResourceGroupName", resourceGroupName) group.CreateGroup(ctx, resourceGroupName, instance.Spec.Location, tags) log.Info("Reconciling Storage", "Storage.Namespace", instance.Namespace, "Storage.Name", instance.Name) @@ -180,12 +179,12 @@ func (r *StorageReconciler) deleteExternalResources(instance *servicev1alpha1.St resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "storage", instance.Name, instance.Namespace) log.Info("Deleting Storage Account", "ResourceGroupName", resourceGroupName) _, err := group.DeleteGroup(ctx, resourceGroupName) - if err != nil { + if err != nil && helpers.IgnoreAzureResourceNotFound(err) != nil { return err } err = helpers.DeleteSecret(instance.Name, instance.Namespace) - if err != nil { + if err != nil && helpers.IgnoreKubernetesResourceNotFound(err) != nil { return err } diff --git a/pkg/helpers/helpers.go b/pkg/helpers/helpers.go index 0dde07e8aab..dabb45b5097 100644 --- a/pkg/helpers/helpers.go +++ b/pkg/helpers/helpers.go @@ -7,7 +7,9 @@ import ( "regexp" "strings" + "github.com/Azure/go-autorest/autorest" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" ) var ( @@ -47,3 +49,14 @@ func AzrueResourceGroupName(subscriptionID, clusterName, resourceType, name, nam io.WriteString(hash, nameString) return fmt.Sprintf("aso-%x", hash.Sum(nil)) } + +func IgnoreKubernetesResourceNotFound(err error) error { + return client.IgnoreNotFound(err) +} + +func IgnoreAzureResourceNotFound(err error) error { + if err.(autorest.DetailedError).StatusCode.(int) == 404 { + 
return nil + } + return err +} From 15b9ec10bd8fc16a5386811c693f91ff767cc704 Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Tue, 23 Jul 2019 02:41:41 +0800 Subject: [PATCH 15/34] Requeue the request if the deployment is not complete --- api/v1alpha1/rediscache_types.go | 1 + api/v1alpha1/storage_types.go | 2 ++ .../crd/bases/service.azure_rediscaches.yaml | 2 ++ config/crd/bases/service.azure_storages.yaml | 5 ++-- controllers/rediscache_controller.go | 29 ++++++++++++++++--- controllers/storage_controller.go | 28 ++++++++++++++++-- go.mod | 8 ++++- go.sum | 18 +----------- pkg/client/deployment/deployment.go | 19 +++++------- pkg/rediscache/rediscache_template.go | 6 ++-- pkg/storage/storage_template.go | 6 ++-- 11 files changed, 78 insertions(+), 46 deletions(-) diff --git a/api/v1alpha1/rediscache_types.go b/api/v1alpha1/rediscache_types.go index 8d1468434e0..80bdedac4b4 100644 --- a/api/v1alpha1/rediscache_types.go +++ b/api/v1alpha1/rediscache_types.go @@ -81,6 +81,7 @@ type RedisCacheStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file + DeploymentName string `json:"deploymentName,omitempty"` ProvisioningState string `json:"provisioningState,omitempty"` } diff --git a/api/v1alpha1/storage_types.go b/api/v1alpha1/storage_types.go index 1046171ceb1..176ce8de8ca 100644 --- a/api/v1alpha1/storage_types.go +++ b/api/v1alpha1/storage_types.go @@ -81,6 +81,8 @@ type StorageAccessTier string type StorageStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file + + DeploymentName string `json:"deploymentName,omitempty"` ProvisioningState string `json:"provisioningState,omitempty"` } diff --git a/config/crd/bases/service.azure_rediscaches.yaml b/config/crd/bases/service.azure_rediscaches.yaml index 3f4e2950da9..4ad9cfcd827 100644 --- a/config/crd/bases/service.azure_rediscaches.yaml +++ b/config/crd/bases/service.azure_rediscaches.yaml @@ -75,6 +75,8 @@ spec: status: description: RedisCacheStatus defines the observed state of RedisCache properties: + deploymentName: + type: string provisioningState: type: string type: object diff --git a/config/crd/bases/service.azure_storages.yaml b/config/crd/bases/service.azure_storages.yaml index 8268ba34d83..5d8479132ac 100644 --- a/config/crd/bases/service.azure_storages.yaml +++ b/config/crd/bases/service.azure_storages.yaml @@ -99,10 +99,9 @@ spec: status: description: StorageStatus defines the observed state of Storage properties: + deploymentName: + type: string provisioningState: - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state - of cluster Important: Run "make" to regenerate code after modifying - this file' type: string type: object type: object diff --git a/controllers/rediscache_controller.go b/controllers/rediscache_controller.go index 599ad3f0d4a..299a5337def 100644 --- a/controllers/rediscache_controller.go +++ b/controllers/rediscache_controller.go @@ -32,6 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" servicev1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" + "github.com/Azure/azure-service-operator/pkg/client/deployment" "github.com/Azure/azure-service-operator/pkg/client/group" "github.com/Azure/azure-service-operator/pkg/config" "github.com/Azure/azure-service-operator/pkg/helpers" @@ -98,23 +99,42 @@ func (r *RedisCacheReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) } 
resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "redisCache", instance.Name, instance.Namespace) + deploymentName := instance.Status.DeploymentName + if deploymentName != "" { + log.Info("Checking deployment", "ResourceGroupName", resourceGroupName, "DeploymentName", deploymentName) + de, _ := deployment.GetDeployment(ctx, resourceGroupName, deploymentName) + provisioningState := *de.Properties.ProvisioningState + if helpers.IsDeploymentComplete(provisioningState) { + log.Info("Deployment is complete", "ProvisioningState", provisioningState) + _, err = r.updateStatus(req, resourceGroupName, deploymentName, provisioningState, de.Properties.Outputs) + if err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } else { + log.Info("Requeue the request", "ProvisioningState", provisioningState) + return ctrl.Result{Requeue: true}, nil + } + } + + log.Info("Creating a new resource group", "ResourceGroupName", resourceGroupName) tags := map[string]*string{ "name": to.StringPtr(instance.Name), "namespace": to.StringPtr(instance.Namespace), "kind": to.StringPtr("redisCache"), } - log.Info("Creating a new resource group", "ResourceGroupName", resourceGroupName) group.CreateGroup(ctx, resourceGroupName, instance.Spec.Location, tags) log.Info("Reconciling Redis Cache", "RedisCache.Namespace", instance.Namespace, "RedisCache.Name", instance.Name) template := redisCacheTemplate.New(instance) - de, err := template.CreateDeployment(ctx, resourceGroupName) + deploymentName, err = template.CreateDeployment(ctx, resourceGroupName) if err != nil { log.Error(err, "Failed to reconcile Redis Cache") return ctrl.Result{}, err } - _, err = r.updateStatus(req, resourceGroupName, *de.Properties.ProvisioningState, de.Properties.Outputs) + de, _ := deployment.GetDeployment(ctx, resourceGroupName, deploymentName) + _, err = r.updateStatus(req, resourceGroupName, deploymentName, *de.Properties.ProvisioningState, nil) if err != nil { return ctrl.Result{}, err } @@ -129,7 +149,7 @@ func (r *RedisCacheReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *RedisCacheReconciler) updateStatus(req ctrl.Request, resourceGroupName, provisioningState string, outputs interface{}) (*servicev1alpha1.RedisCache, error) { +func (r *RedisCacheReconciler) updateStatus(req ctrl.Request, resourceGroupName, deploymentName, provisioningState string, outputs interface{}) (*servicev1alpha1.RedisCache, error) { ctx := context.Background() log := r.Log.WithValues("Redis Cache", req.NamespacedName) @@ -138,6 +158,7 @@ func (r *RedisCacheReconciler) updateStatus(req ctrl.Request, resourceGroupName, log.Info("Getting Redis Cache", "RedisCache.Namespace", resource.Namespace, "RedisCache.Name", resource.Name) resourceCopy := resource.DeepCopy() + resourceCopy.Status.DeploymentName = deploymentName resourceCopy.Status.ProvisioningState = provisioningState if helpers.IsDeploymentComplete(provisioningState) { if outputs != nil { diff --git a/controllers/storage_controller.go b/controllers/storage_controller.go index b11dde2ad31..009da5bc0ef 100644 --- a/controllers/storage_controller.go +++ b/controllers/storage_controller.go @@ -32,6 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" servicev1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" + "github.com/Azure/azure-service-operator/pkg/client/deployment" "github.com/Azure/azure-service-operator/pkg/client/group" "github.com/Azure/azure-service-operator/pkg/config" 
"github.com/Azure/azure-service-operator/pkg/helpers" @@ -98,6 +99,25 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { } resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "storage", instance.Name, instance.Namespace) + deploymentName := instance.Status.DeploymentName + if deploymentName != "" { + log.Info("Checking deployment", "ResourceGroupName", resourceGroupName, "DeploymentName", deploymentName) + de, _ := deployment.GetDeployment(ctx, resourceGroupName, deploymentName) + provisioningState := *de.Properties.ProvisioningState + if helpers.IsDeploymentComplete(provisioningState) { + log.Info("Deployment is complete", "ProvisioningState", provisioningState) + _, err = r.updateStatus(req, resourceGroupName, deploymentName, provisioningState, de.Properties.Outputs) + if err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } else { + log.Info("Requeue the request", "ProvisioningState", provisioningState) + return ctrl.Result{Requeue: true}, nil + } + } + + log.Info("Creating a new resource group", "ResourceGroupName", resourceGroupName) tags := map[string]*string{ "name": to.StringPtr(instance.Name), "namespace": to.StringPtr(instance.Namespace), @@ -107,13 +127,14 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { log.Info("Reconciling Storage", "Storage.Namespace", instance.Namespace, "Storage.Name", instance.Name) template := storagetemplate.New(instance) - de, err := template.CreateDeployment(ctx, resourceGroupName) + deploymentName, err = template.CreateDeployment(ctx, resourceGroupName) if err != nil { log.Error(err, "Failed to reconcile Storage") return ctrl.Result{}, err } - _, err = r.updateStatus(req, resourceGroupName, *de.Properties.ProvisioningState, de.Properties.Outputs) + de, _ := deployment.GetDeployment(ctx, resourceGroupName, deploymentName) + _, err = r.updateStatus(req, resourceGroupName, deploymentName, *de.Properties.ProvisioningState, nil) if err != nil { return ctrl.Result{}, err } @@ -128,7 +149,7 @@ func (r *StorageReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *StorageReconciler) updateStatus(req ctrl.Request, resourceGroupName, provisioningState string, outputs interface{}) (*servicev1alpha1.Storage, error) { +func (r *StorageReconciler) updateStatus(req ctrl.Request, resourceGroupName, deploymentName, provisioningState string, outputs interface{}) (*servicev1alpha1.Storage, error) { ctx := context.Background() log := r.Log.WithValues("storage", req.NamespacedName) @@ -137,6 +158,7 @@ func (r *StorageReconciler) updateStatus(req ctrl.Request, resourceGroupName, pr log.Info("Getting Storage Account", "Storage.Namespace", resource.Namespace, "Storage.Name", resource.Name) resourceCopy := resource.DeepCopy() + resourceCopy.Status.DeploymentName = deploymentName resourceCopy.Status.ProvisioningState = provisioningState if helpers.IsDeploymentComplete(provisioningState) { if outputs != nil { diff --git a/go.mod b/go.mod index d7b34961ee9..74b005ca4dc 100644 --- a/go.mod +++ b/go.mod @@ -10,14 +10,20 @@ require ( github.com/Azure/go-autorest/autorest/to v0.2.0 github.com/Azure/go-autorest/autorest/validation v0.1.0 // indirect github.com/go-logr/logr v0.1.0 + github.com/gogo/protobuf v1.2.1 // indirect + github.com/google/go-cmp v0.3.0 // indirect + github.com/json-iterator/go v1.1.6 // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect github.com/onsi/ginkgo v1.8.0 
github.com/onsi/gomega v1.5.0 github.com/satori/go.uuid v1.2.0 github.com/spf13/pflag v1.0.3 golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 + golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872 // indirect + golang.org/x/text v0.3.2 // indirect + gopkg.in/yaml.v2 v2.2.2 // indirect k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible sigs.k8s.io/controller-runtime v0.2.0-beta.4 - sigs.k8s.io/controller-tools v0.2.0-beta.4 // indirect ) diff --git a/go.sum b/go.sum index bd42b12c09c..8bbc0615885 100644 --- a/go.sum +++ b/go.sum @@ -51,8 +51,6 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -63,8 +61,6 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gobuffalo/flect v0.1.5 h1:xpKq9ap8MbYfhuPCF0dBH854Gp9CxZjr/IocxELFflo= -github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= @@ -83,6 +79,7 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= @@ -100,7 +97,6 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= 
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= @@ -115,10 +111,6 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -175,8 +167,6 @@ github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= @@ -238,7 +228,6 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872 h1:cGjJzUd8RgBw428LXP65YXni0aiGNA4Bl+ls8SmLOm8= @@ -255,8 +244,6 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190501045030-23463209683d h1:D7DVZUZEUgsSIDTivnUtVeGfN5AvhDIKtdIZAqx0ieE= -golang.org/x/tools v0.0.0-20190501045030-23463209683d/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= gomodules.xyz/jsonpatch/v2 v2.0.0 h1:OyHbl+7IOECpPKfVK42oFr6N7+Y2dR+Jsb/IiDV3hOo= gomodules.xyz/jsonpatch/v2 v2.0.0/go.mod 
h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= @@ -298,7 +285,6 @@ k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d h1:Jmdtdt1ZnoGfWWIIik61Z7 k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible h1:U5Bt+dab9K8qaUmXINrkXO135kA11/i5Kg1RUydgaMQ= k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c h1:3KSCztE7gPitlZmWbNwue/2U0YruD65DqX3INopDAQM= @@ -307,8 +293,6 @@ k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= sigs.k8s.io/controller-runtime v0.2.0-beta.4 h1:S1XVfRWR1MuIXZdkYx3jN8JDw+bbQxmWZroy0i87z/A= sigs.k8s.io/controller-runtime v0.2.0-beta.4/go.mod h1:HweyYKQ8fBuzdu2bdaeBJvsFgAi/OqBBnrVGXcqKhME= -sigs.k8s.io/controller-tools v0.2.0-beta.4 h1:W+coTe+nkVNclQrikwlRp6GJKwgcrHzvIQZ9kCaak5A= -sigs.k8s.io/controller-tools v0.2.0-beta.4/go.mod h1:8t/X+FVWvk6TaBcsa+UKUBbn7GMtvyBKX30SGl4em6Y= sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs= sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= diff --git a/pkg/client/deployment/deployment.go b/pkg/client/deployment/deployment.go index ee9ff1eb506..bb3634a6e13 100644 --- a/pkg/client/deployment/deployment.go +++ b/pkg/client/deployment/deployment.go @@ -2,7 +2,6 @@ package deployment import ( "context" - "fmt" "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources" @@ -19,12 +18,12 @@ func getDeploymentsClient() resources.DeploymentsClient { // CreateDeployment creates a template deployment using the // referenced JSON files for the template and its parameters -func CreateDeployment(ctx context.Context, resourceGroupName, deploymentName, templateUri string, params *map[string]interface{}) (de resources.DeploymentExtended, err error) { +func CreateDeployment(ctx context.Context, resourceGroupName, deploymentName, templateUri string, params *map[string]interface{}) error { deployClient := getDeploymentsClient() templateLink := resources.TemplateLink{ URI: &templateUri, } - future, err := deployClient.CreateOrUpdate( + _, err := deployClient.CreateOrUpdate( ctx, resourceGroupName, deploymentName, @@ -36,14 +35,10 @@ func CreateDeployment(ctx context.Context, resourceGroupName, deploymentName, te }, }, ) - if err != nil { - return de, fmt.Errorf("cannot create deployment: %v", err) - } - - err = future.WaitForCompletionRef(ctx, deployClient.Client) - if err != nil { - return de, fmt.Errorf("cannot get the create deployment future respone: %v", err) - } + return err +} - return future.Result(deployClient) +func GetDeployment(ctx context.Context, resourceGroupName, deploymentName string) (de resources.DeploymentExtended, err error) { + deployClient := getDeploymentsClient() + return deployClient.Get(ctx, resourceGroupName, deploymentName) } diff --git a/pkg/rediscache/rediscache_template.go 
b/pkg/rediscache/rediscache_template.go index 109df3c6bab..38a86858ae2 100644 --- a/pkg/rediscache/rediscache_template.go +++ b/pkg/rediscache/rediscache_template.go @@ -3,7 +3,6 @@ package rediscache import ( "context" - "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources" uuid "github.com/satori/go.uuid" azureV1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" @@ -22,7 +21,7 @@ type Template struct { RedisCache *azureV1alpha1.RedisCache } -func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (resources.DeploymentExtended, error) { +func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (string, error) { deploymentName := uuid.NewV4().String() templateURI := "https://azureserviceoperator.blob.core.windows.net/templates/rediscache.json" params := map[string]interface{}{ @@ -43,5 +42,6 @@ func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName strin }, } - return deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, templateURI, ¶ms) + err := deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, templateURI, ¶ms) + return deploymentName, err } diff --git a/pkg/storage/storage_template.go b/pkg/storage/storage_template.go index 85d58fb9f43..9267595b4c4 100644 --- a/pkg/storage/storage_template.go +++ b/pkg/storage/storage_template.go @@ -3,7 +3,6 @@ package storage import ( "context" - "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources" uuid "github.com/satori/go.uuid" azureV1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" @@ -22,7 +21,7 @@ type Template struct { Storage *azureV1alpha1.Storage } -func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (resources.DeploymentExtended, error) { +func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (string, error) { deploymentName := uuid.NewV4().String() templateURI := "https://azureserviceoperator.blob.core.windows.net/templates/storage.json" params := map[string]interface{}{ @@ -43,5 +42,6 @@ func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName strin }, } - return deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, templateURI, ¶ms) + err := deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, templateURI, ¶ms) + return deploymentName, err } From 6fc89ab04afde7745ad8b58ab5027ef1361910c8 Mon Sep 17 00:00:00 2001 From: Zhongyi Zhang Date: Tue, 23 Jul 2019 05:50:43 +0000 Subject: [PATCH 16/34] feature: add cosmosdb service --- api/v1alpha1/cosmosdb_types.go | 62 +++++- api/v1alpha1/zz_generated.deepcopy.go | 53 +++++ config/crd/bases/service.azure_cosmosdbs.yaml | 46 +++++ config/samples/service_v1alpha1_cosmosdb.yaml | 6 +- controllers/cosmosdb_controller.go | 185 +++++++++++++++++- pkg/cosmosdb/cosmosdb_template.go | 41 ++++ template/cosmosdb.json | 47 +++++ 7 files changed, 433 insertions(+), 7 deletions(-) create mode 100644 pkg/cosmosdb/cosmosdb_template.go create mode 100644 template/cosmosdb.json diff --git a/api/v1alpha1/cosmosdb_types.go b/api/v1alpha1/cosmosdb_types.go index 0a5498f3f07..7d1340b05ef 100644 --- a/api/v1alpha1/cosmosdb_types.go +++ b/api/v1alpha1/cosmosdb_types.go @@ -35,23 +35,81 @@ import ( type CosmosDBSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file + + // +kubebuilder:validation:MinLength=0 + + Location string 
`json:"location,omitempty"` + Kind CosmosDBKind `json:"kind,omitempty"` + Properties CosmosDBProperties `json:"properties,omitempty"` +} + +// CosmosDBKind enumerates the values for kind. +// Only one of the following kinds may be specified. +// If none of the following kinds is specified, the default one +// is GlobalDocumentDBKind. +// +kubebuilder:validation:Enum=GlobalDocumentDB;MongoDB +type CosmosDBKind string + +const ( + CosmosDBKindGlobalDocumentDB CosmosDBKind = "GlobalDocumentDB" + CosmosDBKindMongoDB CosmosDBKind = "MongoDB" +) + +// CosmosDBProperties the CosmosDBProperties of CosmosDB. +type CosmosDBProperties struct { + // CosmosDBDatabaseAccountOfferType - The offer type for the Cosmos DB database account. + DatabaseAccountOfferType CosmosDBDatabaseAccountOfferType `json:"databaseAccountOfferType,omitempty"` + //Locations []CosmosDBLocation `json:"locations,omitempty"` +} + +// +kubebuilder:validation:Enum=Standard +type CosmosDBDatabaseAccountOfferType string + +const ( + CosmosDBDatabaseAccountOfferTypeStandard CosmosDBDatabaseAccountOfferType = "Standard" +) + +/* +type CosmosDBLocation struct { + FailoverPriority int `json:"failoverPriority,omitempty"` + LocationName string `json:"locationName,omitempty"` + IsZoneRedundant bool `json:"isZoneRedundant,omitempty"` } +*/ // CosmosDBStatus defines the observed state of CosmosDB type CosmosDBStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file + DeploymentName string `json:"deploymentName,omitempty"` + ProvisioningState string `json:"provisioningState,omitempty"` +} + +type CosmosDBOutput struct { + CosmosDBName string `json:"cosmosDBName,omitempty"` + PrimaryMasterKey string `json:"primaryMasterKey,omitempty"` + //SecondaryMasterKey string `json:"secondaryMasterKey,omitempty"` + //PrimaryReadonlyMasterKey string `json:"primaryReadonlyMasterKey,omitempty"` + //SecondaryReadonlyMasterKey string `json:"secondaryReadonlyMasterKey,omitempty"` +} + +// CosmosDBAdditionalResources holds the additional resources +type CosmosDBAdditionalResources struct { + Secrets []string `json:"secrets,omitempty"` } // +kubebuilder:object:root=true +// +kubebuilder:subresource:status // CosmosDB is the Schema for the cosmosdbs API type CosmosDB struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec CosmosDBSpec `json:"spec,omitempty"` - Status CosmosDBStatus `json:"status,omitempty"` + Spec CosmosDBSpec `json:"spec,omitempty"` + Status CosmosDBStatus `json:"status,omitempty"` + Output CosmosDBOutput `json:"output,omitempty"` + AdditionalResources CosmosDBAdditionalResources `json:"additionalResources,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 0103da7fdad..298e5ed120a 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -39,6 +39,8 @@ func (in *CosmosDB) DeepCopyInto(out *CosmosDB) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec out.Status = in.Status + out.Output = in.Output + in.AdditionalResources.DeepCopyInto(&out.AdditionalResources) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDB. @@ -59,6 +61,26 @@ func (in *CosmosDB) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CosmosDBAdditionalResources) DeepCopyInto(out *CosmosDBAdditionalResources) { + *out = *in + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBAdditionalResources. +func (in *CosmosDBAdditionalResources) DeepCopy() *CosmosDBAdditionalResources { + if in == nil { + return nil + } + out := new(CosmosDBAdditionalResources) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CosmosDBList) DeepCopyInto(out *CosmosDBList) { *out = *in @@ -91,9 +113,40 @@ func (in *CosmosDBList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBOutput) DeepCopyInto(out *CosmosDBOutput) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBOutput. +func (in *CosmosDBOutput) DeepCopy() *CosmosDBOutput { + if in == nil { + return nil + } + out := new(CosmosDBOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBProperties) DeepCopyInto(out *CosmosDBProperties) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBProperties. +func (in *CosmosDBProperties) DeepCopy() *CosmosDBProperties { + if in == nil { + return nil + } + out := new(CosmosDBProperties) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CosmosDBSpec) DeepCopyInto(out *CosmosDBSpec) { *out = *in + out.Properties = in.Properties } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBSpec. diff --git a/config/crd/bases/service.azure_cosmosdbs.yaml b/config/crd/bases/service.azure_cosmosdbs.yaml index b4d852bb61e..e5eb061f9c7 100644 --- a/config/crd/bases/service.azure_cosmosdbs.yaml +++ b/config/crd/bases/service.azure_cosmosdbs.yaml @@ -11,10 +11,20 @@ spec: kind: CosmosDB plural: cosmosdbs scope: "" + subresources: + status: {} validation: openAPIV3Schema: description: CosmosDB is the Schema for the cosmosdbs API properties: + additionalResources: + description: CosmosDBAdditionalResources holds the additional resources + properties: + secrets: + items: + type: string + type: array + type: object apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest @@ -27,11 +37,47 @@ spec: type: string metadata: type: object + output: + properties: + cosmosDBName: + type: string + primaryMasterKey: + type: string + type: object spec: description: CosmosDBSpec defines the desired state of CosmosDB + properties: + kind: + description: CosmosDBKind enumerates the values for kind. Only one of + the following kinds may be specified. If none of the following kinds + is specified, the default one is GlobalDocumentDBKind. + enum: + - GlobalDocumentDB + - MongoDB + type: string + location: + type: string + properties: + description: CosmosDBProperties the CosmosDBProperties of CosmosDB. 
+ properties: + databaseAccountOfferType: + description: CosmosDBDatabaseAccountOfferType - The offer type for + the Cosmos DB database account. + enum: + - Standard + type: string + type: object type: object status: description: CosmosDBStatus defines the observed state of CosmosDB + properties: + deploymentName: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + type: string + provisioningState: + type: string type: object type: object versions: diff --git a/config/samples/service_v1alpha1_cosmosdb.yaml b/config/samples/service_v1alpha1_cosmosdb.yaml index f24eec665d9..6b728d87db9 100644 --- a/config/samples/service_v1alpha1_cosmosdb.yaml +++ b/config/samples/service_v1alpha1_cosmosdb.yaml @@ -3,5 +3,7 @@ kind: CosmosDB metadata: name: cosmosdb-sample spec: - # Add fields here - foo: bar + kind: GlobalDocumentDB + location: westus + properties: + databaseAccountOfferType: Standard diff --git a/controllers/cosmosdb_controller.go b/controllers/cosmosdb_controller.go index 25584193218..388d4b73977 100644 --- a/controllers/cosmosdb_controller.go +++ b/controllers/cosmosdb_controller.go @@ -32,6 +32,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" servicev1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" + "github.com/Azure/azure-service-operator/pkg/client/deployment" + "github.com/Azure/azure-service-operator/pkg/client/group" + "github.com/Azure/azure-service-operator/pkg/config" + cosmosdbtemplate "github.com/Azure/azure-service-operator/pkg/cosmosdb" + "github.com/Azure/azure-service-operator/pkg/helpers" + "github.com/Azure/go-autorest/autorest/to" ) // CosmosDBReconciler reconciles a CosmosDB object @@ -44,11 +50,96 @@ type CosmosDBReconciler struct { // +kubebuilder:rbac:groups=service.azure,resources=cosmosdbs/status,verbs=get;update;patch func (r *CosmosDBReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { - _ = context.Background() - _ = r.Log.WithValues("cosmosdb", req.NamespacedName) + ctx := context.Background() + log := r.Log.WithValues("cosmosdb", req.NamespacedName) - // your logic here + // Fetch the CosmosDB instance + instance := &servicev1alpha1.CosmosDB{} + err := r.Get(ctx, req.NamespacedName, instance) + if err != nil { + log.Error(err, "unable to fetch CosmosDB") + // we'll ignore not-found errors, since they can't be fixed by an immediate + // requeue (we'll need to wait for a new notification), and we can get them + // on deleted requests. + return ctrl.Result{}, client.IgnoreNotFound(err) + } + log.Info("Getting CosmosDB Account", "CosmosDB.Namespace", instance.Namespace, "CosmosDB.Name", instance.Name) + log.V(1).Info("Describing CosmosDB Account", "CosmosDB", instance) + cosmosDBFinalizerName := "cosmosdb.finalizers.azure" + // examine DeletionTimestamp to determine if object is under deletion + if instance.ObjectMeta.DeletionTimestamp.IsZero() { + // The object is not being deleted, so if it does not have our finalizer, + // then lets add the finalizer and update the object. This is equivalent + // registering our finalizer. 
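+ // Persisting the finalizer before any Azure resources are created ensures
+ // deleteExternalResources gets a chance to run when the object is deleted.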
+ if !helpers.ContainsString(instance.ObjectMeta.Finalizers, cosmosDBFinalizerName) { + instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, cosmosDBFinalizerName) + if err := r.Update(ctx, instance); err != nil { + return ctrl.Result{}, err + } + } + } else { + // The object is being deleted + if helpers.ContainsString(instance.ObjectMeta.Finalizers, cosmosDBFinalizerName) { + // our finalizer is present, so lets handle any external dependency + if err := r.deleteExternalResources(instance); err != nil { + // if fail to delete the external dependency here, return with error + // so that it can be retried + return ctrl.Result{}, err + } + + // remove our finalizer from the list and update it. + instance.ObjectMeta.Finalizers = helpers.RemoveString(instance.ObjectMeta.Finalizers, cosmosDBFinalizerName) + if err := r.Update(ctx, instance); err != nil { + return ctrl.Result{}, err + } + } + + return ctrl.Result{}, err + } + + resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "cosmosdb", instance.Name, instance.Namespace) + deploymentName := instance.Status.DeploymentName + if deploymentName != "" { + log.Info("Checking deployment", "ResourceGroupName", resourceGroupName, "DeploymentName", deploymentName) + de, _ := deployment.GetDeployment(ctx, resourceGroupName, deploymentName) + provisioningState := *de.Properties.ProvisioningState + if helpers.IsDeploymentComplete(provisioningState) { + log.Info("Deployment is complete", "ProvisioningState", provisioningState) + _, err = r.updateStatus(req, resourceGroupName, deploymentName, provisioningState, de.Properties.Outputs) + if err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } else { + log.Info("Requeue the request", "ProvisioningState", provisioningState) + return ctrl.Result{Requeue: true}, nil + } + } + + log.Info("Creating a new resource group", "ResourceGroupName", resourceGroupName) + tags := map[string]*string{ + "name": to.StringPtr(instance.Name), + "namespace": to.StringPtr(instance.Namespace), + "kind": to.StringPtr("cosmosdb"), + } + group.CreateGroup(ctx, resourceGroupName, instance.Spec.Location, tags) + + log.Info("Reconciling CosmosDB", "CosmosDB.Namespace", instance.Namespace, "CosmosDB.Name", instance.Name) + template := cosmosdbtemplate.New(instance) + deploymentName, err = template.CreateDeployment(ctx, resourceGroupName) + if err != nil { + log.Error(err, "Failed to reconcile CosmosDB") + return ctrl.Result{}, err + } + + de, _ := deployment.GetDeployment(ctx, resourceGroupName, deploymentName) + _, err = r.updateStatus(req, resourceGroupName, deploymentName, *de.Properties.ProvisioningState, nil) + if err != nil { + return ctrl.Result{}, err + } + + // CosmosDB created successfully - don't requeue return ctrl.Result{}, nil } @@ -57,3 +148,91 @@ func (r *CosmosDBReconciler) SetupWithManager(mgr ctrl.Manager) error { For(&servicev1alpha1.CosmosDB{}). 
Complete(r) } + +func (r *CosmosDBReconciler) updateStatus(req ctrl.Request, resourceGroupName, deploymentName, provisioningState string, outputs interface{}) (*servicev1alpha1.CosmosDB, error) { + ctx := context.Background() + log := r.Log.WithValues("cosmosdb", req.NamespacedName) + + resource := &servicev1alpha1.CosmosDB{} + r.Get(ctx, req.NamespacedName, resource) + log.Info("Getting CosmosDB Account", "CosmosDB.Namespace", resource.Namespace, "CosmosDB.Name", resource.Name) + + resourceCopy := resource.DeepCopy() + resourceCopy.Status.DeploymentName = deploymentName + resourceCopy.Status.ProvisioningState = provisioningState + if helpers.IsDeploymentComplete(provisioningState) { + if outputs != nil { + resourceCopy.Output.CosmosDBName = helpers.GetOutput(outputs, "cosmosDBName") + resourceCopy.Output.PrimaryMasterKey = helpers.GetOutput(outputs, "primaryMasterKey") + } + } + + err := r.Status().Update(ctx, resourceCopy) + if err != nil { + log.Error(err, "unable to update CosmosDB status") + return nil, err + } + log.Info("Updated Status", "CosmosDB.Namespace", resourceCopy.Namespace, "CosmosDB.Name", resourceCopy.Name, "CosmosDB.Status", resourceCopy.Status, "CosmosDB.Output", resourceCopy.Output) + + if helpers.IsDeploymentComplete(provisioningState) { + err := r.syncAdditionalResources(req, resourceCopy) + if err != nil { + log.Error(err, "error syncing resources") + return nil, err + } + log.Info("Updated additional resources", "CosmosDB.Namespace", resourceCopy.Namespace, "CosmosDB.Name", resourceCopy.Name, "CosmosDB.AdditionalResources", resourceCopy.AdditionalResources) + } + + return resourceCopy, nil +} + +func (r *CosmosDBReconciler) deleteExternalResources(instance *servicev1alpha1.CosmosDB) error { + // + // delete any external resources associated with the cosmosdb + // + // Ensure that delete implementation is idempotent and safe to invoke + // multiple types for same object. 
+ ctx := context.Background() + log := r.Log.WithValues("CosmosDB.Namespace", instance.Namespace, "CosmosDB.Name", instance.Name) + + resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "cosmosdb", instance.Name, instance.Namespace) + log.Info("Deleting CosmosDB Account", "ResourceGroupName", resourceGroupName) + _, err := group.DeleteGroup(ctx, resourceGroupName) + if err != nil { + return err + } + + err = helpers.DeleteSecret(instance.Name, instance.Namespace) + if err != nil { + return err + } + + return nil +} + +func (r *CosmosDBReconciler) syncAdditionalResources(req ctrl.Request, s *servicev1alpha1.CosmosDB) (err error) { + ctx := context.Background() + log := r.Log.WithValues("cosmosdb", req.NamespacedName) + + resource := &servicev1alpha1.CosmosDB{} + r.Get(ctx, req.NamespacedName, resource) + + secrets := []string{} + secretData := map[string]string{ + "cosmosDBName": "{{.Obj.Output.CosmosDBName}}", + "primaryMasterKey": "{{.Obj.Output.PrimaryMasterKey}}", + } + secret := helpers.CreateSecret(s, s.Name, s.Namespace, secretData) + secrets = append(secrets, secret) + + resourceCopy := resource.DeepCopy() + resourceCopy.AdditionalResources.Secrets = secrets + + err = r.Update(ctx, resourceCopy) + if err != nil { + log.Error(err, "unable to update CosmosDB status") + return err + } + + return nil +} diff --git a/pkg/cosmosdb/cosmosdb_template.go b/pkg/cosmosdb/cosmosdb_template.go new file mode 100644 index 00000000000..a9bce2cd181 --- /dev/null +++ b/pkg/cosmosdb/cosmosdb_template.go @@ -0,0 +1,41 @@ +package cosmosdb + +import ( + "context" + + uuid "github.com/satori/go.uuid" + + azureV1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" + "github.com/Azure/azure-service-operator/pkg/client/deployment" +) + +// New generates a new object +func New(cosmosdb *azureV1alpha1.CosmosDB) *Template { + return &Template{ + CosmosDB: cosmosdb, + } +} + +// Template defines the dynamodb cfts +type Template struct { + CosmosDB *azureV1alpha1.CosmosDB +} + +func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (string, error) { + deploymentName := uuid.NewV4().String() + templateURI := "https://azureserviceoperator.blob.core.windows.net/templates/cosmosdb.json" + params := map[string]interface{}{ + "location": map[string]interface{}{ + "value": t.CosmosDB.Spec.Location, + }, + "kind": map[string]interface{}{ + "value": t.CosmosDB.Spec.Kind, + }, + "properties": map[string]interface{}{ + "value": t.CosmosDB.Spec.Properties, + }, + } + + err := deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, templateURI, ¶ms) + return deploymentName, err +} diff --git a/template/cosmosdb.json b/template/cosmosdb.json new file mode 100644 index 00000000000..f4feb76cae2 --- /dev/null +++ b/template/cosmosdb.json @@ -0,0 +1,47 @@ +{ + "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "location": { + "type": "String" + }, + "kind": { + "type": "String" + }, + "properties": { + "type": "Object" + } + }, + "variables": { + "cosmosDBName": "[concat('aso', uniqueString(resourceGroup().id))]" + }, + "resources": [ + { + "type": "Microsoft.DocumentDB/databaseAccounts", + "apiVersion": "2015-04-08", + "name": "[variables('cosmosDBName')]", + "location": "[parameters('location')]", + "dependsOn": [], + "kind": "[parameters('kind')]", + "properties": { + "databaseAccountOfferType": 
"[parameters('properties').databaseAccountOfferType]", + "locations": [ + { + "locationName": "[parameters('location')]", + "failoverPriority": 0 + } + ] + } + } + ], + "outputs": { + "cosmosDBName": { + "type": "string", + "value": "[variables('cosmosDBName')]" + }, + "primaryMasterKey": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.DocumentDB/databaseAccounts', variables('cosmosDBName')), '2015-04-08').primaryMasterKey]" + } + } +} From c80fa0074e36b7dc5dfa149a6f1d87b674bc730a Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Tue, 23 Jul 2019 21:47:58 +0800 Subject: [PATCH 17/34] Refine the logic of updating additional resources and output --- controllers/cosmosdb_controller.go | 24 ++++++++++-------------- controllers/rediscache_controller.go | 26 +++++++++++--------------- controllers/storage_controller.go | 28 ++++++++++++---------------- pkg/helpers/secret.go | 2 -- 4 files changed, 33 insertions(+), 47 deletions(-) diff --git a/controllers/cosmosdb_controller.go b/controllers/cosmosdb_controller.go index 388d4b73977..ce3108d30f5 100644 --- a/controllers/cosmosdb_controller.go +++ b/controllers/cosmosdb_controller.go @@ -160,27 +160,26 @@ func (r *CosmosDBReconciler) updateStatus(req ctrl.Request, resourceGroupName, d resourceCopy := resource.DeepCopy() resourceCopy.Status.DeploymentName = deploymentName resourceCopy.Status.ProvisioningState = provisioningState - if helpers.IsDeploymentComplete(provisioningState) { - if outputs != nil { - resourceCopy.Output.CosmosDBName = helpers.GetOutput(outputs, "cosmosDBName") - resourceCopy.Output.PrimaryMasterKey = helpers.GetOutput(outputs, "primaryMasterKey") - } - } err := r.Status().Update(ctx, resourceCopy) if err != nil { log.Error(err, "unable to update CosmosDB status") return nil, err } - log.Info("Updated Status", "CosmosDB.Namespace", resourceCopy.Namespace, "CosmosDB.Name", resourceCopy.Name, "CosmosDB.Status", resourceCopy.Status, "CosmosDB.Output", resourceCopy.Output) + log.V(1).Info("Updated Status", "CosmosDB.Namespace", resourceCopy.Namespace, "CosmosDB.Name", resourceCopy.Name, "CosmosDB.Status", resourceCopy.Status) if helpers.IsDeploymentComplete(provisioningState) { - err := r.syncAdditionalResources(req, resourceCopy) + if outputs != nil { + resourceCopy.Output.CosmosDBName = helpers.GetOutput(outputs, "cosmosDBName") + resourceCopy.Output.PrimaryMasterKey = helpers.GetOutput(outputs, "primaryMasterKey") + } + + err := r.syncAdditionalResourcesAndOutput(req, resourceCopy) if err != nil { log.Error(err, "error syncing resources") return nil, err } - log.Info("Updated additional resources", "CosmosDB.Namespace", resourceCopy.Namespace, "CosmosDB.Name", resourceCopy.Name, "CosmosDB.AdditionalResources", resourceCopy.AdditionalResources) + log.V(1).Info("Updated additional resources", "CosmosDB.Namespace", resourceCopy.Namespace, "CosmosDB.Name", resourceCopy.Name, "CosmosDB.AdditionalResources", resourceCopy.AdditionalResources, "CosmosDB.Output", resourceCopy.Output) } return resourceCopy, nil @@ -210,13 +209,10 @@ func (r *CosmosDBReconciler) deleteExternalResources(instance *servicev1alpha1.C return nil } -func (r *CosmosDBReconciler) syncAdditionalResources(req ctrl.Request, s *servicev1alpha1.CosmosDB) (err error) { +func (r *CosmosDBReconciler) syncAdditionalResourcesAndOutput(req ctrl.Request, s *servicev1alpha1.CosmosDB) (err error) { ctx := context.Background() log := r.Log.WithValues("cosmosdb", req.NamespacedName) - resource := &servicev1alpha1.CosmosDB{} - r.Get(ctx, req.NamespacedName, resource) 
- secrets := []string{} secretData := map[string]string{ "cosmosDBName": "{{.Obj.Output.CosmosDBName}}", @@ -225,7 +221,7 @@ func (r *CosmosDBReconciler) syncAdditionalResources(req ctrl.Request, s *servic secret := helpers.CreateSecret(s, s.Name, s.Namespace, secretData) secrets = append(secrets, secret) - resourceCopy := resource.DeepCopy() + resourceCopy := s.DeepCopy() resourceCopy.AdditionalResources.Secrets = secrets err = r.Update(ctx, resourceCopy) diff --git a/controllers/rediscache_controller.go b/controllers/rediscache_controller.go index 299a5337def..5fd6f7c4ba7 100644 --- a/controllers/rediscache_controller.go +++ b/controllers/rediscache_controller.go @@ -160,28 +160,27 @@ func (r *RedisCacheReconciler) updateStatus(req ctrl.Request, resourceGroupName, resourceCopy := resource.DeepCopy() resourceCopy.Status.DeploymentName = deploymentName resourceCopy.Status.ProvisioningState = provisioningState - if helpers.IsDeploymentComplete(provisioningState) { - if outputs != nil { - resourceCopy.Output.RedisCacheName = helpers.GetOutput(outputs, "redisCacheName") - resourceCopy.Output.PrimaryKey = helpers.GetOutput(outputs, "primaryKey") - resourceCopy.Output.SecondaryKey = helpers.GetOutput(outputs, "secondaryKey") - } - } err := r.Status().Update(ctx, resourceCopy) if err != nil { log.Error(err, "unable to update Redis Cache status") return nil, err } - log.Info("Updated Status", "Redis Cache.Namespace", resourceCopy.Namespace, "RedisCache.Name", resourceCopy.Name, "RedisCache.Status", resourceCopy.Status, "RedisCache.Output", resourceCopy.Output) + log.V(1).Info("Updated Status", "Redis Cache.Namespace", resourceCopy.Namespace, "RedisCache.Name", resourceCopy.Name, "RedisCache.Status", resourceCopy.Status) if helpers.IsDeploymentComplete(provisioningState) { - err := r.syncAdditionalResources(req, resourceCopy) + if outputs != nil { + resourceCopy.Output.RedisCacheName = helpers.GetOutput(outputs, "redisCacheName") + resourceCopy.Output.PrimaryKey = helpers.GetOutput(outputs, "primaryKey") + resourceCopy.Output.SecondaryKey = helpers.GetOutput(outputs, "secondaryKey") + } + + err := r.syncAdditionalResourcesAndOutput(req, resourceCopy) if err != nil { log.Error(err, "error syncing resources") return nil, err } - log.Info("Updated additional resources", "Storage.Namespace", resourceCopy.Namespace, "RedisCache.Name", resourceCopy.Name, "RedisCache.AdditionalResources", resourceCopy.AdditionalResources) + log.V(1).Info("Updated additional resources", "Storage.Namespace", resourceCopy.Namespace, "RedisCache.Name", resourceCopy.Name, "RedisCache.AdditionalResources", resourceCopy.AdditionalResources, "RedisCache.Output", resourceCopy.Output) } return resourceCopy, nil @@ -211,13 +210,10 @@ func (r *RedisCacheReconciler) deleteExternalResources(instance *servicev1alpha1 return nil } -func (r *RedisCacheReconciler) syncAdditionalResources(req ctrl.Request, s *servicev1alpha1.RedisCache) (err error) { +func (r *RedisCacheReconciler) syncAdditionalResourcesAndOutput(req ctrl.Request, s *servicev1alpha1.RedisCache) (err error) { ctx := context.Background() log := r.Log.WithValues("redisCache", req.NamespacedName) - resource := &servicev1alpha1.RedisCache{} - r.Get(ctx, req.NamespacedName, resource) - secrets := []string{} secretData := map[string]string{ "redisCacheName": "{{.Obj.Output.RedisCacheName}}", @@ -227,7 +223,7 @@ func (r *RedisCacheReconciler) syncAdditionalResources(req ctrl.Request, s *serv secret := helpers.CreateSecret(s, s.Name, s.Namespace, secretData) secrets = 
append(secrets, secret) - resourceCopy := resource.DeepCopy() + resourceCopy := s.DeepCopy() resourceCopy.AdditionalResources.Secrets = secrets err = r.Update(ctx, resourceCopy) diff --git a/controllers/storage_controller.go b/controllers/storage_controller.go index 009da5bc0ef..5d300deac20 100644 --- a/controllers/storage_controller.go +++ b/controllers/storage_controller.go @@ -160,6 +160,14 @@ func (r *StorageReconciler) updateStatus(req ctrl.Request, resourceGroupName, de resourceCopy := resource.DeepCopy() resourceCopy.Status.DeploymentName = deploymentName resourceCopy.Status.ProvisioningState = provisioningState + + err := r.Status().Update(ctx, resourceCopy) + if err != nil { + log.Error(err, "unable to update Storage status") + return nil, err + } + log.V(1).Info("Updated Status", "Storage.Namespace", resourceCopy.Namespace, "Storage.Name", resourceCopy.Name, "Storage.Status", resourceCopy.Status) + if helpers.IsDeploymentComplete(provisioningState) { if outputs != nil { resourceCopy.Output.StorageAccountName = helpers.GetOutput(outputs, "storageAccountName") @@ -168,22 +176,13 @@ func (r *StorageReconciler) updateStatus(req ctrl.Request, resourceGroupName, de resourceCopy.Output.ConnectionString1 = helpers.GetOutput(outputs, "connectionString1") resourceCopy.Output.ConnectionString2 = helpers.GetOutput(outputs, "connectionString2") } - } - err := r.Status().Update(ctx, resourceCopy) - if err != nil { - log.Error(err, "unable to update Storage status") - return nil, err - } - log.Info("Updated Status", "Storage.Namespace", resourceCopy.Namespace, "Storage.Name", resourceCopy.Name, "Storage.Status", resourceCopy.Status, "Storage.Output", resourceCopy.Output) - - if helpers.IsDeploymentComplete(provisioningState) { - err := r.syncAdditionalResources(req, resourceCopy) + err := r.syncAdditionalResourcesAndOutput(req, resourceCopy) if err != nil { log.Error(err, "error syncing resources") return nil, err } - log.Info("Updated additional resources", "Storage.Namespace", resourceCopy.Namespace, "Storage.Name", resourceCopy.Name, "Storage.AdditionalResources", resourceCopy.AdditionalResources) + log.V(1).Info("Updated additional resources", "Storage.Namespace", resourceCopy.Namespace, "Storage.Name", resourceCopy.Name, "Storage.AdditionalResources", resourceCopy.AdditionalResources, "Storage.Output", resourceCopy.Output) } return resourceCopy, nil @@ -213,13 +212,10 @@ func (r *StorageReconciler) deleteExternalResources(instance *servicev1alpha1.St return nil } -func (r *StorageReconciler) syncAdditionalResources(req ctrl.Request, s *servicev1alpha1.Storage) (err error) { +func (r *StorageReconciler) syncAdditionalResourcesAndOutput(req ctrl.Request, s *servicev1alpha1.Storage) (err error) { ctx := context.Background() log := r.Log.WithValues("storage", req.NamespacedName) - resource := &servicev1alpha1.Storage{} - r.Get(ctx, req.NamespacedName, resource) - secrets := []string{} secretData := map[string]string{ "storageAccountName": "{{.Obj.Output.StorageAccountName}}", @@ -231,7 +227,7 @@ func (r *StorageReconciler) syncAdditionalResources(req ctrl.Request, s *service secret := helpers.CreateSecret(s, s.Name, s.Namespace, secretData) secrets = append(secrets, secret) - resourceCopy := resource.DeepCopy() + resourceCopy := s.DeepCopy() resourceCopy.AdditionalResources.Secrets = secrets err = r.Update(ctx, resourceCopy) diff --git a/pkg/helpers/secret.go b/pkg/helpers/secret.go index eb1e24d151e..dc6f7a891fd 100644 --- a/pkg/helpers/secret.go +++ b/pkg/helpers/secret.go @@ -29,13 +29,11 
@@ func CreateSecret(resource interface{}, svcName, svcNamespace string, secretTemp _, err := config.Instance.KubeClientset.CoreV1().Secrets(svcNamespace).Get(secretName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { - log.Info("Creating Secret", "Secret.Name", secretName) _, err := config.Instance.KubeClientset.CoreV1().Secrets(svcNamespace).Create(secretObj) if err != nil { log.Error(err, "error creating Secret") } } else { - log.Info("Updating Secret", "Secret.Name", secretName) _, err := config.Instance.KubeClientset.CoreV1().Secrets(svcNamespace).Update(secretObj) if err != nil { log.Error(err, "error updating Secret") From edd2ccadcf415557d5374f1fe8fc829f321ed54b Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Tue, 23 Jul 2019 16:41:48 +0800 Subject: [PATCH 18/34] Deploy operator on a remote cluster --- Makefile | 7 ++- config/manager/manager.yaml | 4 ++ docs/development.md | 101 +++++++++++++++++++++++++----------- 3 files changed, 81 insertions(+), 31 deletions(-) diff --git a/Makefile b/Makefile index c6eb78b300c..f7d1a1252f5 100644 --- a/Makefile +++ b/Makefile @@ -59,13 +59,16 @@ generate: controller-gen # Build the docker image docker-build: test docker build . -t ${IMG} - @echo "updating kustomize image patch file for manager resource" - sed -i'' -e 's@image: .*@image: '"${IMG}"'@' ./config/default/manager_image_patch.yaml + @echo "please manually update kustomize image patch file for manager resource" + #sed -i'' -e 's@image: .*@image: '"${IMG}"'@' ./config/default/manager_image_patch.yaml # Push the docker image docker-push: docker push ${IMG} +# Build and Push the docker image +build-and-push: docker-build docker-push + # find or download controller-gen # download controller-gen if necessary controller-gen: diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index b6c85a52d5f..57aae23a9c0 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -27,6 +27,10 @@ spec: - /manager args: - --enable-leader-election + - --tenant-id= + - --subscription-id= + - --client-id= + - --client-secret= image: controller:latest name: manager resources: diff --git a/docs/development.md b/docs/development.md index 0c4ad2a2434..b5559336e23 100644 --- a/docs/development.md +++ b/docs/development.md @@ -8,48 +8,91 @@ To get started you will need: * [kubebuilder](https://book.kubebuilder.io/quick-start.html#installation) * [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -## Deploy Operator on a Local Cluster +## Deploy Operator and Test -### 1. Create Cluster +### Test it locally -``` -kind create cluster -export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" -``` +1. Create Cluster. -### 2. Install CRDs + ``` + kind create cluster + export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" + kubectl cluster-info + ``` -``` -make install -``` +1. Install CRDs. -### 3. Run Controller + ``` + make install + ``` -Setup the environment variables: +1. Run Controller. 
-``` -export CLOUD_NAME=AzurePublicCloud -export TENANT_ID= -export SUBSCRIPTION_ID= -export CLIENT_ID= -export CLIENT_SECRET= -``` + Setup the environment variables: -Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running): + ``` + export CLOUD_NAME=AzurePublicCloud + export TENANT_ID= + export SUBSCRIPTION_ID= + export CLIENT_ID= + export CLIENT_SECRET= + ``` -``` -make run -``` + Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running): -Refer to [kubebuilder's doc](https://book.kubebuilder.io/quick-start.html#test-it-out-locally). + ``` + make run + ``` -## Create a Custom Resource + Refer to [kubebuilder's doc](https://book.kubebuilder.io/quick-start.html#test-it-out-locally). -Create your CR (make sure to edit them first to specify the fields). Example: +1. Create a Custom Resource. -``` -kubectl apply -f config/samples/service_v1alpha1_storage.yaml -``` + Create your CR (make sure to edit them first to specify the fields). Example: + + ``` + kubectl apply -f config/samples/service_v1alpha1_storage.yaml + ``` + +### Test it on a remote cluster + +1. Create Cluster. + + ``` + az aks create -g -n + az aks get-credentials -g -n + kubectl cluster-info + ``` + +1. Install CRDs. + + ``` + make install + ``` + +1. Build and Push the image. + + ``` + IMG= make build-and-push + ``` + + Update kustomize image patch file `config/default/manager_image_patch.yaml` for manager resource manually. + +1. Run Controller. + + Update `config/manager/manager.yaml` with your service principal. + + ``` + make deploy + ``` + +1. Create a Custom Resource. + + Create your CR (make sure to edit them first to specify the fields). Example: + + ``` + kubectl apply -f config/samples/service_v1alpha1_storage.yaml + ``` ## Add a New Custom Resource From 425f873acfc5a26a0cd946eed8e49d1884718414 Mon Sep 17 00:00:00 2001 From: Zhongyi Zhang Date: Wed, 24 Jul 2019 01:44:58 +0000 Subject: [PATCH 19/34] add a sample app deployment yaml --- config/samples/.gitkeep | 0 docs/development.md | 4 +- examples/demo/azure-vote-app-redis.yaml | 65 +++++++++++++++++++ .../service/v1alpha1/cosmosdb.yaml | 0 .../service/v1alpha1/rediscache.yaml | 0 .../service/v1alpha1/storage.yaml | 0 6 files changed, 67 insertions(+), 2 deletions(-) create mode 100644 config/samples/.gitkeep create mode 100644 examples/demo/azure-vote-app-redis.yaml rename config/samples/service_v1alpha1_cosmosdb.yaml => examples/service/v1alpha1/cosmosdb.yaml (100%) rename config/samples/service_v1alpha1_rediscache.yaml => examples/service/v1alpha1/rediscache.yaml (100%) rename config/samples/service_v1alpha1_storage.yaml => examples/service/v1alpha1/storage.yaml (100%) diff --git a/config/samples/.gitkeep b/config/samples/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/docs/development.md b/docs/development.md index b5559336e23..696fb9d2093 100644 --- a/docs/development.md +++ b/docs/development.md @@ -51,7 +51,7 @@ To get started you will need: Create your CR (make sure to edit them first to specify the fields). Example: ``` - kubectl apply -f config/samples/service_v1alpha1_storage.yaml + kubectl apply -f examples/service/v1alpha1/storage.yaml ``` ### Test it on a remote cluster @@ -91,7 +91,7 @@ To get started you will need: Create your CR (make sure to edit them first to specify the fields). 
Example: ``` - kubectl apply -f config/samples/service_v1alpha1_storage.yaml + kubectl apply -f examples/service/v1alpha1/storage.yaml ``` ## Add a New Custom Resource diff --git a/examples/demo/azure-vote-app-redis.yaml b/examples/demo/azure-vote-app-redis.yaml new file mode 100644 index 00000000000..260d1c3d453 --- /dev/null +++ b/examples/demo/azure-vote-app-redis.yaml @@ -0,0 +1,65 @@ +apiVersion: service.azure/v1alpha1 +kind: RedisCache +metadata: + name: rediscache-sample +spec: + location: eastus2 + properties: + sku: + name: Basic + family: C + capacity: 1 + enableNonSslPort: true +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: azure-vote-front +spec: + replicas: 1 + selector: + matchLabels: + app: azure-vote-front + template: + metadata: + labels: + app: azure-vote-front + spec: + nodeSelector: + "beta.kubernetes.io/os": linux + containers: + - name: azure-vote-front + image: microsoft/azure-vote-front:v1 + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + ports: + - containerPort: 80 + env: + - name: REDIS_NAME + valueFrom: + secretKeyRef: + name: rediscache-sample + key: redisCacheName + - name: REDIS + value: $(REDIS_NAME).redis.cache.windows.net + - name: REDIS_PWD + valueFrom: + secretKeyRef: + name: rediscache-sample + key: primaryKey +--- +apiVersion: v1 +kind: Service +metadata: + name: azure-vote-front +spec: + type: LoadBalancer + ports: + - port: 80 + selector: + app: azure-vote-front diff --git a/config/samples/service_v1alpha1_cosmosdb.yaml b/examples/service/v1alpha1/cosmosdb.yaml similarity index 100% rename from config/samples/service_v1alpha1_cosmosdb.yaml rename to examples/service/v1alpha1/cosmosdb.yaml diff --git a/config/samples/service_v1alpha1_rediscache.yaml b/examples/service/v1alpha1/rediscache.yaml similarity index 100% rename from config/samples/service_v1alpha1_rediscache.yaml rename to examples/service/v1alpha1/rediscache.yaml diff --git a/config/samples/service_v1alpha1_storage.yaml b/examples/service/v1alpha1/storage.yaml similarity index 100% rename from config/samples/service_v1alpha1_storage.yaml rename to examples/service/v1alpha1/storage.yaml From 22097d9faf05276bf45e83d3cfa960e8508a67fc Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Wed, 24 Jul 2019 13:16:18 +0800 Subject: [PATCH 20/34] Generate assets for the templates --- Makefile | 8 + pkg/client/deployment/deployment.go | 11 +- pkg/cosmosdb/cosmosdb_template.go | 8 +- pkg/rediscache/rediscache_template.go | 8 +- pkg/storage/storage_template.go | 8 +- .../template/assets}/cosmosdb.json | 0 .../template/assets}/rediscache.json | 0 .../template/assets}/storage.json | 0 pkg/template/templates.go | 281 ++++++++++++++++++ 9 files changed, 311 insertions(+), 13 deletions(-) rename {template => pkg/template/assets}/cosmosdb.json (100%) rename {template => pkg/template/assets}/rediscache.json (100%) rename {template => pkg/template/assets}/storage.json (100%) create mode 100644 pkg/template/templates.go diff --git a/Makefile b/Makefile index f7d1a1252f5..ab026351b86 100644 --- a/Makefile +++ b/Makefile @@ -78,3 +78,11 @@ CONTROLLER_GEN=$(shell go env GOPATH)/bin/controller-gen else CONTROLLER_GEN=$(shell which controller-gen) endif + +.PHONY: install-bindata +install-bindata: + go get -u github.com/jteeuwen/go-bindata/... 
+ +.PHONE: +generate: + go-bindata -pkg template -prefix pkg/template/assets/ -o pkg/template/templates.go pkg/template/assets/ diff --git a/pkg/client/deployment/deployment.go b/pkg/client/deployment/deployment.go index bb3634a6e13..1ab6e99e044 100644 --- a/pkg/client/deployment/deployment.go +++ b/pkg/client/deployment/deployment.go @@ -18,20 +18,17 @@ func getDeploymentsClient() resources.DeploymentsClient { // CreateDeployment creates a template deployment using the // referenced JSON files for the template and its parameters -func CreateDeployment(ctx context.Context, resourceGroupName, deploymentName, templateUri string, params *map[string]interface{}) error { +func CreateDeployment(ctx context.Context, resourceGroupName, deploymentName string, template, params *map[string]interface{}) error { deployClient := getDeploymentsClient() - templateLink := resources.TemplateLink{ - URI: &templateUri, - } _, err := deployClient.CreateOrUpdate( ctx, resourceGroupName, deploymentName, resources.Deployment{ Properties: &resources.DeploymentProperties{ - TemplateLink: &templateLink, - Parameters: params, - Mode: resources.Incremental, + Template: template, + Parameters: params, + Mode: resources.Incremental, }, }, ) diff --git a/pkg/cosmosdb/cosmosdb_template.go b/pkg/cosmosdb/cosmosdb_template.go index a9bce2cd181..f1a3f0d97c5 100644 --- a/pkg/cosmosdb/cosmosdb_template.go +++ b/pkg/cosmosdb/cosmosdb_template.go @@ -2,11 +2,13 @@ package cosmosdb import ( "context" + "encoding/json" uuid "github.com/satori/go.uuid" azureV1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" "github.com/Azure/azure-service-operator/pkg/client/deployment" + "github.com/Azure/azure-service-operator/pkg/template" ) // New generates a new object @@ -23,7 +25,9 @@ type Template struct { func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (string, error) { deploymentName := uuid.NewV4().String() - templateURI := "https://azureserviceoperator.blob.core.windows.net/templates/cosmosdb.json" + asset, err := template.Asset("cosmosdb.json") + templateContents := make(map[string]interface{}) + json.Unmarshal(asset, &templateContents) params := map[string]interface{}{ "location": map[string]interface{}{ "value": t.CosmosDB.Spec.Location, @@ -36,6 +40,6 @@ func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName strin }, } - err := deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, templateURI, ¶ms) + err = deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, &templateContents, ¶ms) return deploymentName, err } diff --git a/pkg/rediscache/rediscache_template.go b/pkg/rediscache/rediscache_template.go index 38a86858ae2..7c1fdf9d6db 100644 --- a/pkg/rediscache/rediscache_template.go +++ b/pkg/rediscache/rediscache_template.go @@ -2,11 +2,13 @@ package rediscache import ( "context" + "encoding/json" uuid "github.com/satori/go.uuid" azureV1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" "github.com/Azure/azure-service-operator/pkg/client/deployment" + "github.com/Azure/azure-service-operator/pkg/template" ) // New generates a new object @@ -23,7 +25,9 @@ type Template struct { func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (string, error) { deploymentName := uuid.NewV4().String() - templateURI := "https://azureserviceoperator.blob.core.windows.net/templates/rediscache.json" + asset, err := template.Asset("rediscache.json") + templateContents := make(map[string]interface{}) + 
json.Unmarshal(asset, &templateContents) params := map[string]interface{}{ "location": map[string]interface{}{ "value": t.RedisCache.Spec.Location, @@ -42,6 +46,6 @@ func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName strin }, } - err := deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, templateURI, ¶ms) + err = deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, &templateContents, ¶ms) return deploymentName, err } diff --git a/pkg/storage/storage_template.go b/pkg/storage/storage_template.go index 9267595b4c4..e8502d73e5f 100644 --- a/pkg/storage/storage_template.go +++ b/pkg/storage/storage_template.go @@ -2,11 +2,13 @@ package storage import ( "context" + "encoding/json" uuid "github.com/satori/go.uuid" azureV1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" "github.com/Azure/azure-service-operator/pkg/client/deployment" + "github.com/Azure/azure-service-operator/pkg/template" ) // New generates a new object @@ -23,7 +25,9 @@ type Template struct { func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (string, error) { deploymentName := uuid.NewV4().String() - templateURI := "https://azureserviceoperator.blob.core.windows.net/templates/storage.json" + asset, err := template.Asset("storage.json") + templateContents := make(map[string]interface{}) + json.Unmarshal(asset, &templateContents) params := map[string]interface{}{ "location": map[string]interface{}{ "value": t.Storage.Spec.Location, @@ -42,6 +46,6 @@ func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName strin }, } - err := deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, templateURI, ¶ms) + err = deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, &templateContents, ¶ms) return deploymentName, err } diff --git a/template/cosmosdb.json b/pkg/template/assets/cosmosdb.json similarity index 100% rename from template/cosmosdb.json rename to pkg/template/assets/cosmosdb.json diff --git a/template/rediscache.json b/pkg/template/assets/rediscache.json similarity index 100% rename from template/rediscache.json rename to pkg/template/assets/rediscache.json diff --git a/template/storage.json b/pkg/template/assets/storage.json similarity index 100% rename from template/storage.json rename to pkg/template/assets/storage.json diff --git a/pkg/template/templates.go b/pkg/template/templates.go new file mode 100644 index 00000000000..dc5af57bee4 --- /dev/null +++ b/pkg/template/templates.go @@ -0,0 +1,281 @@ +// Code generated by go-bindata. +// sources: +// pkg/template/assets/cosmosdb.json +// pkg/template/assets/rediscache.json +// pkg/template/assets/storage.json +// DO NOT EDIT! 
+ +package template + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _cosmosdbJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x54\x4d\x6b\xdb\x40\x10\xbd\xfb\x57\x88\x6d\x41\x12\xc8\xb2\x5c\x5a\x28\xbe\x35\x18\x4a\x09\xa9\x0b\x0d\xbd\x18\x1f\xc6\xab\x71\xb2\xa9\xb4\xb3\xdd\x9d\x35\xa8\xc5\xff\xbd\xc8\xf2\x87\x24\x5b\x49\x88\x7d\x31\x33\xf3\xde\x9b\xd9\x37\xe3\x7f\xa3\x20\x08\x02\xf1\xde\xc9\x47\x2c\x41\xcc\x02\xf1\xc8\x6c\x66\x93\x49\x13\x48\x4b\xd0\xf0\x80\x25\x6a\x4e\xe1\xaf\xb7\x98\x4a\x2a\x0f\x39\x37\xf9\x90\x4d\x3f\x8d\xb3\xe9\x38\x9b\x4e\x72\x34\x05\x55\x75\xdd\x3d\x96\xa6\x00\xc6\xf4\xc9\x91\x7e\x27\x92\x46\x40\x92\x66\xd4\xfc\x0b\xad\x53\xa4\x6b\x9d\x69\x9a\xd5\xdf\x63\x81\x01\x0b\x25\x32\x5a\x27\x66\x41\xd3\xd5\x3e\x5e\x90\x04\x6e\x20\xe7\xe8\x3e\xc3\x95\xc1\x9a\xe8\x27\x5b\xa5\x1f\xc4\x29\xb9\x4b\xce\xe8\xdf\x4a\xe7\x6f\x43\x1a\x4b\x06\x2d\x2b\x74\xc3\xf8\xc5\xfa\x09\x25\xb7\xf0\xa3\x16\x8b\xd8\x82\x55\xb0\x2e\x7a\x04\x42\x92\x2b\xc9\xcd\x6f\xbe\x43\xb9\x27\x59\x4a\xd2\x12\x38\x0a\xc1\x51\x98\x04\x5e\xab\x3f\x1e\x9b\xce\x22\x8b\x8e\xbc\x95\xf8\xd5\x92\x37\x51\x9c\xaa\x3c\x8e\x57\xa2\xa3\x72\x2c\xa9\x55\x96\x27\x95\x81\x86\xef\x94\xb4\xe4\x68\xc3\xe9\x9c\xa4\xaf\xdd\x9a\xdf\x4c\x72\x60\x58\x83\xc3\x2f\x52\x92\xd7\xec\x44\xd2\x05\x83\x51\x2d\xdb\x1a\xcf\x3f\x8e\xb3\xcf\xfd\x3a\x7d\x1c\xe8\x34\x78\x14\xb6\x87\x0d\xe3\x55\x1f\xd2\x32\x57\x2c\xcf\x1b\x10\x85\xc7\xc4\x15\x4c\x8e\x06\x75\xee\x16\x35\x68\xb9\xea\x25\x0f\x7e\x77\xc9\xea\xe0\x15\xa2\x67\x1c\x6e\x84\xba\xef\xb2\xd8\x6c\xd0\xde\x1f\x1e\xb2\xc3\x7f\x26\x0a\xe3\x74\x08\xd5\x97\xef\xcc\xdf\x35\xaf\xfd\xb9\xec\xeb\x02\x7c\x5a\xa4\xd7\x3d\x60\x87\x63\x03\xaa\xa0\x2d\xda\x1f\x56\x91\x55\x5c\x89\x59\x90\x5d\xad\xde\x5d\x44\x57\xa3\xeb\xf9\xe6\xd7\xc1\x18\x41\x9e\x8d\xe7\xe7\x4f\x60\x60\x59\x5d\x73\x9d\x3d\xd7\xb6\x50\xf8\x17\xd7\x6c\xe0\xa2\x55\x09\xb6\xba\x03\xc7\x68\x6f\xb1\x7a\xbb\x72\xa1\x1c\xdf\x62\xe5\x4e\xf7\xf9\x2d\x8f\xc2\x57\xdd\x56\x98\x04\x83\x6d\xc7\x49\x10\x9e\xaf\x2b\x8c\xd3\x7e\xc3\xab\x8b\x7f\x9a\xd1\x6e\xf4\x3f\x00\x00\xff\xff\x6e\x83\x09\x2a\xc1\x05\x00\x00") + +func cosmosdbJsonBytes() ([]byte, error) { + return bindataRead( + _cosmosdbJson, + "cosmosdb.json", + ) +} + +func cosmosdbJson() (*asset, error) { + bytes, err := cosmosdbJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "cosmosdb.json", size: 1473, mode: os.FileMode(420), modTime: time.Unix(1563943619, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var 
_rediscacheJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x54\x4d\x8b\xdb\x30\x10\xbd\xe7\x57\x18\xb5\x60\x1b\x1c\xc5\xde\x52\x28\xb9\xf6\x50\xca\xd2\xa5\x74\x4b\x2f\x21\x87\x59\x65\x92\xa8\x6b\x6b\x54\x7d\x2c\xa4\x25\xff\xbd\x38\x4e\x6c\xc7\x5f\xe9\x61\x0f\xc5\x37\xcd\x9b\xf7\x9e\x3c\x6f\xf4\x67\x16\x04\x41\xc0\xde\x5a\xb1\xc7\x02\xd8\x32\x60\x7b\xe7\xf4\x72\xb1\xa8\x0e\x78\x01\x0a\x76\x58\xa0\x72\x1c\x7e\x7b\x83\x5c\x50\x71\xae\xd9\xc5\x5d\x9a\xbd\x9f\xa7\xd9\x3c\xcd\x16\x1b\xd4\x39\x1d\x4a\xdc\x77\x2c\x74\x0e\x0e\xf9\x4f\x4b\xea\x0d\x4b\x2a\x01\x41\xca\xa1\x72\x3f\xd0\x58\x49\xaa\xd4\xc9\x78\x5a\x7e\x17\x80\x06\x03\x05\x3a\x34\x96\x2d\x83\xca\xd5\xe9\x3c\x27\x01\xae\x6a\x69\x4e\x4f\x15\x77\xd0\x58\x12\x59\x67\xa4\xda\xb1\xba\x78\x4c\x9a\x6e\x6d\x48\xa3\x71\x12\x2d\xb7\xcf\x9e\x2b\x28\xf0\x55\x88\xb6\x50\xc8\xfc\xf0\x2a\x54\x02\x34\x08\xe9\x26\xc8\xa4\x72\x37\x99\x50\xc1\x53\x8e\x0f\xa4\x1e\x6d\xfe\x95\x8c\x1b\x67\x7b\x22\xca\x5b\x74\xb3\x16\x29\x7b\x01\x23\x4b\x9e\xce\x0c\x0c\x6e\xa4\xfd\x08\x62\x8f\x0f\xd5\x0f\x64\x2b\x41\x4a\x80\x8b\x42\xb0\x14\x26\x81\x57\xf2\x97\xc7\xc7\xd3\xa5\x23\x83\x96\xbc\x11\xf8\xc9\x90\xd7\x51\xcc\xe5\x26\x8e\xd7\xec\x4a\xe7\x02\x29\x75\x56\xb5\xce\x88\xe1\x2f\x52\x18\xb2\xb4\x75\xfc\x64\x61\xf1\xad\x74\xc3\x92\x6b\x30\x68\xd9\xca\xd6\x5d\x9a\x7d\x98\xa7\xef\xe6\x69\xd6\xc5\xa9\xcb\x05\xea\xab\x46\xe1\xf5\xf5\xc2\x78\xdd\x6d\x6a\x65\x90\xad\x9a\xa0\x46\xe1\xa5\x30\xd0\xd3\xcc\xa6\x37\x8a\x53\xdd\x3e\xfb\xc1\xc2\xb5\xcb\xb6\xd8\x40\x96\xfb\xba\x35\x45\x9d\xd0\x49\x92\x0a\x35\x41\xd3\x4a\x67\xda\x43\x1c\xfb\x4d\x6c\x20\x88\xa3\x0e\xba\xd8\x61\x1f\xe7\xf8\x91\xda\xca\x9d\x37\xf5\x63\x70\xbc\x02\x1e\x3b\x89\x5e\x9f\x93\x46\xde\x69\xef\x6e\xe5\x79\x7a\x8b\x3b\x93\x7d\x81\xdc\xff\x43\x86\x46\x36\x56\x16\x60\x0e\xf7\x78\xfb\xed\x18\x55\xcd\xa5\x75\xf7\x78\xb0\xf5\xa2\x7d\xde\x44\xe1\xe0\x92\x84\x49\x30\x61\x31\x4e\x82\xb0\x59\x94\x30\xe6\x8d\xb9\x11\xf7\x16\x05\xa9\xcd\x7f\xeb\xbf\x6d\x6f\xdd\x7b\xe2\x66\xc7\xd9\xdf\x00\x00\x00\xff\xff\xfe\x1f\x63\x43\xed\x06\x00\x00") + +func rediscacheJsonBytes() ([]byte, error) { + return bindataRead( + _rediscacheJson, + "rediscache.json", + ) +} + +func rediscacheJson() (*asset, error) { + bytes, err := rediscacheJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "rediscache.json", size: 1773, mode: os.FileMode(420), modTime: time.Unix(1563943619, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _storageJson = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xdc\x55\x4f\x6f\xd3\x4e\x10\xbd\xe7\x53\x58\xfe\xfd\xa4\x4d\x24\xc7\xb1\x2b\x21\x50\xab\x1c\x40\x20\x40\x15\x14\xa9\x11\x97\x28\x87\x65\x3d\x69\x97\xda\x3b\xcb\xce\x6c\x8b\x41\xfd\xee\xc8\x71\x9d\x38\x4e\x4c\xaa\xf4\x80\x40\xbe\xed\xfc\x7b\xef\xf9\xcd\xee\xcf\x41\x10\x04\x41\xf8\x3f\xa9\x6b\x28\x64\x78\x1a\x84\xd7\xcc\xf6\x74\x32\xa9\x0f\xe2\x42\x1a\x79\x05\x05\x18\x8e\xe5\x0f\xef\x20\x56\x58\x3c\xc4\x68\x72\x92\xa4\xcf\xc6\x49\x3a\x4e\xd2\x49\x06\x36\xc7\xb2\xca\x9b\x41\x61\x73\xc9\x10\x7f\x25\x34\xff\x85\x51\x3d\x40\xa1\x61\x30\xfc\x19\x1c\x69\x34\xd5\x9c\x34\x4e\xaa\xaf\x49\xb0\xd2\xc9\x02\x18\x1c\x85\xa7\x41\x8d\x6a\x75\x9e\xa3\x92\x5c\x97\x6c\x4e\x57\x11\x2e\x2d\x54\x8d\x2e\xd9\x69\x73\x15\xae\x83\xf7\xd1\xa6\x5a\x2a\x85\xde\xf0\xac\x4e\x3d\xa2\xc1\x8d\x36\xd9\xd1\xa3\x81\x68\xa6\xc1\x1d\x57\x4f\xde\x5a\x74\x4c\xef\x98\x2d\xcd\x9c\x5c\x2e\xb5\xba\x30\x79\xd9\xdf\xed\x15\x62\xde\xea\x35\x68\x75\x0c\x6f\xa5\xd3\xf2\x4b\x0e\x1d\x75\x89\xd1\xc9\x2b\x78\x59\xcb\xf4\x51\x16\xab\x46\x73\x85\x46\x49\x1e\x0a\x49\x28\xa2\xc0\x1b\xfd\xcd\x43\x8d\x75\xe8\x80\xd0\x3b\x05\x6f\x1d\x7a\x3b\x1c\xc5\x3a\x1b\x8d\x16\xe1\xd6\xac\x26\xa5\x9a\x35\x5f\xcf\xea\x01\xfd\x41\x2b\x87\x84\x4b\x8e\x2f\x6b\x2c\x93\x6d\x4c\x14\x46\xdb\x85\xd2\xea\x96\x8b\x4e\x92\xf4\xc5\x38\x79\x3e\x4e\xd2\x6e\x9e\x69\xc8\xac\xa9\x0f\xc5\x2e\x5d\x31\x5a\x74\x0b\x5b\x8e\x0b\xe7\x1b\x5b\x0e\x45\x13\xd8\x53\x93\x81\x05\x93\xd1\x45\x55\x34\x5f\x74\x82\x74\xe3\x77\x7e\xda\x36\xc4\xf6\x94\x96\x65\x45\xa3\xec\x1e\x7f\x04\x2d\x77\x6e\x37\xa8\x0e\xf7\x40\xb4\x0e\x2d\x38\xd6\x1d\x0b\x6c\x74\x6d\xfb\x75\x07\xd2\x43\x68\xb7\x6f\x70\xc0\xab\xdb\x9d\xfa\x12\x77\x99\x76\x7c\xfc\xa0\x69\x88\x9e\xad\xe7\xc7\xb8\xb8\xc7\x6f\x54\xaf\x5c\x47\x9c\x5b\x99\xfb\x47\xba\x65\xff\x35\x01\x65\x7a\xfc\xcc\x5c\x13\x9f\x43\x49\xeb\xe5\x7a\x9f\x0d\xc5\xc1\xc5\x10\x51\x70\x00\xec\x28\x0a\xc4\x66\x41\xc4\x28\xbe\x81\x92\xe6\xc9\x22\x5e\x8d\xee\xa7\x72\xf2\xb7\x50\x49\x7f\x4f\x45\xa1\x31\xa0\xaa\x95\xad\x2f\xaf\x27\xfc\xa2\xe6\x46\x7c\x0d\x4b\xe9\x73\x7e\x63\x32\x8b\xda\x30\x7d\x72\xc8\xa8\x30\x9f\x56\xcf\x26\x9d\xb5\x30\x4f\x45\x74\x80\x53\x24\x9a\xfc\x73\x28\xa7\x22\xfa\x33\x36\x88\xc4\x59\xc3\xe6\xd2\x2f\x97\xfa\xfb\x54\xa1\x83\xf8\x4e\x9b\x0c\xef\x28\x36\xc0\xbd\xae\xef\xea\xfb\x04\xdf\xfc\x7b\xfa\xa6\x47\xea\x5b\x3f\xa5\x83\xfb\xc1\xaf\x00\x00\x00\xff\xff\xa5\xf7\x8d\x6d\x9b\x09\x00\x00") + +func storageJsonBytes() ([]byte, error) { + return bindataRead( + _storageJson, + "storage.json", + ) +} + +func storageJson() (*asset, error) { + bytes, err := storageJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "storage.json", size: 2459, mode: os.FileMode(420), modTime: time.Unix(1563943619, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. 
+func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "cosmosdb.json": cosmosdbJson, + "rediscache.json": rediscacheJson, + "storage.json": storageJson, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. +func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "cosmosdb.json": &bintree{cosmosdbJson, map[string]*bintree{}}, + "rediscache.json": &bintree{rediscacheJson, map[string]*bintree{}}, + "storage.json": &bintree{storageJson, map[string]*bintree{}}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + 
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +} From 95fb61c8e352964be26d853f83cda5c55abbb2eb Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Wed, 24 Jul 2019 13:42:24 +0800 Subject: [PATCH 21/34] Requeue after 30 seconds to avoid too many requests Ignore the NotFound error when deleting cosmosdb --- controllers/cosmosdb_controller.go | 7 ++++--- controllers/rediscache_controller.go | 3 ++- controllers/storage_controller.go | 3 ++- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/controllers/cosmosdb_controller.go b/controllers/cosmosdb_controller.go index ce3108d30f5..c4630f67b21 100644 --- a/controllers/cosmosdb_controller.go +++ b/controllers/cosmosdb_controller.go @@ -26,6 +26,7 @@ package controllers import ( "context" + "time" "github.com/go-logr/logr" ctrl "sigs.k8s.io/controller-runtime" @@ -113,7 +114,7 @@ func (r *CosmosDBReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { return ctrl.Result{}, nil } else { log.Info("Requeue the request", "ProvisioningState", provisioningState) - return ctrl.Result{Requeue: true}, nil + return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil } } @@ -197,12 +198,12 @@ func (r *CosmosDBReconciler) deleteExternalResources(instance *servicev1alpha1.C resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "cosmosdb", instance.Name, instance.Namespace) log.Info("Deleting CosmosDB Account", "ResourceGroupName", resourceGroupName) _, err := group.DeleteGroup(ctx, resourceGroupName) - if err != nil { + if err != nil && helpers.IgnoreAzureResourceNotFound(err) != nil { return err } err = helpers.DeleteSecret(instance.Name, instance.Namespace) - if err != nil { + if err != nil && helpers.IgnoreKubernetesResourceNotFound(err) != nil { return err } diff --git a/controllers/rediscache_controller.go b/controllers/rediscache_controller.go index 5fd6f7c4ba7..e4c73fa7fd7 100644 --- a/controllers/rediscache_controller.go +++ b/controllers/rediscache_controller.go @@ -26,6 +26,7 @@ package controllers import ( "context" + "time" "github.com/go-logr/logr" ctrl "sigs.k8s.io/controller-runtime" @@ -113,7 +114,7 @@ func (r *RedisCacheReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) return ctrl.Result{}, nil } else { log.Info("Requeue the request", "ProvisioningState", provisioningState) - return ctrl.Result{Requeue: true}, nil + return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil } } diff --git a/controllers/storage_controller.go b/controllers/storage_controller.go index 5d300deac20..327ca7efd53 100644 --- a/controllers/storage_controller.go +++ b/controllers/storage_controller.go @@ -26,6 +26,7 @@ package controllers import ( "context" + "time" "github.com/go-logr/logr" ctrl "sigs.k8s.io/controller-runtime" @@ -113,7 +114,7 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { return ctrl.Result{}, nil } else { log.Info("Requeue the request", "ProvisioningState", provisioningState) - return ctrl.Result{Requeue: true}, nil + return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil } } From 205d045009ca7613692c4f3cce67289e9135fb69 Mon Sep 17 00:00:00 2001 From: ZeroMagic Date: Wed, 24 Jul 2019 13:43:21 +0800 Subject: [PATCH 22/34] Fix a bug of missing capacity of rediscache template --- pkg/template/assets/rediscache.json | 2 +- pkg/template/templates.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/template/assets/rediscache.json 
b/pkg/template/assets/rediscache.json index 79b77e0a1e7..551af6f2ce5 100644 --- a/pkg/template/assets/rediscache.json +++ b/pkg/template/assets/rediscache.json @@ -31,7 +31,7 @@ "sku": { "name": "[parameters('properties.sku.name')]", "family": "[parameters('properties.sku.family')]", - "capacity": 0 + "capacity": "[parameters('properties.sku.capacity')]" }, "enableNonSslPort": "[parameters('properties.enableNonSslPort')]", "redisConfiguration": {} diff --git a/pkg/template/templates.go b/pkg/template/templates.go index dc5af57bee4..6a9798c4ebc 100644 --- a/pkg/template/templates.go +++ b/pkg/template/templates.go @@ -85,12 +85,12 @@ func cosmosdbJson() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "cosmosdb.json", size: 1473, mode: os.FileMode(420), modTime: time.Unix(1563943619, 0)} + info := bindataFileInfo{name: "cosmosdb.json", size: 1473, mode: os.FileMode(420), modTime: time.Unix(1563946123, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _rediscacheJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x54\x4d\x8b\xdb\x30\x10\xbd\xe7\x57\x18\xb5\x60\x1b\x1c\xc5\xde\x52\x28\xb9\xf6\x50\xca\xd2\xa5\x74\x4b\x2f\x21\x87\x59\x65\x92\xa8\x6b\x6b\x54\x7d\x2c\xa4\x25\xff\xbd\x38\x4e\x6c\xc7\x5f\xe9\x61\x0f\xc5\x37\xcd\x9b\xf7\x9e\x3c\x6f\xf4\x67\x16\x04\x41\xc0\xde\x5a\xb1\xc7\x02\xd8\x32\x60\x7b\xe7\xf4\x72\xb1\xa8\x0e\x78\x01\x0a\x76\x58\xa0\x72\x1c\x7e\x7b\x83\x5c\x50\x71\xae\xd9\xc5\x5d\x9a\xbd\x9f\xa7\xd9\x3c\xcd\x16\x1b\xd4\x39\x1d\x4a\xdc\x77\x2c\x74\x0e\x0e\xf9\x4f\x4b\xea\x0d\x4b\x2a\x01\x41\xca\xa1\x72\x3f\xd0\x58\x49\xaa\xd4\xc9\x78\x5a\x7e\x17\x80\x06\x03\x05\x3a\x34\x96\x2d\x83\xca\xd5\xe9\x3c\x27\x01\xae\x6a\x69\x4e\x4f\x15\x77\xd0\x58\x12\x59\x67\xa4\xda\xb1\xba\x78\x4c\x9a\x6e\x6d\x48\xa3\x71\x12\x2d\xb7\xcf\x9e\x2b\x28\xf0\x55\x88\xb6\x50\xc8\xfc\xf0\x2a\x54\x02\x34\x08\xe9\x26\xc8\xa4\x72\x37\x99\x50\xc1\x53\x8e\x0f\xa4\x1e\x6d\xfe\x95\x8c\x1b\x67\x7b\x22\xca\x5b\x74\xb3\x16\x29\x7b\x01\x23\x4b\x9e\xce\x0c\x0c\x6e\xa4\xfd\x08\x62\x8f\x0f\xd5\x0f\x64\x2b\x41\x4a\x80\x8b\x42\xb0\x14\x26\x81\x57\xf2\x97\xc7\xc7\xd3\xa5\x23\x83\x96\xbc\x11\xf8\xc9\x90\xd7\x51\xcc\xe5\x26\x8e\xd7\xec\x4a\xe7\x02\x29\x75\x56\xb5\xce\x88\xe1\x2f\x52\x18\xb2\xb4\x75\xfc\x64\x61\xf1\xad\x74\xc3\x92\x6b\x30\x68\xd9\xca\xd6\x5d\x9a\x7d\x98\xa7\xef\xe6\x69\xd6\xc5\xa9\xcb\x05\xea\xab\x46\xe1\xf5\xf5\xc2\x78\xdd\x6d\x6a\x65\x90\xad\x9a\xa0\x46\xe1\xa5\x30\xd0\xd3\xcc\xa6\x37\x8a\x53\xdd\x3e\xfb\xc1\xc2\xb5\xcb\xb6\xd8\x40\x96\xfb\xba\x35\x45\x9d\xd0\x49\x92\x0a\x35\x41\xd3\x4a\x67\xda\x43\x1c\xfb\x4d\x6c\x20\x88\xa3\x0e\xba\xd8\x61\x1f\xe7\xf8\x91\xda\xca\x9d\x37\xf5\x63\x70\xbc\x02\x1e\x3b\x89\x5e\x9f\x93\x46\xde\x69\xef\x6e\xe5\x79\x7a\x8b\x3b\x93\x7d\x81\xdc\xff\x43\x86\x46\x36\x56\x16\x60\x0e\xf7\x78\xfb\xed\x18\x55\xcd\xa5\x75\xf7\x78\xb0\xf5\xa2\x7d\xde\x44\xe1\xe0\x92\x84\x49\x30\x61\x31\x4e\x82\xb0\x59\x94\x30\xe6\x8d\xb9\x11\xf7\x16\x05\xa9\xcd\x7f\xeb\xbf\x6d\x6f\xdd\x7b\xe2\x66\xc7\xd9\xdf\x00\x00\x00\xff\xff\xfe\x1f\x63\x43\xed\x06\x00\x00") +var _rediscacheJson = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x54\x4d\x8f\xd3\x30\x10\xbd\xf7\x57\x44\x06\x29\x89\x94\xba\xc9\x22\x24\xd4\x2b\x07\x84\x56\xac\x10\x8b\xb8\x54\x3d\xcc\xba\xd3\xd6\x6c\xe2\x31\xfe\x58\xa9\xa0\xfe\x77\x94\xa6\xf9\x68\x9a\xa4\x1c\xf6\x40\x7b\xf3\x3c\xbf\xf7\x26\xf3\xc6\x7f\x66\x41\x10\x04\xec\xad\x15\x7b\x2c\x80\x2d\x03\xb6\x77\x4e\x2f\x17\x8b\xea\x80\x17\xa0\x60\x87\x05\x2a\xc7\xe1\xb7\x37\xc8\x05\x15\xe7\x9a\x5d\xdc\xa5\xd9\xfb\x79\x9a\xcd\xd3\x6c\xb1\x41\x9d\xd3\xa1\xc4\x7d\xc7\x42\xe7\xe0\x90\xff\xb4\xa4\xde\xb0\xa4\x12\x10\xa4\x1c\x2a\xf7\x03\x8d\x95\xa4\x4a\x9d\x8c\xa7\xe5\xbf\x06\x68\x30\x50\xa0\x43\x63\xd9\x32\xa8\x5c\x9d\xce\x73\x12\xe0\xaa\x2b\xed\xe9\xa9\xe2\x0e\x1a\x4b\x22\xeb\x8c\x54\x3b\xd6\x14\x8f\x49\x7b\x5b\x1b\xd2\x68\x9c\x44\xcb\xed\xb3\xe7\x0a\x0a\x7c\x15\xa2\x2d\x14\x32\x3f\xbc\x0a\x95\x00\x0d\x42\xba\x09\x32\xa9\xdc\x4d\x26\x54\xf0\x94\xe3\x03\xa9\x47\x9b\x7f\x25\xe3\xc6\xd9\x9e\x88\xf2\x0e\xdd\xac\x43\xca\x5e\xc0\xc8\x92\xa7\x37\x03\x83\x1b\x69\x3f\x82\xd8\xe3\x43\xf5\x01\xd9\x4a\x90\x12\xe0\xa2\x10\x2c\x85\x49\xe0\x95\xfc\xe5\xf1\xf1\xd4\x74\x64\xd0\x92\x37\x02\x3f\x19\xf2\x3a\x8a\xb9\xdc\xc4\xf1\x9a\x5d\xe8\xd4\x90\x52\x67\xd5\xe8\x8c\x18\xfe\x22\x85\x21\x4b\x5b\xc7\x4f\x16\x16\xdf\x4a\x37\x2c\xb9\x04\x83\x96\x9d\x6c\xdd\xa5\xd9\x87\x79\xfa\x6e\x9e\x66\x7d\x9c\xaa\x1b\x68\x5a\x8d\xc2\xcb\xf6\xc2\x78\xdd\xbf\xd4\xc9\x20\x5b\xb5\x41\x8d\xc2\xba\x30\x70\xa7\x9d\xcd\xd5\x28\x4e\x75\xfb\xec\x07\x0b\x97\x2e\xbb\x62\x03\x59\xbe\xd6\x6d\x28\x9a\x84\x4e\x92\x54\xa8\x09\x9a\x4e\x3a\x27\x89\x6a\x5c\x58\x8f\xba\xfb\x3b\x5e\x93\xb3\x81\xc0\x8e\x0a\xf4\xb1\xc3\x7e\xcf\x31\x25\xb5\x95\x3b\x6f\x9a\x47\xe3\x78\x01\x3c\xf6\x92\xbf\x3e\x27\x92\xbc\xd3\xde\xdd\xca\xfd\xf4\xb6\xf7\x12\xf0\x02\xb9\xff\x87\xac\x8d\x6c\xb6\x2c\xc0\x1c\xee\xf1\xf6\x1b\x33\xaa\x9a\x4b\xeb\xee\xf1\x60\x9b\x85\xfc\xbc\x89\xc2\xc1\x65\x0a\x93\x60\xc2\x62\x9c\x04\x61\xbb\x50\x61\xcc\x5b\x73\x23\xee\x2d\x0a\x52\x9b\xff\xd6\x7f\xd7\xde\xfa\xea\x29\x9c\x1d\x67\x7f\x03\x00\x00\xff\xff\xa7\x3e\x2d\x81\x15\x07\x00\x00") func rediscacheJsonBytes() ([]byte, error) { return bindataRead( @@ -105,7 +105,7 @@ func rediscacheJson() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "rediscache.json", size: 1773, mode: os.FileMode(420), modTime: time.Unix(1563943619, 0)} + info := bindataFileInfo{name: "rediscache.json", size: 1813, mode: os.FileMode(420), modTime: time.Unix(1563946509, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -125,7 +125,7 @@ func storageJson() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "storage.json", size: 2459, mode: os.FileMode(420), modTime: time.Unix(1563943619, 0)} + info := bindataFileInfo{name: "storage.json", size: 2459, mode: os.FileMode(420), modTime: time.Unix(1563946123, 0)} a := &asset{bytes: bytes, info: info} return a, nil } From 9af1ae1f0e55568dbb9a5cc04fea0623e94bb741 Mon Sep 17 00:00:00 2001 From: ZeroMagic Date: Tue, 23 Jul 2019 09:26:17 +0000 Subject: [PATCH 23/34] fix: judge whether resources need to be updated With adding generation in status, we can judge whether resources need to be updated. 
Co-authored-by: Bin Xia --- api/v1alpha1/cosmosdb_types.go | 2 ++ api/v1alpha1/rediscache_types.go | 1 + api/v1alpha1/storage_types.go | 1 + config/crd/bases/service.azure_cosmosdbs.yaml | 6 +++--- config/crd/bases/service.azure_rediscaches.yaml | 3 +++ config/crd/bases/service.azure_storages.yaml | 3 +++ controllers/cosmosdb_controller.go | 9 ++++++++- controllers/rediscache_controller.go | 12 +++++++++++- controllers/storage_controller.go | 11 ++++++++++- 9 files changed, 42 insertions(+), 6 deletions(-) diff --git a/api/v1alpha1/cosmosdb_types.go b/api/v1alpha1/cosmosdb_types.go index 7d1340b05ef..b4cbd0c65aa 100644 --- a/api/v1alpha1/cosmosdb_types.go +++ b/api/v1alpha1/cosmosdb_types.go @@ -81,8 +81,10 @@ type CosmosDBLocation struct { type CosmosDBStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file + DeploymentName string `json:"deploymentName,omitempty"` ProvisioningState string `json:"provisioningState,omitempty"` + Generation int64 `json:"generation,omitempty"` } type CosmosDBOutput struct { diff --git a/api/v1alpha1/rediscache_types.go b/api/v1alpha1/rediscache_types.go index 80bdedac4b4..1fdb39a8f85 100644 --- a/api/v1alpha1/rediscache_types.go +++ b/api/v1alpha1/rediscache_types.go @@ -83,6 +83,7 @@ type RedisCacheStatus struct { DeploymentName string `json:"deploymentName,omitempty"` ProvisioningState string `json:"provisioningState,omitempty"` + Generation int64 `json:"generation,omitempty"` } type RedisCacheOutput struct { diff --git a/api/v1alpha1/storage_types.go b/api/v1alpha1/storage_types.go index 176ce8de8ca..7a0757967b0 100644 --- a/api/v1alpha1/storage_types.go +++ b/api/v1alpha1/storage_types.go @@ -84,6 +84,7 @@ type StorageStatus struct { DeploymentName string `json:"deploymentName,omitempty"` ProvisioningState string `json:"provisioningState,omitempty"` + Generation int64 `json:"generation,omitempty"` } type StorageOutput struct { diff --git a/config/crd/bases/service.azure_cosmosdbs.yaml b/config/crd/bases/service.azure_cosmosdbs.yaml index e5eb061f9c7..734ce52388b 100644 --- a/config/crd/bases/service.azure_cosmosdbs.yaml +++ b/config/crd/bases/service.azure_cosmosdbs.yaml @@ -72,10 +72,10 @@ spec: description: CosmosDBStatus defines the observed state of CosmosDB properties: deploymentName: - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state - of cluster Important: Run "make" to regenerate code after modifying - this file' type: string + generation: + format: int64 + type: integer provisioningState: type: string type: object diff --git a/config/crd/bases/service.azure_rediscaches.yaml b/config/crd/bases/service.azure_rediscaches.yaml index 4ad9cfcd827..9bccb81542f 100644 --- a/config/crd/bases/service.azure_rediscaches.yaml +++ b/config/crd/bases/service.azure_rediscaches.yaml @@ -77,6 +77,9 @@ spec: properties: deploymentName: type: string + generation: + format: int64 + type: integer provisioningState: type: string type: object diff --git a/config/crd/bases/service.azure_storages.yaml b/config/crd/bases/service.azure_storages.yaml index 5d8479132ac..0e17feddd40 100644 --- a/config/crd/bases/service.azure_storages.yaml +++ b/config/crd/bases/service.azure_storages.yaml @@ -101,6 +101,9 @@ spec: properties: deploymentName: type: string + generation: + format: int64 + type: integer provisioningState: type: string type: object diff --git a/controllers/cosmosdb_controller.go b/controllers/cosmosdb_controller.go index c4630f67b21..c4d2852a052 
100644 --- a/controllers/cosmosdb_controller.go +++ b/controllers/cosmosdb_controller.go @@ -111,13 +111,20 @@ func (r *CosmosDBReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { if err != nil { return ctrl.Result{}, err } - return ctrl.Result{}, nil + if instance.Status.Generation == instance.ObjectMeta.Generation { + return ctrl.Result{}, nil + } } else { log.Info("Requeue the request", "ProvisioningState", provisioningState) return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil } } + instance.Status.Generation = instance.ObjectMeta.Generation + if err := r.Status().Update(ctx, instance); err != nil { + return ctrl.Result{}, err + } + log.Info("Creating a new resource group", "ResourceGroupName", resourceGroupName) tags := map[string]*string{ "name": to.StringPtr(instance.Name), diff --git a/controllers/rediscache_controller.go b/controllers/rediscache_controller.go index e4c73fa7fd7..444eeeaa25c 100644 --- a/controllers/rediscache_controller.go +++ b/controllers/rediscache_controller.go @@ -104,6 +104,9 @@ func (r *RedisCacheReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) if deploymentName != "" { log.Info("Checking deployment", "ResourceGroupName", resourceGroupName, "DeploymentName", deploymentName) de, _ := deployment.GetDeployment(ctx, resourceGroupName, deploymentName) + if de.Properties == nil || de.Properties.ProvisioningState == nil { + return ctrl.Result{}, nil + } provisioningState := *de.Properties.ProvisioningState if helpers.IsDeploymentComplete(provisioningState) { log.Info("Deployment is complete", "ProvisioningState", provisioningState) @@ -111,13 +114,20 @@ func (r *RedisCacheReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) if err != nil { return ctrl.Result{}, err } - return ctrl.Result{}, nil + if instance.Status.Generation == instance.ObjectMeta.Generation { + return ctrl.Result{}, nil + } } else { log.Info("Requeue the request", "ProvisioningState", provisioningState) return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil } } + instance.Status.Generation = instance.ObjectMeta.Generation + if err := r.Status().Update(ctx, instance); err != nil { + return ctrl.Result{}, err + } + log.Info("Creating a new resource group", "ResourceGroupName", resourceGroupName) tags := map[string]*string{ "name": to.StringPtr(instance.Name), diff --git a/controllers/storage_controller.go b/controllers/storage_controller.go index 327ca7efd53..d09e084725d 100644 --- a/controllers/storage_controller.go +++ b/controllers/storage_controller.go @@ -104,6 +104,9 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { if deploymentName != "" { log.Info("Checking deployment", "ResourceGroupName", resourceGroupName, "DeploymentName", deploymentName) de, _ := deployment.GetDeployment(ctx, resourceGroupName, deploymentName) + if de.Properties == nil || de.Properties.ProvisioningState == nil { + return ctrl.Result{}, nil + } provisioningState := *de.Properties.ProvisioningState if helpers.IsDeploymentComplete(provisioningState) { log.Info("Deployment is complete", "ProvisioningState", provisioningState) @@ -111,12 +114,18 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { if err != nil { return ctrl.Result{}, err } - return ctrl.Result{}, nil + if instance.Status.Generation == instance.ObjectMeta.Generation { + return ctrl.Result{}, nil + } } else { log.Info("Requeue the request", "ProvisioningState", provisioningState) return ctrl.Result{Requeue: true, RequeueAfter: 30 * 
time.Second}, nil } } + instance.Status.Generation = instance.ObjectMeta.Generation + if err := r.Status().Update(ctx, instance); err != nil { + return ctrl.Result{}, err + } log.Info("Creating a new resource group", "ResourceGroupName", resourceGroupName) tags := map[string]*string{ From e4bfa743bd2db549718f5749d8a53c1c43b0db5c Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Wed, 24 Jul 2019 14:43:15 +0800 Subject: [PATCH 24/34] Add docs to run the demo --- Makefile | 2 +- README.md | 58 ++++++++++++++++++++ config/default/manager_auth_proxy_patch.yaml | 4 ++ config/default/manager_image_patch.yaml | 2 +- config/manager/manager.yaml | 4 -- docs/development.md | 2 - examples/demo/azure-vote-app-redis.yaml | 6 +- 7 files changed, 67 insertions(+), 11 deletions(-) diff --git a/Makefile b/Makefile index ab026351b86..ef5484728d7 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ # Image URL to use all building/pushing image targets -IMG ?= controller:latest +IMG ?= bingosummer/azure-service-operator:v0.0.2 # Produce CRDs that work back to Kubernetes 1.11 (no version conversion) CRD_OPTIONS ?= "crd:trivialVersions=true" diff --git a/README.md b/README.md index b81a84e5649..12af74f5df0 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,61 @@ +# Azure Service Operator + +The Azure Service Operator allows you to manage Azure resources using Kubernetes [Custom Resource Definitions (CRD)](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/). + +## Prerequisites + +* a Kubernetes cluster to run against. You can use [KIND](https://sigs.k8s.io/kind) to get a local cluster for testing, or run against a remote cluster, e.g. [Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough). +* [kubebuilder](https://book.kubebuilder.io/quick-start.html#installation) +* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) + +## Getting Started + +To walkthrough the demo, you'll need an AKS cluster because the service needs an external IP address. + +1. Create Cluster. + + ``` + az aks create -g -n + az aks get-credentials -g -n + kubectl cluster-info + ``` + +1. Install CRDs. + + ``` + make install + ``` + +1. Run Controller. + + Update `config/default/manager_auth_proxy_patch.yaml` with your service principal. + + ``` + make deploy + ``` + +1. Run the demo. + + ``` + kubectl apply -f examples/demo/ + ``` + +1. Test the demo. + + To monitor progress, use the `kubectl get service` command with the `--watch` argument. + + ``` + kubectl get service azure-vote-front --watch + ``` + + Initially the `EXTERNAL-IP` for the `azure-vote-front` service is shown as pending. + + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + azure-vote-front LoadBalancer 10.0.37.27 80:30572/TCP 6s + ``` + + After the `EXTERNAL-IP` address changes from `pending` to an actual public IP address, open a web browser to the external IP address of your service. 
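The service-principal values referenced in the "Run Controller" step above are passed to the manager as plain container args (see the manager_auth_proxy_patch.yaml change below). A hypothetical sketch, assuming the manager reads them with the standard flag package; the real wiring lives in main.go and may differ:

```go
package main

import "flag"

func main() {
	// Hypothetical flag wiring for the credentials supplied through
	// config/default/manager_auth_proxy_patch.yaml; names mirror the args
	// in that patch, not necessarily the committed main.go.
	var tenantID, subscriptionID, clientID, clientSecret string
	flag.StringVar(&tenantID, "tenant-id", "", "Azure AD tenant ID")
	flag.StringVar(&subscriptionID, "subscription-id", "", "Azure subscription ID")
	flag.StringVar(&clientID, "client-id", "", "service principal application ID")
	flag.StringVar(&clientSecret, "client-secret", "", "service principal secret")
	flag.Parse()

	// The parsed values would then be handed to the Azure SDK authorizer.
	_, _, _, _ = tenantID, subscriptionID, clientID, clientSecret
}
```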
# Contributing diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml index d3994fb918f..4aa6564edba 100644 --- a/config/default/manager_auth_proxy_patch.yaml +++ b/config/default/manager_auth_proxy_patch.yaml @@ -22,3 +22,7 @@ spec: - name: manager args: - "--metrics-addr=127.0.0.1:8080" + - --tenant-id= + - --subscription-id= + - --client-id= + - --client-secret= diff --git a/config/default/manager_image_patch.yaml b/config/default/manager_image_patch.yaml index eb909570e17..2da6e850fc6 100644 --- a/config/default/manager_image_patch.yaml +++ b/config/default/manager_image_patch.yaml @@ -8,5 +8,5 @@ spec: spec: containers: # Change the value of image field below to your controller image URL - - image: IMAGE_URL + - image: bingosummer/azure-service-operator:v0.0.2 name: manager diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 57aae23a9c0..b6c85a52d5f 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -27,10 +27,6 @@ spec: - /manager args: - --enable-leader-election - - --tenant-id= - - --subscription-id= - - --client-id= - - --client-secret= image: controller:latest name: manager resources: diff --git a/docs/development.md b/docs/development.md index 696fb9d2093..c06ba3be01e 100644 --- a/docs/development.md +++ b/docs/development.md @@ -2,8 +2,6 @@ ## Prerequisites -To get started you will need: - * a Kubernetes cluster to run against. You can use [KIND](https://sigs.k8s.io/kind) to get a local cluster for testing, or run against a remote cluster, e.g. [Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough). * [kubebuilder](https://book.kubebuilder.io/quick-start.html#installation) * [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) diff --git a/examples/demo/azure-vote-app-redis.yaml b/examples/demo/azure-vote-app-redis.yaml index 260d1c3d453..6609a498a14 100644 --- a/examples/demo/azure-vote-app-redis.yaml +++ b/examples/demo/azure-vote-app-redis.yaml @@ -1,7 +1,7 @@ apiVersion: service.azure/v1alpha1 kind: RedisCache metadata: - name: rediscache-sample + name: azure-redis spec: location: eastus2 properties: @@ -43,14 +43,14 @@ spec: - name: REDIS_NAME valueFrom: secretKeyRef: - name: rediscache-sample + name: azure-redis key: redisCacheName - name: REDIS value: $(REDIS_NAME).redis.cache.windows.net - name: REDIS_PWD valueFrom: secretKeyRef: - name: rediscache-sample + name: azure-redis key: primaryKey --- apiVersion: v1 From 3753397479db16818c60cbb82ac8b19a1d795b85 Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Wed, 24 Jul 2019 17:21:47 +0800 Subject: [PATCH 25/34] Update manager-role to operate secrets Workaround: the rule should be appended. But I don't know how for now. The workaround is to copy config/rbac/role.yaml and add the new rule. Should be fixed in future. --- config/default/kustomization.yaml | 1 + config/default/manager_role_patch.yaml | 80 ++++++++++++++++++++++++++ 2 files changed, 81 insertions(+) create mode 100644 config/default/manager_role_patch.yaml diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index f62b8280aa8..2d88df6d833 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -27,6 +27,7 @@ patches: # Only one of manager_auth_proxy_patch.yaml and # manager_prometheus_metrics_patch.yaml should be enabled. 
- manager_auth_proxy_patch.yaml +- manager_role_patch.yaml # If you want your controller-manager to expose the /metrics # endpoint w/o any authn/z, uncomment the following line and # comment manager_auth_proxy_patch.yaml. diff --git a/config/default/manager_role_patch.yaml b/config/default/manager_role_patch.yaml new file mode 100644 index 00000000000..ecf4ba85b76 --- /dev/null +++ b/config/default/manager_role_patch.yaml @@ -0,0 +1,80 @@ + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: manager-role +rules: +- apiGroups: + - service.azure + resources: + - cosmosdbs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - service.azure + resources: + - cosmosdbs/status + verbs: + - get + - patch + - update +- apiGroups: + - service.azure + resources: + - rediscaches + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - service.azure + resources: + - rediscaches/status + verbs: + - get + - patch + - update +- apiGroups: + - service.azure + resources: + - storages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - service.azure + resources: + - storages/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch \ No newline at end of file From 073ed3f3ef2cac2969a08722fdf6314ffb528506 Mon Sep 17 00:00:00 2001 From: Bin Xia Date: Tue, 6 Aug 2019 23:01:29 +0800 Subject: [PATCH 26/34] fix(Makefile): rename the target from "generate" to "generate-template" to avoid conflict --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ef5484728d7..a3fb968aca5 100644 --- a/Makefile +++ b/Makefile @@ -84,5 +84,5 @@ install-bindata: go get -u github.com/jteeuwen/go-bindata/... .PHONE: -generate: +generate-template: go-bindata -pkg template -prefix pkg/template/assets/ -o pkg/template/templates.go pkg/template/assets/ From 218234ff3d30bac379e3ba02ef4fcc8f3a527709 Mon Sep 17 00:00:00 2001 From: Chris Risner Date: Tue, 27 Aug 2019 22:08:10 -0600 Subject: [PATCH 27/34] Refactoring data focused operators. 
Storage currently working though it needs cleanup --- PROJECT | 2 +- api/{v1alpha1 => v1}/storage_types.go | 44 ++-- api/v1alpha1/storage_types_test.go | 85 -------- api/v1alpha1/zz_generated.deepcopy.go | 147 -------------- ...yaml => azure.microsoft.com_storages.yaml} | 23 ++- config/rbac/role.yaml | 96 ++++----- .../v1alpha1 => config/samples}/cosmosdb.yaml | 0 .../samples}/rediscache.yaml | 0 .../v1alpha1 => config/samples}/storage.yaml | 7 +- controllers/storage_controller.go | 191 ++++++++++++++++-- main.go | 5 +- pkg/resourcemanager/storages/storages.go | 132 ++++++++++++ pkg/storage/storage_template.go | 51 ----- 13 files changed, 396 insertions(+), 387 deletions(-) rename api/{v1alpha1 => v1}/storage_types.go (89%) delete mode 100644 api/v1alpha1/storage_types_test.go rename config/crd/bases/{service.azure_storages.yaml => azure.microsoft.com_storages.yaml} (98%) rename {examples/service/v1alpha1 => config/samples}/cosmosdb.yaml (100%) rename {examples/service/v1alpha1 => config/samples}/rediscache.yaml (100%) rename {examples/service/v1alpha1 => config/samples}/storage.yaml (50%) create mode 100644 pkg/resourcemanager/storages/storages.go delete mode 100644 pkg/storage/storage_template.go diff --git a/PROJECT b/PROJECT index e0c44d3b58b..979254fcd9b 100644 --- a/PROJECT +++ b/PROJECT @@ -3,7 +3,7 @@ domain: microsoft.com repo: github.com/Azure/azure-service-operator resources: - group: service - version: v1alpha1 + version: v1 kind: Storage - group: service version: v1alpha1 diff --git a/api/v1alpha1/storage_types.go b/api/v1/storage_types.go similarity index 89% rename from api/v1alpha1/storage_types.go rename to api/v1/storage_types.go index 7a0757967b0..4a32f9f737b 100644 --- a/api/v1alpha1/storage_types.go +++ b/api/v1/storage_types.go @@ -22,7 +22,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE */ -package v1alpha1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,7 +38,8 @@ type StorageSpec struct { // +kubebuilder:validation:MinLength=0 - Location string `json:"location,omitempty"` + Location string `json:"location,omitempty"` + ResourceGroupName string `json:"resourceGroup"` Sku StorageSku `json:"sku,omitempty"` @@ -82,22 +83,11 @@ type StorageStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file - DeploymentName string `json:"deploymentName,omitempty"` - ProvisioningState string `json:"provisioningState,omitempty"` - Generation int64 `json:"generation,omitempty"` -} - -type StorageOutput struct { - StorageAccountName string `json:"storageAccountName,omitempty"` - Key1 string `json:"key1,omitempty"` - Key2 string `json:"key2,omitempty"` - ConnectionString1 string `json:"connectionString1,omitempty"` - ConnectionString2 string `json:"connectionString2,omitempty"` -} - -// StorageAdditionalResources holds the additional resources -type StorageAdditionalResources struct { - Secrets []string `json:"secrets,omitempty"` + // DeploymentName string `json:"deploymentName,omitempty"` + // ProvisioningState string `json:"provisioningState,omitempty"` + // Generation int64 `json:"generation,omitempty"` + Provisioning bool `json:"provisioning,omitempty"` + Provisioned bool `json:"provisioned,omitempty"` } // +kubebuilder:object:root=true @@ -114,7 +104,21 @@ type Storage struct { AdditionalResources StorageAdditionalResources `json:"additionalResources,omitempty"` } +type StorageOutput struct { + StorageAccountName 
string `json:"storageAccountName,omitempty"` + Key1 string `json:"key1,omitempty"` + Key2 string `json:"key2,omitempty"` + ConnectionString1 string `json:"connectionString1,omitempty"` + ConnectionString2 string `json:"connectionString2,omitempty"` +} + +// StorageAdditionalResources holds the additional resources +type StorageAdditionalResources struct { + Secrets []string `json:"secrets,omitempty"` +} + // +kubebuilder:object:root=true +// +kubebuilder:subresource:status // StorageList contains a list of Storage type StorageList struct { @@ -126,3 +130,7 @@ type StorageList struct { func init() { SchemeBuilder.Register(&Storage{}, &StorageList{}) } + +func (storage *Storage) IsSubmitted() bool { + return storage.Status.Provisioning || storage.Status.Provisioned +} diff --git a/api/v1alpha1/storage_types_test.go b/api/v1alpha1/storage_types_test.go deleted file mode 100644 index 2f5353a1d8a..00000000000 --- a/api/v1alpha1/storage_types_test.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -MIT License - -Copyright (c) Microsoft Corporation. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE -*/ - -package v1alpha1 - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "golang.org/x/net/context" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -// These tests are written in BDD-style using Ginkgo framework. Refer to -// http://onsi.github.io/ginkgo to learn more. - -var _ = Describe("Storage", func() { - var ( - key types.NamespacedName - created, fetched *Storage - ) - - BeforeEach(func() { - // Add any setup steps that needs to be executed before each test - }) - - AfterEach(func() { - // Add any teardown steps that needs to be executed after each test - }) - - // Add Tests for OpenAPI validation (or additonal CRD features) specified in - // your API definition. - // Avoid adding tests for vanilla CRUD operations because they would - // test Kubernetes API server, which isn't the goal here. 
- Context("Create API", func() { - - It("should create an object successfully", func() { - - key = types.NamespacedName{ - Name: "foo", - Namespace: "default", - } - created = &Storage{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }} - - By("creating an API obj") - Expect(k8sClient.Create(context.TODO(), created)).To(Succeed()) - - fetched = &Storage{} - Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed()) - Expect(fetched).To(Equal(created)) - - By("deleting the created object") - Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed()) - Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed()) - }) - - }) - -}) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 857ab0b0439..533969188b7 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -322,150 +322,3 @@ func (in *RedisCacheStatus) DeepCopy() *RedisCacheStatus { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Storage) DeepCopyInto(out *Storage) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - out.Output = in.Output - in.AdditionalResources.DeepCopyInto(&out.AdditionalResources) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. -func (in *Storage) DeepCopy() *Storage { - if in == nil { - return nil - } - out := new(Storage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Storage) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageAdditionalResources) DeepCopyInto(out *StorageAdditionalResources) { - *out = *in - if in.Secrets != nil { - in, out := &in.Secrets, &out.Secrets - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAdditionalResources. -func (in *StorageAdditionalResources) DeepCopy() *StorageAdditionalResources { - if in == nil { - return nil - } - out := new(StorageAdditionalResources) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageList) DeepCopyInto(out *StorageList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Storage, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageList. -func (in *StorageList) DeepCopy() *StorageList { - if in == nil { - return nil - } - out := new(StorageList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *StorageList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *StorageOutput) DeepCopyInto(out *StorageOutput) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOutput. -func (in *StorageOutput) DeepCopy() *StorageOutput { - if in == nil { - return nil - } - out := new(StorageOutput) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageSku) DeepCopyInto(out *StorageSku) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSku. -func (in *StorageSku) DeepCopy() *StorageSku { - if in == nil { - return nil - } - out := new(StorageSku) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { - *out = *in - out.Sku = in.Sku - if in.EnableHTTPSTrafficOnly != nil { - in, out := &in.EnableHTTPSTrafficOnly, &out.EnableHTTPSTrafficOnly - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. -func (in *StorageSpec) DeepCopy() *StorageSpec { - if in == nil { - return nil - } - out := new(StorageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageStatus) DeepCopyInto(out *StorageStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStatus. -func (in *StorageStatus) DeepCopy() *StorageStatus { - if in == nil { - return nil - } - out := new(StorageStatus) - in.DeepCopyInto(out) - return out -} diff --git a/config/crd/bases/service.azure_storages.yaml b/config/crd/bases/azure.microsoft.com_storages.yaml similarity index 98% rename from config/crd/bases/service.azure_storages.yaml rename to config/crd/bases/azure.microsoft.com_storages.yaml index cb78dce423c..36bd1ce2c1f 100644 --- a/config/crd/bases/service.azure_storages.yaml +++ b/config/crd/bases/azure.microsoft.com_storages.yaml @@ -4,9 +4,9 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: creationTimestamp: null - name: storages.service.azure + name: storages.azure.microsoft.com spec: - group: service.azure + group: azure.microsoft.com names: kind: Storage plural: storages @@ -444,6 +444,8 @@ spec: location: minLength: 0 type: string + resourceGroup: + type: string sku: description: Sku the SKU of the storage account. 
properties: @@ -465,21 +467,22 @@ spec: type: object supportsHttpsTrafficOnly: type: boolean + required: + - resourceGroup type: object status: description: StorageStatus defines the observed state of Storage properties: - deploymentName: - type: string - generation: - format: int64 - type: integer - provisioningState: - type: string + provisioned: + type: boolean + provisioning: + description: DeploymentName string `json:"deploymentName,omitempty"` + ProvisioningState string `json:"provisioningState,omitempty"` Generation int64 `json:"generation,omitempty"` + type: boolean type: object type: object versions: - - name: v1alpha1 + - name: v1 served: true storage: true status: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index d1ea43ebd46..13454a16c39 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -7,17 +7,12 @@ metadata: name: manager-role rules: - apiGroups: - - apps + - azure.microsoft.com resources: - - deployments + - events verbs: - create - - delete - - get - - list - patch - - update - - watch - apiGroups: - service.azure resources: @@ -41,7 +36,7 @@ rules: - apiGroups: - azure.microsoft.com resources: - - eventhubnamespaces + - eventhubs verbs: - create - delete @@ -53,15 +48,15 @@ rules: - apiGroups: - azure.microsoft.com resources: - - keyvaults/status + - eventhubs/status verbs: - get - patch - update - apiGroups: - - service.azure + - azure.microsoft.com resources: - - rediscaches/status + - eventhubnamespaces/status verbs: - get - patch @@ -69,15 +64,15 @@ rules: - apiGroups: - azure.microsoft.com resources: - - resourcegroups/status + - keyvaults/status verbs: - get - patch - update - apiGroups: - - "" + - azure.microsoft.com resources: - - secrets + - consumergroups verbs: - create - delete @@ -86,33 +81,34 @@ rules: - patch - update - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - watch - apiGroups: - azure.microsoft.com resources: - - consumergroups/status + - eventhubnamespaces verbs: + - create + - delete - get + - list - patch - update + - watch - apiGroups: - - azure.microsoft.com + - service.azure resources: - - eventhubs/status + - rediscaches verbs: + - create + - delete - get + - list - patch - update + - watch - apiGroups: - azure.microsoft.com resources: - - keyvaults + - resourcegroups verbs: - create - delete @@ -130,21 +126,17 @@ rules: - patch - update - apiGroups: - - azure.microsoft.com + - service.azure resources: - - resourcegroups + - rediscaches/status verbs: - - create - - delete - get - - list - patch - update - - watch - apiGroups: - service.azure resources: - - rediscaches + - storages verbs: - create - delete @@ -156,34 +148,35 @@ rules: - apiGroups: - service.azure resources: - - storages + - storages/status verbs: - - create - - delete - get - - list - patch - update - - watch - apiGroups: - - service.azure + - apps resources: - - storages/status + - deployments verbs: + - create + - delete - get + - list - patch - update + - watch - apiGroups: - azure.microsoft.com resources: - - events + - consumergroups/status verbs: - - create + - get - patch + - update - apiGroups: - azure.microsoft.com resources: - - consumergroups + - keyvaults verbs: - create - delete @@ -195,7 +188,15 @@ rules: - apiGroups: - azure.microsoft.com resources: - - eventhubs + - resourcegroups/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - secrets verbs: - create - delete @@ -205,10 +206,9 @@ rules: - update - watch - apiGroups: - - azure.microsoft.com + - "" resources: - - 
eventhubnamespaces/status + - events verbs: - - get - - patch - - update + - create + - watch diff --git a/examples/service/v1alpha1/cosmosdb.yaml b/config/samples/cosmosdb.yaml similarity index 100% rename from examples/service/v1alpha1/cosmosdb.yaml rename to config/samples/cosmosdb.yaml diff --git a/examples/service/v1alpha1/rediscache.yaml b/config/samples/rediscache.yaml similarity index 100% rename from examples/service/v1alpha1/rediscache.yaml rename to config/samples/rediscache.yaml diff --git a/examples/service/v1alpha1/storage.yaml b/config/samples/storage.yaml similarity index 50% rename from examples/service/v1alpha1/storage.yaml rename to config/samples/storage.yaml index c7ceb59e84d..df8ba9d465e 100644 --- a/examples/service/v1alpha1/storage.yaml +++ b/config/samples/storage.yaml @@ -1,9 +1,10 @@ -apiVersion: service.azure/v1alpha1 +apiVersion: azure.microsoft.com/v1 kind: Storage metadata: - name: storage-sample + name: storagesample123xyzkj spec: - location: eastus2 + location: westus + resourceGroup: resourcegroup-sample-1907 sku: name: Standard_RAGRS kind: StorageV2 diff --git a/controllers/storage_controller.go b/controllers/storage_controller.go index d09e084725d..4d7f3fa6c0e 100644 --- a/controllers/storage_controller.go +++ b/controllers/storage_controller.go @@ -26,50 +26,73 @@ package controllers import ( "context" + "fmt" + "os" + "strconv" "time" + //resoucegroupsresourcemanager "github.com/Azure/azure-service-operator/pkg/resourcemanager/resourcegroups" "github.com/go-logr/logr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - servicev1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" - "github.com/Azure/azure-service-operator/pkg/client/deployment" - "github.com/Azure/azure-service-operator/pkg/client/group" - "github.com/Azure/azure-service-operator/pkg/config" - "github.com/Azure/azure-service-operator/pkg/helpers" - storagetemplate "github.com/Azure/azure-service-operator/pkg/storage" - "github.com/Azure/go-autorest/autorest/to" + azurev1 "github.com/Azure/azure-service-operator/api/v1" + //servicev1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" + //"github.com/Azure/azure-service-operator/pkg/client/deployment" + //"github.com/Azure/azure-service-operator/pkg/client/group" + //"github.com/Azure/azure-service-operator/pkg/config" + "github.com/Azure/azure-service-operator/pkg/errhelp" + helpers "github.com/Azure/azure-service-operator/pkg/helpers" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/storages" + "k8s.io/client-go/tools/record" + // "github.com/Azure/azure-service-operator/pkg/helpers" + //storagetemplate "github.com/Azure/azure-service-operator/pkg/storage" + // "github.com/Azure/go-autorest/autorest/to" ) +const storageFinalizerName = "storage.finalizers.azure.com" + // StorageReconciler reconciles a Storage object type StorageReconciler struct { client.Client - Log logr.Logger + Log logr.Logger + Recorder record.EventRecorder + RequeueTime time.Duration } // +kubebuilder:rbac:groups=service.azure,resources=storages,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=service.azure,resources=storages/status,verbs=get;update;patch +// Reconcile function does the main reconciliation loop of the operator func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { ctx := context.Background() log := r.Log.WithValues("storage", req.NamespacedName) // Fetch the Storage instance - instance := &servicev1alpha1.Storage{} - err := r.Get(ctx, 
req.NamespacedName, instance) + //instance := &servicev1alpha1.Storage{} + var instance azurev1.Storage + + requeueAfter, err := strconv.Atoi(os.Getenv("REQUEUE_AFTER")) if err != nil { - log.Error(err, "unable to fetch Storage") + requeueAfter = 30 + } + + if err := r.Get(ctx, req.NamespacedName, &instance); err != nil { + // if err != nil { + log.Error(err, "unable to retrieve storage resource", "err", err.Error()) // we'll ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them // on deleted requests. - return ctrl.Result{}, helpers.IgnoreKubernetesResourceNotFound(err) + //return ctrl.Result{}, helpers.IgnoreKubernetesResourceNotFound(err) + return ctrl.Result{}, client.IgnoreNotFound(err) } + log.Info("Getting Storage Account", "Storage.Namespace", instance.Namespace, "Storage.Name", instance.Name) log.V(1).Info("Describing Storage Account", "Storage", instance) - storageFinalizerName := "storage.finalizers.azure" + //storageFinalizerName := "storage.finalizers.azure" // examine DeletionTimestamp to determine if object is under deletion - if instance.ObjectMeta.DeletionTimestamp.IsZero() { + /*if instance.ObjectMeta.DeletionTimestamp.IsZero() { // The object is not being deleted, so if it does not have our finalizer, // then lets add the finalizer and update the object. This is equivalent // registering our finalizer. @@ -97,9 +120,55 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { } return ctrl.Result{}, err + }*/ + if helpers.IsBeingDeleted(&instance) { + if helpers.HasFinalizer(&instance, storageFinalizerName) { + if err := r.deleteExternal(&instance); err != nil { + log.Info("Delete Storage failed with ", err.Error()) + return ctrl.Result{}, err + } + + helpers.RemoveFinalizer(&instance, storageFinalizerName) + if err := r.Update(context.Background(), &instance); err != nil { + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil } - resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "storage", instance.Name, instance.Namespace) + if !helpers.HasFinalizer(&instance, storageFinalizerName) { + if err := r.addFinalizer(&instance); err != nil { + log.Info("Adding storage finalizer failed with ", err.Error()) + return ctrl.Result{}, err + } + } + + if !instance.IsSubmitted() { + if err := r.reconcileExternal(&instance); err != nil { + if errhelp.IsAsynchronousOperationNotComplete(err) || errhelp.IsGroupNotFound(err) { + log.Info("Requeuing as the async operation is not complete") + return ctrl.Result{ + Requeue: true, + RequeueAfter: time.Second * time.Duration(requeueAfter), + }, nil + } + return ctrl.Result{}, fmt.Errorf("error reconciling keyvault in azure: %v", err) + } + return ctrl.Result{}, nil + } + + r.Recorder.Event(&instance, "Normal", "Provisioned", "Storage "+instance.ObjectMeta.Name+" provisioned ") + return ctrl.Result{}, nil + + //resourcegroupName := instance.ObjectMeta.Name + // log.Info("SubscriptionId: ", config.Instance.SubscriptionID) + // log.Info("ClusterName:", config.Instance.ClusterName) + // log.Info("instance.Name: ", instance.Name) + // log.Info("instance namespace: ", instance.Namespace) + //resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "storage", instance.Name, instance.Namespace) + /*esourceGroupName := helpers.AzrueResourceGroupName("b840fb3a-097c-462e-b658-5c6364683ae2", "myAKSCluster", "storage", 
"myinstancename", "mynamespace") + resourceGroupLocation := instance.Spec.Location + log.Info("storage controller", "rgn: ", resourceGroupName) deploymentName := instance.Status.DeploymentName if deploymentName != "" { log.Info("Checking deployment", "ResourceGroupName", resourceGroupName, "DeploymentName", deploymentName) @@ -128,12 +197,14 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { } log.Info("Creating a new resource group", "ResourceGroupName", resourceGroupName) - tags := map[string]*string{ - "name": to.StringPtr(instance.Name), - "namespace": to.StringPtr(instance.Namespace), - "kind": to.StringPtr("storage"), - } - group.CreateGroup(ctx, resourceGroupName, instance.Spec.Location, tags) + // tags := map[string]*string{ + // "name": to.StringPtr(instance.Name), + // "namespace": to.StringPtr(instance.Namespace), + // "kind": to.StringPtr("storage"), + // } + //group.CreateGroup(ctx, resourceGroupName, resourcegroupLocation, tags) + //from RGController _, err = resoucegroupsresourcemanager.CreateGroup(ctx, resourcegroupName, resourcegroupLocation) + _, err = resoucegroupsresourcemanager.CreateGroup(ctx, resourceGroupName, resourceGroupLocation) log.Info("Reconciling Storage", "Storage.Namespace", instance.Namespace, "Storage.Name", instance.Name) template := storagetemplate.New(instance) @@ -151,14 +222,89 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { // Storage created successfully - don't requeue return ctrl.Result{}, nil + */ +} + +func (r *StorageReconciler) addFinalizer(instance *azurev1.Storage) error { + helpers.AddFinalizer(instance, storageFinalizerName) + err := r.Update(context.Background(), instance) + if err != nil { + return fmt.Errorf("failed to update finalizer: %v", err) + } + r.Recorder.Event(instance, "Normal", "Updated", fmt.Sprintf("finalizer %s added", storageFinalizerName)) + return nil +} + +func (r *StorageReconciler) reconcileExternal(instance *azurev1.Storage) error { + ctx := context.Background() + location := instance.Spec.Location + name := instance.ObjectMeta.Name + groupName := instance.Spec.ResourceGroupName + sku := instance.Spec.Sku + kind := instance.Spec.Kind + accessTier := instance.Spec.AccessTier + enableHTTPSTrafficOnly := instance.Spec.EnableHTTPSTrafficOnly + //sku, kind, tags, accesstier enabblehttpstraffice + + // write information back to instance + instance.Status.Provisioning = true + + if err := r.Status().Update(ctx, instance); err != nil { + r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance") + } + + _, err := storages.CreateStorage(ctx, groupName, name, location, sku, kind, nil, accessTier, enableHTTPSTrafficOnly) + if err != nil { + if errhelp.IsAsynchronousOperationNotComplete(err) || errhelp.IsGroupNotFound(err) { + r.Recorder.Event(instance, "Normal", "Provisioning", name+" provisioning") + return err + } + r.Recorder.Event(instance, "Warning", "Failed", "Couldn't create resource in azure") + instance.Status.Provisioning = false + errUpdate := r.Status().Update(ctx, instance) + if errUpdate != nil { + r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance") + } + return err + } + + instance.Status.Provisioning = false + instance.Status.Provisioned = true + + if err = r.Status().Update(ctx, instance); err != nil { + r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance") + } + + return nil +} + +func (r *StorageReconciler) deleteExternal(instance *azurev1.Storage) error { + ctx := 
context.Background() + name := instance.ObjectMeta.Name + groupName := instance.Spec.ResourceGroupName + _, err := storages.DeleteStorage(ctx, groupName, name) + if err != nil { + if errhelp.IsStatusCode204(err) { + r.Recorder.Event(instance, "Warning", "DoesNotExist", "Resource to delete does not exist") + return nil + } + + r.Recorder.Event(instance, "Warning", "Failed", "Couldn't delete resouce in azure") + return err + } + + r.Recorder.Event(instance, "Normal", "Deleted", name+" deleted") + return nil } +// SetupWithManager sets up the controller functions func (r *StorageReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&servicev1alpha1.Storage{}). + For(&azurev1.Storage{}). Complete(r) } +/* func (r *StorageReconciler) updateStatus(req ctrl.Request, resourceGroupName, deploymentName, provisioningState string, outputs interface{}) (*servicev1alpha1.Storage, error) { ctx := context.Background() log := r.Log.WithValues("storage", req.NamespacedName) @@ -248,3 +394,4 @@ func (r *StorageReconciler) syncAdditionalResourcesAndOutput(req ctrl.Request, s return nil } +*/ diff --git a/main.go b/main.go index 21e822f7bc1..286baf968a8 100644 --- a/main.go +++ b/main.go @@ -105,8 +105,9 @@ func main() { } err = (&controllers.StorageReconciler{ - Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("Storage"), + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Storage"), + Recorder: mgr.GetEventRecorderFor("Storage-controller"), }).SetupWithManager(mgr) if err != nil { setupLog.Error(err, "unable to create controller", "controller", "Storage") diff --git a/pkg/resourcemanager/storages/storages.go b/pkg/resourcemanager/storages/storages.go new file mode 100644 index 00000000000..2e6abe8d963 --- /dev/null +++ b/pkg/resourcemanager/storages/storages.go @@ -0,0 +1,132 @@ +package storages + +import ( + "context" + //"encoding/json" + "errors" + "fmt" + "log" + + //uuid "github.com/satori/go.uuid" + + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage" + //azureV1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" + //"github.com/Azure/azure-service-operator/pkg/client/deployment" + //"github.com/Azure/azure-service-operator/pkg/template" + azurev1 "github.com/Azure/azure-service-operator/api/v1" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/config" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/iam" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" +) + +// New generates a new object +// func New(storage *azureV1alpha1.Storage) *Template { +// return &Template{ +// Storage: storage, +// } +// } + +// // Template defines the dynamodb cfts +// type Template struct { +// Storage *azureV1alpha1.Storage +// } + +func getStoragesClient() storage.AccountsClient { + storagesClient := storage.NewAccountsClient(config.SubscriptionID()) + a, err := iam.GetResourceManagementAuthorizer() + if err != nil { + log.Fatalf("failed to initialize authorizer: %v\n", err) + } + storagesClient.Authorizer = a + storagesClient.AddToUserAgent(config.UserAgent()) + return storagesClient +} + +// CreateStorage creates a new storage account +func CreateStorage(ctx context.Context, groupName string, + storageAccountName string, + location string, + sku azurev1.StorageSku, + kind azurev1.StorageKind, + tags map[string]*string, + accessTier azurev1.StorageAccessTier, + enableHTTPsTrafficOnly *bool) (storage.Account, error) { + 
storagesClient := getStoragesClient() + // id, err := uuid.FromString(config.TenantID()) + // if err != nil { + // return storage.Account{}, err + // } + log.Println("Storage:AccountName" + storageAccountName) + storageType := "Microsoft.Storage/storageAccounts" + checkAccountParams := storage.AccountCheckNameAvailabilityParameters{Name: &storageAccountName, Type: &storageType} + result, err := storagesClient.CheckNameAvailability(ctx, checkAccountParams) + if err != nil { + return storage.Account{}, err + } + + if *result.NameAvailable == false { + log.Fatalf("storage account not available: %v\n", result.Reason) + return storage.Account{}, errors.New("storage account not available") + } + //mskuname := string(sku.Name) + //test := storage.Sku{Name{string(sku.Name)} + //msku := storage.Sku{Name : mskuname}//"test"} + //t2 := storage.Sku(sku) + sSku := storage.Sku{Name: storage.SkuName(sku.Name)} + //sKind := string(kind) + //sKind2 := storage.Kind{storage.Kind(kind)} + sKind := storage.Kind(kind) + //sKind := storage.Kind{Kind : kind} + sAccessTier := storage.AccessTier(accessTier) + + params := storage.AccountCreateParameters{ + Location: to.StringPtr(location), + Sku: &sSku, + //storage.Sku{Name{sku.Name}}, + Kind: sKind, + Tags: tags, + Identity: nil, + AccountPropertiesCreateParameters: &storage.AccountPropertiesCreateParameters{ + AccessTier: sAccessTier, + EnableHTTPSTrafficOnly: enableHTTPsTrafficOnly, + }, + } + + log.Println(fmt.Sprintf("creating storage '%s' in resource group '%s' and location: %v", storageAccountName, groupName, location)) + future, err := storagesClient.Create(ctx, groupName, storageAccountName, params) + return future.Result(storagesClient) +} + +// func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (string, error) { +// deploymentName := uuid.NewV4().String() +// asset, err := template.Asset("storage.json") +// templateContents := make(map[string]interface{}) +// json.Unmarshal(asset, &templateContents) +// params := map[string]interface{}{ +// "location": map[string]interface{}{ +// "value": t.Storage.Spec.Location, +// }, +// "accountType": map[string]interface{}{ +// "value": t.Storage.Spec.Sku.Name, +// }, +// "kind": map[string]interface{}{ +// "value": t.Storage.Spec.Kind, +// }, +// "accessTier": map[string]interface{}{ +// "value": t.Storage.Spec.AccessTier, +// }, +// "supportsHttpsTrafficOnly": map[string]interface{}{ +// "value": *t.Storage.Spec.EnableHTTPSTrafficOnly, +// }, +// } + +// err = deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, &templateContents, ¶ms) +// return deploymentName, err +// } + +// DeleteStorage removes the resource group named by env var +func DeleteStorage(ctx context.Context, groupName string, storageAccountName string) (result autorest.Response, err error) { + storagesClient := getStoragesClient() + return storagesClient.Delete(ctx, groupName, storageAccountName) +} diff --git a/pkg/storage/storage_template.go b/pkg/storage/storage_template.go deleted file mode 100644 index e8502d73e5f..00000000000 --- a/pkg/storage/storage_template.go +++ /dev/null @@ -1,51 +0,0 @@ -package storage - -import ( - "context" - "encoding/json" - - uuid "github.com/satori/go.uuid" - - azureV1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" - "github.com/Azure/azure-service-operator/pkg/client/deployment" - "github.com/Azure/azure-service-operator/pkg/template" -) - -// New generates a new object -func New(storage *azureV1alpha1.Storage) *Template { - return &Template{ - 
Storage: storage, - } -} - -// Template defines the dynamodb cfts -type Template struct { - Storage *azureV1alpha1.Storage -} - -func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (string, error) { - deploymentName := uuid.NewV4().String() - asset, err := template.Asset("storage.json") - templateContents := make(map[string]interface{}) - json.Unmarshal(asset, &templateContents) - params := map[string]interface{}{ - "location": map[string]interface{}{ - "value": t.Storage.Spec.Location, - }, - "accountType": map[string]interface{}{ - "value": t.Storage.Spec.Sku.Name, - }, - "kind": map[string]interface{}{ - "value": t.Storage.Spec.Kind, - }, - "accessTier": map[string]interface{}{ - "value": t.Storage.Spec.AccessTier, - }, - "supportsHttpsTrafficOnly": map[string]interface{}{ - "value": *t.Storage.Spec.EnableHTTPSTrafficOnly, - }, - } - - err = deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, &templateContents, ¶ms) - return deploymentName, err -} From a4c320a2a58d9dc0fefd1e2389e1ab5d50f1eb95 Mon Sep 17 00:00:00 2001 From: Chris Risner Date: Wed, 28 Aug 2019 09:29:34 -0600 Subject: [PATCH 28/34] Added deepcopy generated code --- api/v1/zz_generated.deepcopy.go | 147 ++++++++++++++++++++++++++++++++ 1 file changed, 147 insertions(+) diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 760242cc0f6..4c7d21dadef 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -553,3 +553,150 @@ func (in *ResourceGroupStatus) DeepCopy() *ResourceGroupStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Storage) DeepCopyInto(out *Storage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + out.Output = in.Output + in.AdditionalResources.DeepCopyInto(&out.AdditionalResources) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. +func (in *Storage) DeepCopy() *Storage { + if in == nil { + return nil + } + out := new(Storage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Storage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageAdditionalResources) DeepCopyInto(out *StorageAdditionalResources) { + *out = *in + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAdditionalResources. +func (in *StorageAdditionalResources) DeepCopy() *StorageAdditionalResources { + if in == nil { + return nil + } + out := new(StorageAdditionalResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageList) DeepCopyInto(out *StorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Storage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageList. +func (in *StorageList) DeepCopy() *StorageList { + if in == nil { + return nil + } + out := new(StorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageOutput) DeepCopyInto(out *StorageOutput) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOutput. +func (in *StorageOutput) DeepCopy() *StorageOutput { + if in == nil { + return nil + } + out := new(StorageOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageSku) DeepCopyInto(out *StorageSku) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSku. +func (in *StorageSku) DeepCopy() *StorageSku { + if in == nil { + return nil + } + out := new(StorageSku) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { + *out = *in + out.Sku = in.Sku + if in.EnableHTTPSTrafficOnly != nil { + in, out := &in.EnableHTTPSTrafficOnly, &out.EnableHTTPSTrafficOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. +func (in *StorageSpec) DeepCopy() *StorageSpec { + if in == nil { + return nil + } + out := new(StorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageStatus) DeepCopyInto(out *StorageStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStatus. 
+func (in *StorageStatus) DeepCopy() *StorageStatus { + if in == nil { + return nil + } + out := new(StorageStatus) + in.DeepCopyInto(out) + return out +} From 438936f111609c4913b3b8cd9f73281c9ee84972 Mon Sep 17 00:00:00 2001 From: Chris Risner Date: Wed, 28 Aug 2019 14:21:14 -0600 Subject: [PATCH 29/34] CosmosDB deploy working --- PROJECT | 2 +- api/{v1alpha1 => v1}/cosmosdb_types.go | 22 ++- api/v1/zz_generated.deepcopy.go | 142 ++++++++++++++++ api/v1alpha1/cosmosdb_types_test.go | 85 ---------- api/v1alpha1/zz_generated.deepcopy.go | 142 ---------------- ...aml => azure.microsoft.com_cosmosdbs.yaml} | 23 +-- config/rbac/role.yaml | 102 ++++++------ .../{cosmosdb.yaml => azure_v1_cosmosdb.yaml} | 5 +- .../{storage.yaml => azure_v1_storage.yaml} | 0 controllers/cosmosdb_controller.go | 152 ++++++++++++++++-- controllers/storage_controller.go | 2 +- main.go | 29 +--- pkg/cosmosdb/cosmosdb_template.go | 45 ------ pkg/resourcemanager/cosmosdbs/cosmosdbs.go | 138 ++++++++++++++++ pkg/resourcemanager/storages/storages.go | 13 +- 15 files changed, 515 insertions(+), 387 deletions(-) rename api/{v1alpha1 => v1}/cosmosdb_types.go (86%) delete mode 100644 api/v1alpha1/cosmosdb_types_test.go rename config/crd/bases/{service.azure_cosmosdbs.yaml => azure.microsoft.com_cosmosdbs.yaml} (98%) rename config/samples/{cosmosdb.yaml => azure_v1_cosmosdb.yaml} (53%) rename config/samples/{storage.yaml => azure_v1_storage.yaml} (100%) delete mode 100644 pkg/cosmosdb/cosmosdb_template.go create mode 100644 pkg/resourcemanager/cosmosdbs/cosmosdbs.go diff --git a/PROJECT b/PROJECT index 979254fcd9b..018a3253e57 100644 --- a/PROJECT +++ b/PROJECT @@ -6,7 +6,7 @@ resources: version: v1 kind: Storage - group: service - version: v1alpha1 + version: v1 kind: CosmosDB - group: service version: v1alpha1 diff --git a/api/v1alpha1/cosmosdb_types.go b/api/v1/cosmosdb_types.go similarity index 86% rename from api/v1alpha1/cosmosdb_types.go rename to api/v1/cosmosdb_types.go index b4cbd0c65aa..292e54e3529 100644 --- a/api/v1alpha1/cosmosdb_types.go +++ b/api/v1/cosmosdb_types.go @@ -22,7 +22,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE */ -package v1alpha1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,9 +38,10 @@ type CosmosDBSpec struct { // +kubebuilder:validation:MinLength=0 - Location string `json:"location,omitempty"` - Kind CosmosDBKind `json:"kind,omitempty"` - Properties CosmosDBProperties `json:"properties,omitempty"` + Location string `json:"location,omitempty"` + ResourceGroupName string `json:"resourceGroup"` + Kind CosmosDBKind `json:"kind,omitempty"` + Properties CosmosDBProperties `json:"properties,omitempty"` } // CosmosDBKind enumerates the values for kind. 
@@ -82,9 +83,11 @@ type CosmosDBStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file - DeploymentName string `json:"deploymentName,omitempty"` - ProvisioningState string `json:"provisioningState,omitempty"` - Generation int64 `json:"generation,omitempty"` + // DeploymentName string `json:"deploymentName,omitempty"` + // ProvisioningState string `json:"provisioningState,omitempty"` + // Generation int64 `json:"generation,omitempty"` + Provisioning bool `json:"provisioning,omitempty"` + Provisioned bool `json:"provisioned,omitempty"` } type CosmosDBOutput struct { @@ -115,6 +118,7 @@ type CosmosDB struct { } // +kubebuilder:object:root=true +// +kubebuilder:subresource:status // CosmosDBList contains a list of CosmosDB type CosmosDBList struct { @@ -126,3 +130,7 @@ type CosmosDBList struct { func init() { SchemeBuilder.Register(&CosmosDB{}, &CosmosDBList{}) } + +func (cosmosDB *CosmosDB) IsSubmitted() bool { + return cosmosDB.Status.Provisioning || cosmosDB.Status.Provisioned +} diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 4c7d21dadef..5fd49d60399 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -112,6 +112,148 @@ func (in *ConsumerGroupStatus) DeepCopy() *ConsumerGroupStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDB) DeepCopyInto(out *CosmosDB) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + out.Output = in.Output + in.AdditionalResources.DeepCopyInto(&out.AdditionalResources) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDB. +func (in *CosmosDB) DeepCopy() *CosmosDB { + if in == nil { + return nil + } + out := new(CosmosDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CosmosDB) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBAdditionalResources) DeepCopyInto(out *CosmosDBAdditionalResources) { + *out = *in + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBAdditionalResources. +func (in *CosmosDBAdditionalResources) DeepCopy() *CosmosDBAdditionalResources { + if in == nil { + return nil + } + out := new(CosmosDBAdditionalResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBList) DeepCopyInto(out *CosmosDBList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CosmosDB, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBList. 
+func (in *CosmosDBList) DeepCopy() *CosmosDBList { + if in == nil { + return nil + } + out := new(CosmosDBList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CosmosDBList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBOutput) DeepCopyInto(out *CosmosDBOutput) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBOutput. +func (in *CosmosDBOutput) DeepCopy() *CosmosDBOutput { + if in == nil { + return nil + } + out := new(CosmosDBOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBProperties) DeepCopyInto(out *CosmosDBProperties) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBProperties. +func (in *CosmosDBProperties) DeepCopy() *CosmosDBProperties { + if in == nil { + return nil + } + out := new(CosmosDBProperties) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBSpec) DeepCopyInto(out *CosmosDBSpec) { + *out = *in + out.Properties = in.Properties +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBSpec. +func (in *CosmosDBSpec) DeepCopy() *CosmosDBSpec { + if in == nil { + return nil + } + out := new(CosmosDBSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBStatus) DeepCopyInto(out *CosmosDBStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBStatus. +func (in *CosmosDBStatus) DeepCopy() *CosmosDBStatus { + if in == nil { + return nil + } + out := new(CosmosDBStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Eventhub) DeepCopyInto(out *Eventhub) { *out = *in diff --git a/api/v1alpha1/cosmosdb_types_test.go b/api/v1alpha1/cosmosdb_types_test.go deleted file mode 100644 index 65273803537..00000000000 --- a/api/v1alpha1/cosmosdb_types_test.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -MIT License - -Copyright (c) Microsoft Corporation. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE -*/ - -package v1alpha1 - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "golang.org/x/net/context" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -// These tests are written in BDD-style using Ginkgo framework. Refer to -// http://onsi.github.io/ginkgo to learn more. - -var _ = Describe("CosmosDB", func() { - var ( - key types.NamespacedName - created, fetched *CosmosDB - ) - - BeforeEach(func() { - // Add any setup steps that needs to be executed before each test - }) - - AfterEach(func() { - // Add any teardown steps that needs to be executed after each test - }) - - // Add Tests for OpenAPI validation (or additonal CRD features) specified in - // your API definition. - // Avoid adding tests for vanilla CRUD operations because they would - // test Kubernetes API server, which isn't the goal here. - Context("Create API", func() { - - It("should create an object successfully", func() { - - key = types.NamespacedName{ - Name: "foo", - Namespace: "default", - } - created = &CosmosDB{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }} - - By("creating an API obj") - Expect(k8sClient.Create(context.TODO(), created)).To(Succeed()) - - fetched = &CosmosDB{} - Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed()) - Expect(fetched).To(Equal(created)) - - By("deleting the created object") - Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed()) - Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed()) - }) - - }) - -}) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 533969188b7..ae1c92ad805 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -23,148 +23,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CosmosDB) DeepCopyInto(out *CosmosDB) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status - out.Output = in.Output - in.AdditionalResources.DeepCopyInto(&out.AdditionalResources) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDB. -func (in *CosmosDB) DeepCopy() *CosmosDB { - if in == nil { - return nil - } - out := new(CosmosDB) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CosmosDB) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CosmosDBAdditionalResources) DeepCopyInto(out *CosmosDBAdditionalResources) { - *out = *in - if in.Secrets != nil { - in, out := &in.Secrets, &out.Secrets - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBAdditionalResources. 
-func (in *CosmosDBAdditionalResources) DeepCopy() *CosmosDBAdditionalResources { - if in == nil { - return nil - } - out := new(CosmosDBAdditionalResources) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CosmosDBList) DeepCopyInto(out *CosmosDBList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CosmosDB, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBList. -func (in *CosmosDBList) DeepCopy() *CosmosDBList { - if in == nil { - return nil - } - out := new(CosmosDBList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CosmosDBList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CosmosDBOutput) DeepCopyInto(out *CosmosDBOutput) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBOutput. -func (in *CosmosDBOutput) DeepCopy() *CosmosDBOutput { - if in == nil { - return nil - } - out := new(CosmosDBOutput) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CosmosDBProperties) DeepCopyInto(out *CosmosDBProperties) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBProperties. -func (in *CosmosDBProperties) DeepCopy() *CosmosDBProperties { - if in == nil { - return nil - } - out := new(CosmosDBProperties) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CosmosDBSpec) DeepCopyInto(out *CosmosDBSpec) { - *out = *in - out.Properties = in.Properties -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBSpec. -func (in *CosmosDBSpec) DeepCopy() *CosmosDBSpec { - if in == nil { - return nil - } - out := new(CosmosDBSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CosmosDBStatus) DeepCopyInto(out *CosmosDBStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBStatus. -func (in *CosmosDBStatus) DeepCopy() *CosmosDBStatus { - if in == nil { - return nil - } - out := new(CosmosDBStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RedisCache) DeepCopyInto(out *RedisCache) { *out = *in diff --git a/config/crd/bases/service.azure_cosmosdbs.yaml b/config/crd/bases/azure.microsoft.com_cosmosdbs.yaml similarity index 98% rename from config/crd/bases/service.azure_cosmosdbs.yaml rename to config/crd/bases/azure.microsoft.com_cosmosdbs.yaml index d6e0d56d610..ed4c39e607a 100644 --- a/config/crd/bases/service.azure_cosmosdbs.yaml +++ b/config/crd/bases/azure.microsoft.com_cosmosdbs.yaml @@ -4,9 +4,9 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: creationTimestamp: null - name: cosmosdbs.service.azure + name: cosmosdbs.azure.microsoft.com spec: - group: service.azure + group: azure.microsoft.com names: kind: CosmosDB plural: cosmosdbs @@ -437,21 +437,24 @@ spec: - Standard type: string type: object + resourceGroup: + type: string + required: + - resourceGroup type: object status: description: CosmosDBStatus defines the observed state of CosmosDB properties: - deploymentName: - type: string - generation: - format: int64 - type: integer - provisioningState: - type: string + provisioned: + type: boolean + provisioning: + description: DeploymentName string `json:"deploymentName,omitempty"` + ProvisioningState string `json:"provisioningState,omitempty"` Generation int64 `json:"generation,omitempty"` + type: boolean type: object type: object versions: - - name: v1alpha1 + - name: v1 served: true storage: true status: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 13454a16c39..2b7d235432e 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -7,16 +7,21 @@ metadata: name: manager-role rules: - apiGroups: - - azure.microsoft.com + - service.azure resources: - - events + - rediscaches verbs: - create + - delete + - get + - list - patch + - update + - watch - apiGroups: - - service.azure + - azure.microsoft.com resources: - - cosmosdbs + - resourcegroups verbs: - create - delete @@ -26,9 +31,9 @@ rules: - update - watch - apiGroups: - - service.azure + - azure.microsoft.com resources: - - cosmosdbs/status + - eventhubs/status verbs: - get - patch @@ -36,7 +41,7 @@ rules: - apiGroups: - azure.microsoft.com resources: - - eventhubs + - eventhubnamespaces verbs: - create - delete @@ -45,14 +50,6 @@ rules: - patch - update - watch -- apiGroups: - - azure.microsoft.com - resources: - - eventhubs/status - verbs: - - get - - patch - - update - apiGroups: - azure.microsoft.com resources: @@ -62,17 +59,17 @@ rules: - patch - update - apiGroups: - - azure.microsoft.com + - service.azure resources: - - keyvaults/status + - storages/status verbs: - get - patch - update - apiGroups: - - azure.microsoft.com + - "" resources: - - consumergroups + - secrets verbs: - create - delete @@ -84,7 +81,7 @@ rules: - apiGroups: - azure.microsoft.com resources: - - eventhubnamespaces + - eventhubs verbs: - create - delete @@ -94,21 +91,17 @@ rules: - update - watch - apiGroups: - - service.azure + - azure.microsoft.com resources: - - rediscaches + - consumergroups/status verbs: - - create - - delete - get - - list - patch - update - - watch - apiGroups: - azure.microsoft.com resources: - - resourcegroups + - keyvaults verbs: - create - delete @@ -118,17 +111,17 @@ rules: - update - watch - apiGroups: - - apps + - azure.microsoft.com resources: - - deployments/status + - keyvaults/status verbs: - get - patch - update - apiGroups: - - service.azure + - azure.microsoft.com resources: - - rediscaches/status + - resourcegroups/status verbs: - get - patch @@ -146,17 +139,23 @@ rules: - 
update - watch - apiGroups: - - service.azure + - "" resources: - - storages/status + - events verbs: - - get + - create + - watch +- apiGroups: + - azure.microsoft.com + resources: + - events + verbs: + - create - patch - - update - apiGroups: - - apps + - azure.microsoft.com resources: - - deployments + - consumergroups verbs: - create - delete @@ -166,37 +165,33 @@ rules: - update - watch - apiGroups: - - azure.microsoft.com + - apps resources: - - consumergroups/status + - deployments/status verbs: - get - patch - update - apiGroups: - - azure.microsoft.com + - service.azure resources: - - keyvaults + - cosmosdbs/status verbs: - - create - - delete - get - - list - patch - update - - watch - apiGroups: - - azure.microsoft.com + - service.azure resources: - - resourcegroups/status + - rediscaches/status verbs: - get - patch - update - apiGroups: - - "" + - apps resources: - - secrets + - deployments verbs: - create - delete @@ -206,9 +201,14 @@ rules: - update - watch - apiGroups: - - "" + - service.azure resources: - - events + - cosmosdbs verbs: - create + - delete + - get + - list + - patch + - update - watch diff --git a/config/samples/cosmosdb.yaml b/config/samples/azure_v1_cosmosdb.yaml similarity index 53% rename from config/samples/cosmosdb.yaml rename to config/samples/azure_v1_cosmosdb.yaml index 6b728d87db9..b9ebff1c4bf 100644 --- a/config/samples/cosmosdb.yaml +++ b/config/samples/azure_v1_cosmosdb.yaml @@ -1,9 +1,10 @@ -apiVersion: service.azure/v1alpha1 +apiVersion: azure.microsoft.com/v1 kind: CosmosDB metadata: - name: cosmosdb-sample + name: cosmosdb-sample123xyzkj spec: kind: GlobalDocumentDB location: westus + resourceGroup: resourcegroup-sample-1907 properties: databaseAccountOfferType: Standard diff --git a/config/samples/storage.yaml b/config/samples/azure_v1_storage.yaml similarity index 100% rename from config/samples/storage.yaml rename to config/samples/azure_v1_storage.yaml diff --git a/controllers/cosmosdb_controller.go b/controllers/cosmosdb_controller.go index c4d2852a052..21c708369bc 100644 --- a/controllers/cosmosdb_controller.go +++ b/controllers/cosmosdb_controller.go @@ -26,38 +26,51 @@ package controllers import ( "context" + "fmt" + "os" + "strconv" "time" "github.com/go-logr/logr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - servicev1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" - "github.com/Azure/azure-service-operator/pkg/client/deployment" - "github.com/Azure/azure-service-operator/pkg/client/group" - "github.com/Azure/azure-service-operator/pkg/config" - cosmosdbtemplate "github.com/Azure/azure-service-operator/pkg/cosmosdb" + azurev1 "github.com/Azure/azure-service-operator/api/v1" + "github.com/Azure/azure-service-operator/pkg/errhelp" "github.com/Azure/azure-service-operator/pkg/helpers" - "github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/cosmosdbs" + //"github.com/Azure/go-autorest/autorest/to" + "k8s.io/client-go/tools/record" ) +const cosmosDBFinalizerName = "cosmosdb.finalizers.azure.com" + // CosmosDBReconciler reconciles a CosmosDB object type CosmosDBReconciler struct { client.Client - Log logr.Logger + Log logr.Logger + Recorder record.EventRecorder + RequeueTime time.Duration } // +kubebuilder:rbac:groups=service.azure,resources=cosmosdbs,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=service.azure,resources=cosmosdbs/status,verbs=get;update;patch +// Reconcile function does the main 
reconciliation loop of the operator func (r *CosmosDBReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { ctx := context.Background() log := r.Log.WithValues("cosmosdb", req.NamespacedName) // Fetch the CosmosDB instance - instance := &servicev1alpha1.CosmosDB{} - err := r.Get(ctx, req.NamespacedName, instance) + //instance := &servicev1alpha1.CosmosDB{} + var instance azurev1.CosmosDB + + requeueAfter, err := strconv.Atoi(os.Getenv("REQUEUE_AFTER")) if err != nil { + requeueAfter = 30 + } + + if err := r.Get(ctx, req.NamespacedName, &instance); err != nil { log.Error(err, "unable to fetch CosmosDB") // we'll ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -67,7 +80,7 @@ func (r *CosmosDBReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { log.Info("Getting CosmosDB Account", "CosmosDB.Namespace", instance.Namespace, "CosmosDB.Name", instance.Name) log.V(1).Info("Describing CosmosDB Account", "CosmosDB", instance) - cosmosDBFinalizerName := "cosmosdb.finalizers.azure" + /* Before Refactor // examine DeletionTimestamp to determine if object is under deletion if instance.ObjectMeta.DeletionTimestamp.IsZero() { // The object is not being deleted, so if it does not have our finalizer, @@ -97,8 +110,125 @@ func (r *CosmosDBReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { } return ctrl.Result{}, err + */ + + if helpers.IsBeingDeleted(&instance) { + if helpers.HasFinalizer(&instance, cosmosDBFinalizerName) { + if err := r.deleteExternal(&instance); err != nil { + log.Info("Delete CosmosDB failed with ", err.Error()) + return ctrl.Result{}, err + } + + helpers.RemoveFinalizer(&instance, cosmosDBFinalizerName) + if err := r.Update(context.Background(), &instance); err != nil { + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil + } + + if !helpers.HasFinalizer(&instance, cosmosDBFinalizerName) { + if err := r.addFinalizer(&instance); err != nil { + log.Info("Adding cosmosDB finalizer failed with ", err.Error()) + return ctrl.Result{}, err + } + } + + if !instance.IsSubmitted() { + if err := r.reconcileExternal(&instance); err != nil { + if errhelp.IsAsynchronousOperationNotComplete(err) || errhelp.IsGroupNotFound(err) { + log.Info("Requeuing as the async operation is not complete") + return ctrl.Result{ + Requeue: true, + RequeueAfter: time.Second * time.Duration(requeueAfter), + }, nil + } + return ctrl.Result{}, fmt.Errorf("error reconciling cosmosdb in azure: %v", err) + } + return ctrl.Result{}, nil + } + + r.Recorder.Event(&instance, "Normal", "Provisioned", "CosmosDB "+instance.ObjectMeta.Name+" provisioned ") + return ctrl.Result{}, nil +} + +func (r *CosmosDBReconciler) addFinalizer(instance *azurev1.CosmosDB) error { + helpers.AddFinalizer(instance, cosmosDBFinalizerName) + err := r.Update(context.Background(), instance) + if err != nil { + return fmt.Errorf("failed to update finalizer: %v", err) + } + r.Recorder.Event(instance, "Normal", "Updated", fmt.Sprintf("finalizer %s added", cosmosDBFinalizerName)) + return nil +} + +func (r *CosmosDBReconciler) reconcileExternal(instance *azurev1.CosmosDB) error { + ctx := context.Background() + location := instance.Spec.Location + name := instance.ObjectMeta.Name + groupName := instance.Spec.ResourceGroupName + kind := instance.Spec.Kind + dbType := instance.Spec.Properties.DatabaseAccountOfferType + + // write information back to instance + instance.Status.Provisioning = true + + if err := 
r.Status().Update(ctx, instance); err != nil { + r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance") + } + + _, err := cosmosdbs.CreateCosmosDB(ctx, groupName, name, location, kind, dbType, nil) + if err != nil { + if errhelp.IsAsynchronousOperationNotComplete(err) || errhelp.IsGroupNotFound(err) { + r.Recorder.Event(instance, "Normal", "Provisioning", name+" provisioning") + return err + } + r.Recorder.Event(instance, "Warning", "Failed", "Couldn't create resource in azure") + instance.Status.Provisioning = false + errUpdate := r.Status().Update(ctx, instance) + if errUpdate != nil { + r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance") + } + return err + } + + instance.Status.Provisioning = false + instance.Status.Provisioned = true + + if err = r.Status().Update(ctx, instance); err != nil { + r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance") + } + + return nil +} + +func (r *CosmosDBReconciler) deleteExternal(instance *azurev1.CosmosDB) error { + ctx := context.Background() + name := instance.ObjectMeta.Name + groupName := instance.Spec.ResourceGroupName + _, err := cosmosdbs.DeleteCosmosDB(ctx, groupName, name) + if err != nil { + if errhelp.IsStatusCode204(err) { + r.Recorder.Event(instance, "Warning", "DoesNotExist", "Resource to delete does not exist") + return nil + } + + r.Recorder.Event(instance, "Warning", "Failed", "Couldn't delete resouce in azure") + return err } + r.Recorder.Event(instance, "Normal", "Deleted", name+" deleted") + return nil +} + +// SetupWithManager sets up the controller functions +func (r *CosmosDBReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&azurev1.CosmosDB{}). + Complete(r) +} + +/* Before Refactor resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "cosmosdb", instance.Name, instance.Namespace) deploymentName := instance.Status.DeploymentName if deploymentName != "" { @@ -239,4 +369,4 @@ func (r *CosmosDBReconciler) syncAdditionalResourcesAndOutput(req ctrl.Request, } return nil -} +}*/ diff --git a/controllers/storage_controller.go b/controllers/storage_controller.go index 4d7f3fa6c0e..dbb40772712 100644 --- a/controllers/storage_controller.go +++ b/controllers/storage_controller.go @@ -152,7 +152,7 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { RequeueAfter: time.Second * time.Duration(requeueAfter), }, nil } - return ctrl.Result{}, fmt.Errorf("error reconciling keyvault in azure: %v", err) + return ctrl.Result{}, fmt.Errorf("error reconciling storage in azure: %v", err) } return ctrl.Result{}, nil } diff --git a/main.go b/main.go index 286baf968a8..6ae04cb84f1 100644 --- a/main.go +++ b/main.go @@ -60,30 +60,6 @@ func init() { // +kubebuilder:rbac:groups=apps,resources=deployments/status,verbs=get;update;patch func main() { - // var metricsAddr string - // var enableLeaderElection bool - // pflag.StringVarP(&metricsAddr, "metrics-addr", "", ":8080", "The address the metric endpoint binds to.") - // pflag.BoolVarP(&enableLeaderElection, "enable-leader-election", "", false, "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") - - // pflag.StringVarP(&masterURL, "master-url", "", "", "The address of the Kubernetes API server. 
Overrides any value in kubeconfig.") - // pflag.StringVarP(&kubeconfig, "kubeconfig", "k", "", "Path to local kubeconfig file (mainly used for development)") - // pflag.StringVarP(&resources, "resources", "", "storage,cosmosdb", "Comma delimited list of CRDs to deploy") - // pflag.StringVarP(&clusterName, "cluster-name", "i", "azure-operator", "Cluster name for the Application to run as, used to avoid conflict") - // pflag.StringVarP(&cloudName, "cloud-name", "c", "AzurePublicCloud", "The cloud name") - // pflag.StringVarP(&tenantID, "tenant-id", "t", "", "The AAD tenant, must provide when using service principals") - // pflag.StringVarP(&subscriptionID, "subscription-id", "s", "", "The subscription ID") - // pflag.StringVarP(&clientID, "client-id", "u", "", "The service principal client ID") - // pflag.StringVarP(&clientSecret, "client-secret", "p", "", "The service principal client secret") - // pflag.BoolVarP(&useAADPodIdentity, "use-aad-pod-identity", "", false, "whether use AAD pod identity") - // pflag.Parse() - - // ctrl.SetLogger(zap.Logger(true)) - // flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") - // flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, - // "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") - - // flag.Parse() - var metricsAddr string var enableLeaderElection bool flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") @@ -114,8 +90,9 @@ func main() { os.Exit(1) } err = (&controllers.CosmosDBReconciler{ - Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("CosmosDB"), + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("CosmosDB"), + Recorder: mgr.GetEventRecorderFor("CosmosDB-controller"), }).SetupWithManager(mgr) if err != nil { setupLog.Error(err, "unable to create controller", "controller", "CosmosDB") diff --git a/pkg/cosmosdb/cosmosdb_template.go b/pkg/cosmosdb/cosmosdb_template.go deleted file mode 100644 index f1a3f0d97c5..00000000000 --- a/pkg/cosmosdb/cosmosdb_template.go +++ /dev/null @@ -1,45 +0,0 @@ -package cosmosdb - -import ( - "context" - "encoding/json" - - uuid "github.com/satori/go.uuid" - - azureV1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" - "github.com/Azure/azure-service-operator/pkg/client/deployment" - "github.com/Azure/azure-service-operator/pkg/template" -) - -// New generates a new object -func New(cosmosdb *azureV1alpha1.CosmosDB) *Template { - return &Template{ - CosmosDB: cosmosdb, - } -} - -// Template defines the dynamodb cfts -type Template struct { - CosmosDB *azureV1alpha1.CosmosDB -} - -func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (string, error) { - deploymentName := uuid.NewV4().String() - asset, err := template.Asset("cosmosdb.json") - templateContents := make(map[string]interface{}) - json.Unmarshal(asset, &templateContents) - params := map[string]interface{}{ - "location": map[string]interface{}{ - "value": t.CosmosDB.Spec.Location, - }, - "kind": map[string]interface{}{ - "value": t.CosmosDB.Spec.Kind, - }, - "properties": map[string]interface{}{ - "value": t.CosmosDB.Spec.Properties, - }, - } - - err = deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, &templateContents, ¶ms) - return deploymentName, err -} diff --git a/pkg/resourcemanager/cosmosdbs/cosmosdbs.go b/pkg/resourcemanager/cosmosdbs/cosmosdbs.go new file mode 
100644
index 00000000000..20b28f8c854
--- /dev/null
+++ b/pkg/resourcemanager/cosmosdbs/cosmosdbs.go
@@ -0,0 +1,138 @@
+package cosmosdbs
+
+import (
+	"context"
+	//"encoding/json"
+	//"errors"
+	"fmt"
+	"log"
+
+	//uuid "github.com/satori/go.uuid"
+
+	"github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2015-04-08/documentdb"
+	azurev1 "github.com/Azure/azure-service-operator/api/v1"
+	"github.com/Azure/azure-service-operator/pkg/resourcemanager/config"
+	"github.com/Azure/azure-service-operator/pkg/resourcemanager/iam"
+	//"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/to"
+)
+
+func getCosmosDBClient() documentdb.DatabaseAccountsClient {
+	cosmosDBClient := documentdb.NewDatabaseAccountsClient(config.SubscriptionID())
+	a, err := iam.GetResourceManagementAuthorizer()
+	if err != nil {
+		log.Fatalf("failed to initialize authorizer: %v\n", err)
+	}
+	cosmosDBClient.Authorizer = a
+	cosmosDBClient.AddToUserAgent(config.UserAgent())
+	return cosmosDBClient
+}
+
+// CreateCosmosDB creates a new CosmosDB
+func CreateCosmosDB(ctx context.Context, groupName string,
+	cosmosDBName string,
+	location string,
+	kind azurev1.CosmosDBKind,
+	dbType azurev1.CosmosDBDatabaseAccountOfferType,
+	tags map[string]*string) (documentdb.DatabaseAccount, error) {
+	cosmosDBClient := getCosmosDBClient()
+
+	log.Println("CosmosDB:CosmosDBName" + cosmosDBName)
+
+	/* Uncomment and update if we should be checking for name exists first
+	result, err = cosmosDBClient.CheckNameExists(ctx, cosmosDBName)
+	if err != nil {
+		return documentdb.DatabaseAccount.{}, err
+	}
+	result.
+	if *result.NameAvailable == false {
+		log.Fatalf("storage account not available: %v\n", result.Reason)
+		return storage.Account{}, errors.New("storage account not available")
+	}*/
+
+	dbKind := documentdb.DatabaseAccountKind(kind)
+
+	sDBType := string(dbType)
+
+	testTags := map[string]*string{
+		"name": to.StringPtr(cosmosDBName),
+		"namespace": to.StringPtr(cosmosDBName),
+		"kind": to.StringPtr("cosmos"),
+	}
+
+	locationObj := documentdb.Location{
+		ID: to.StringPtr(fmt.Sprintf("%s-%s", cosmosDBName, location)),
+		FailoverPriority: to.Int32Ptr(0),
+		LocationName: to.StringPtr(location),
+	}
+
+	locationsArray := []documentdb.Location{
+		locationObj,
+	}
+
+	createUpdateParams := documentdb.DatabaseAccountCreateUpdateParameters{
+		Location: to.StringPtr(location),
+		Tags: testTags,
+		Name: &cosmosDBName,
+		Kind: dbKind,
+		Type: to.StringPtr("Microsoft.DocumentDb/databaseAccounts"),
+		ID: &cosmosDBName,
+		DatabaseAccountCreateUpdateProperties: &documentdb.DatabaseAccountCreateUpdateProperties{
+			DatabaseAccountOfferType: &sDBType,
+			EnableMultipleWriteLocations: to.BoolPtr(false),
+			IsVirtualNetworkFilterEnabled: to.BoolPtr(false),
+			Locations: &locationsArray,
+		},
+	}
+
+	log.Println(fmt.Sprintf("creating cosmosDB '%s' in resource group '%s' and location: %v", cosmosDBName, groupName, location)) 
+
+	future, err := cosmosDBClient.CreateOrUpdate(
+		ctx, groupName, cosmosDBName, createUpdateParams)
+	if err != nil {
+		log.Println(fmt.Sprintf("ERROR creating cosmosDB '%s' in resource group '%s' and location: %v", cosmosDBName, groupName, location))
+		log.Println(fmt.Sprintf("failed to initialize cosmosdb: %v\n", err))
+	}
+	return future.Result(cosmosDBClient)
+}
+
+// DeleteCosmosDB deletes an existing CosmosDB database account
+func DeleteCosmosDB(ctx context.Context, groupName string, cosmosDBName string) (result documentdb.DatabaseAccountsDeleteFuture, err error) {
+	cosmosDBClient := getCosmosDBClient()
+	return cosmosDBClient.Delete(ctx, groupName, cosmosDBName)
+}
+
+/* Pre-Refactor
+// New generates a new object
+func New(cosmosdb *azureV1alpha1.CosmosDB) *Template {
+	return &Template{
+		CosmosDB: cosmosdb,
+	}
+}
+
+// Template defines the dynamodb cfts
+type Template struct {
+	CosmosDB *azureV1alpha1.CosmosDB
+}
+
+func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (string, error) {
+	deploymentName := uuid.NewV4().String()
+	asset, err := template.Asset("cosmosdb.json")
+	templateContents := make(map[string]interface{})
+	json.Unmarshal(asset, &templateContents)
+	params := map[string]interface{}{
+		"location": map[string]interface{}{
+			"value": t.CosmosDB.Spec.Location,
+		},
+		"kind": map[string]interface{}{
+			"value": t.CosmosDB.Spec.Kind,
+		},
+		"properties": map[string]interface{}{
+			"value": t.CosmosDB.Spec.Properties,
+		},
+	}
+
+	err = deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, &templateContents, &params)
+	return deploymentName, err
+}
+*/
diff --git a/pkg/resourcemanager/storages/storages.go b/pkg/resourcemanager/storages/storages.go
index 2e6abe8d963..7f52314463e 100644
--- a/pkg/resourcemanager/storages/storages.go
+++ b/pkg/resourcemanager/storages/storages.go
@@ -98,6 +98,13 @@ func CreateStorage(ctx context.Context, groupName string,
 	return future.Result(storagesClient)
 }
 
+// DeleteStorage deletes an existing storage account
+func DeleteStorage(ctx context.Context, groupName string, storageAccountName string) (result autorest.Response, err error) {
+	storagesClient := getStoragesClient()
+	return storagesClient.Delete(ctx, groupName, storageAccountName)
+}
+
+// Pre-Refactor
 // func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (string, error) {
 // deploymentName := uuid.NewV4().String()
 // asset, err := template.Asset("storage.json")
@@ -124,9 +131,3 @@ func CreateStorage(ctx context.Context, groupName string,
 // err = deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, &templateContents, &params)
 // return deploymentName, err
 // }
-
-// DeleteStorage removes the resource group named by env var
-func DeleteStorage(ctx context.Context, groupName string, storageAccountName string) (result autorest.Response, err error) {
-	storagesClient := getStoragesClient()
-	return storagesClient.Delete(ctx, groupName, storageAccountName)
-}
From 540b66d09252d8f9691b762babcc3bd11263b4e8 Mon Sep 17 00:00:00 2001
From: Chris Risner
Date: Wed, 28 Aug 2019 14:25:46 -0600
Subject: [PATCH 30/34] Detailing current implementation of CosmosDB Create parameters

---
 pkg/resourcemanager/cosmosdbs/cosmosdbs.go | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/pkg/resourcemanager/cosmosdbs/cosmosdbs.go b/pkg/resourcemanager/cosmosdbs/cosmosdbs.go
index 20b28f8c854..dc257a7f1f0 100644
--- a/pkg/resourcemanager/cosmosdbs/cosmosdbs.go
+++ b/pkg/resourcemanager/cosmosdbs/cosmosdbs.go
@@ -60,6 +60,19 @@ func CreateCosmosDB(ctx context.Context, groupName string,
 		"kind": to.StringPtr("cosmos"),
 	}
 
+	/*
+	* Current state of Locations and CosmosDB properties:
+	* Creating a Database account with CosmosDB requires 
+	* that DatabaseAccountCreateUpdateProperties be sent over
+	* and currently we are not reading most of these values in
+	* as part of the Spec for CosmosDB. We are currently
+	* specifying a single Location as part of a location array
+	* which matches the location set for the overall CosmosDB
+	* instance. This matches the general behavior of creating
+	* a CosmosDB instance in the portal where the only
+	* geo-replicated region is the sole region the CosmosDB
+	* is created in. 
+	*/ 
 	locationObj := documentdb.Location{
 		ID: to.StringPtr(fmt.Sprintf("%s-%s", cosmosDBName, location)),
 		FailoverPriority: to.Int32Ptr(0),
From 357f8fa3d2dc8156ad40a2cd13da32f808d6f39e Mon Sep 17 00:00:00 2001
From: Chris Risner
Date: Wed, 28 Aug 2019 14:29:52 -0600
Subject: [PATCH 31/34] Removing TestTags

---
 pkg/resourcemanager/cosmosdbs/cosmosdbs.go | 16 +++++-----------
 1 file changed, 5 insertions(+), 11 deletions(-)

diff --git a/pkg/resourcemanager/cosmosdbs/cosmosdbs.go b/pkg/resourcemanager/cosmosdbs/cosmosdbs.go
index dc257a7f1f0..560ba420e29 100644
--- a/pkg/resourcemanager/cosmosdbs/cosmosdbs.go
+++ b/pkg/resourcemanager/cosmosdbs/cosmosdbs.go
@@ -54,15 +54,9 @@ func CreateCosmosDB(ctx context.Context, groupName string,
 
 	sDBType := string(dbType)
 
-	testTags := map[string]*string{
-		"name": to.StringPtr(cosmosDBName),
-		"namespace": to.StringPtr(cosmosDBName),
-		"kind": to.StringPtr("cosmos"),
-	}
-
 	/*
 	* Current state of Locations and CosmosDB properties:
-	* Creating a Database account with CosmosDB requires 
+	* Creating a Database account with CosmosDB requires
 	* that DatabaseAccountCreateUpdateProperties be sent over
 	* and currently we are not reading most of these values in
 	* as part of the Spec for CosmosDB. We are currently
@@ -71,8 +65,8 @@ func CreateCosmosDB(ctx context.Context, groupName string,
 	* instance. This matches the general behavior of creating
 	* a CosmosDB instance in the portal where the only
 	* geo-replicated region is the sole region the CosmosDB
-	* is created in. 
-	*/ 
+	* is created in.
+	*/
 	locationObj := documentdb.Location{
 		ID: to.StringPtr(fmt.Sprintf("%s-%s", cosmosDBName, location)),
 		FailoverPriority: to.Int32Ptr(0),
@@ -85,7 +79,7 @@ func CreateCosmosDB(ctx context.Context, groupName string,
 
 	createUpdateParams := documentdb.DatabaseAccountCreateUpdateParameters{
 		Location: to.StringPtr(location),
-		Tags: testTags,
+		Tags: tags,
 		Name: &cosmosDBName,
 		Kind: dbKind,
@@ -98,7 +92,7 @@ func CreateCosmosDB(ctx context.Context, groupName string,
 	}
 
-	log.Println(fmt.Sprintf("creating cosmosDB '%s' in resource group '%s' and location: %v", cosmosDBName, groupName, location)) 
+	log.Println(fmt.Sprintf("creating cosmosDB '%s' in resource group '%s' and location: %v", cosmosDBName, groupName, location))
 
 	future, err := cosmosDBClient.CreateOrUpdate(
From 604255bcdae6a7bf3d94489b4d3f8131dfc99063 Mon Sep 17 00:00:00 2001
From: Chris Risner
Date: Wed, 28 Aug 2019 16:19:01 -0600
Subject: [PATCH 32/34] Redis cache now deploys

---
 PROJECT | 2 +-
 api/{v1alpha1 => v1}/rediscache_types.go | 22 ++-
 api/v1/zz_generated.deepcopy.go | 158 +++++++++++++++
 api/v1alpha1/zz_generated.deepcopy.go | 182 ------------------
 ...l => azure.microsoft.com_rediscaches.yaml} | 24 ++-
 config/rbac/role.yaml | 100 +++++-----
 ...discache.yaml => azure_v1_rediscache.yaml} | 7 +-
 controllers/rediscache_controller.go | 153 +++++++++++++--
 main.go | 5 +-
 pkg/rediscache/rediscache_template.go | 51 -----
 .../rediscaches/rediscaches.go | 132 +++++++++++++
 11 files changed, 518 insertions(+), 318 deletions(-)
 rename api/{v1alpha1 => v1}/rediscache_types.go (85%)
 delete mode 100644 api/v1alpha1/zz_generated.deepcopy.go
 rename config/crd/bases/{service.azure_rediscaches.yaml => azure.microsoft.com_rediscaches.yaml} (98%)
rename config/samples/{rediscache.yaml => azure_v1_rediscache.yaml} (50%) delete mode 100644 pkg/rediscache/rediscache_template.go create mode 100644 pkg/resourcemanager/rediscaches/rediscaches.go diff --git a/PROJECT b/PROJECT index 018a3253e57..bf33b59a4d7 100644 --- a/PROJECT +++ b/PROJECT @@ -9,7 +9,7 @@ resources: version: v1 kind: CosmosDB - group: service - version: v1alpha1 + version: v1 kind: RedisCache - group: azure version: v1 diff --git a/api/v1alpha1/rediscache_types.go b/api/v1/rediscache_types.go similarity index 85% rename from api/v1alpha1/rediscache_types.go rename to api/v1/rediscache_types.go index 1fdb39a8f85..826d42ed068 100644 --- a/api/v1alpha1/rediscache_types.go +++ b/api/v1/rediscache_types.go @@ -22,7 +22,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE */ -package v1alpha1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,9 +38,9 @@ type RedisCacheSpec struct { // +kubebuilder:validation:MinLength=0 - Location string `json:"location,omitempty"` - - Properties RedisCacheProperties `json:"properties,omitempty"` + Location string `json:"location,omitempty"` + ResourceGroupName string `json:"resourceGroup"` + Properties RedisCacheProperties `json:"properties,omitempty"` } // RedisCacheProperties the properties of the Redis Cache. @@ -58,7 +58,7 @@ type RedisCacheSku struct { Family RedisCacheSkuFamily `json:"family,omitempty"` - Capacity int `json:"capacity,omitempty"` + Capacity int32 `json:"capacity,omitempty"` } type RedisCacheSkuName string @@ -81,9 +81,11 @@ type RedisCacheStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file - DeploymentName string `json:"deploymentName,omitempty"` - ProvisioningState string `json:"provisioningState,omitempty"` - Generation int64 `json:"generation,omitempty"` + // DeploymentName string `json:"deploymentName,omitempty"` + // ProvisioningState string `json:"provisioningState,omitempty"` + // Generation int64 `json:"generation,omitempty"` + Provisioning bool `json:"provisioning,omitempty"` + Provisioned bool `json:"provisioned,omitempty"` } type RedisCacheOutput struct { @@ -123,3 +125,7 @@ type RedisCacheList struct { func init() { SchemeBuilder.Register(&RedisCache{}, &RedisCacheList{}) } + +func (redisCache *RedisCache) IsSubmitted() bool { + return redisCache.Status.Provisioning || redisCache.Status.Provisioned +} diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 5fd49d60399..2f5e28346aa 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -607,6 +607,164 @@ func (in *KeyVaultStatus) DeepCopy() *KeyVaultStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCache) DeepCopyInto(out *RedisCache) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + out.Output = in.Output + in.AdditionalResources.DeepCopyInto(&out.AdditionalResources) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCache. +func (in *RedisCache) DeepCopy() *RedisCache { + if in == nil { + return nil + } + out := new(RedisCache) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *RedisCache) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheAdditionalResources) DeepCopyInto(out *RedisCacheAdditionalResources) { + *out = *in + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheAdditionalResources. +func (in *RedisCacheAdditionalResources) DeepCopy() *RedisCacheAdditionalResources { + if in == nil { + return nil + } + out := new(RedisCacheAdditionalResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheList) DeepCopyInto(out *RedisCacheList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RedisCache, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheList. +func (in *RedisCacheList) DeepCopy() *RedisCacheList { + if in == nil { + return nil + } + out := new(RedisCacheList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RedisCacheList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheOutput) DeepCopyInto(out *RedisCacheOutput) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheOutput. +func (in *RedisCacheOutput) DeepCopy() *RedisCacheOutput { + if in == nil { + return nil + } + out := new(RedisCacheOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheProperties) DeepCopyInto(out *RedisCacheProperties) { + *out = *in + out.Sku = in.Sku +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheProperties. +func (in *RedisCacheProperties) DeepCopy() *RedisCacheProperties { + if in == nil { + return nil + } + out := new(RedisCacheProperties) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheSku) DeepCopyInto(out *RedisCacheSku) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheSku. +func (in *RedisCacheSku) DeepCopy() *RedisCacheSku { + if in == nil { + return nil + } + out := new(RedisCacheSku) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheSpec) DeepCopyInto(out *RedisCacheSpec) { + *out = *in + out.Properties = in.Properties +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheSpec. 
+func (in *RedisCacheSpec) DeepCopy() *RedisCacheSpec { + if in == nil { + return nil + } + out := new(RedisCacheSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheStatus) DeepCopyInto(out *RedisCacheStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheStatus. +func (in *RedisCacheStatus) DeepCopy() *RedisCacheStatus { + if in == nil { + return nil + } + out := new(RedisCacheStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceGroup) DeepCopyInto(out *ResourceGroup) { *out = *in diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index ae1c92ad805..00000000000 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,182 +0,0 @@ -// +build !ignore_autogenerated - -/* - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// autogenerated by controller-gen object, do not modify manually - -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RedisCache) DeepCopyInto(out *RedisCache) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status - out.Output = in.Output - in.AdditionalResources.DeepCopyInto(&out.AdditionalResources) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCache. -func (in *RedisCache) DeepCopy() *RedisCache { - if in == nil { - return nil - } - out := new(RedisCache) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RedisCache) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RedisCacheAdditionalResources) DeepCopyInto(out *RedisCacheAdditionalResources) { - *out = *in - if in.Secrets != nil { - in, out := &in.Secrets, &out.Secrets - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheAdditionalResources. -func (in *RedisCacheAdditionalResources) DeepCopy() *RedisCacheAdditionalResources { - if in == nil { - return nil - } - out := new(RedisCacheAdditionalResources) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RedisCacheList) DeepCopyInto(out *RedisCacheList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]RedisCache, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheList. -func (in *RedisCacheList) DeepCopy() *RedisCacheList { - if in == nil { - return nil - } - out := new(RedisCacheList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RedisCacheList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RedisCacheOutput) DeepCopyInto(out *RedisCacheOutput) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheOutput. -func (in *RedisCacheOutput) DeepCopy() *RedisCacheOutput { - if in == nil { - return nil - } - out := new(RedisCacheOutput) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RedisCacheProperties) DeepCopyInto(out *RedisCacheProperties) { - *out = *in - out.Sku = in.Sku -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheProperties. -func (in *RedisCacheProperties) DeepCopy() *RedisCacheProperties { - if in == nil { - return nil - } - out := new(RedisCacheProperties) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RedisCacheSku) DeepCopyInto(out *RedisCacheSku) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheSku. -func (in *RedisCacheSku) DeepCopy() *RedisCacheSku { - if in == nil { - return nil - } - out := new(RedisCacheSku) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RedisCacheSpec) DeepCopyInto(out *RedisCacheSpec) { - *out = *in - out.Properties = in.Properties -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheSpec. -func (in *RedisCacheSpec) DeepCopy() *RedisCacheSpec { - if in == nil { - return nil - } - out := new(RedisCacheSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RedisCacheStatus) DeepCopyInto(out *RedisCacheStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheStatus. 
-func (in *RedisCacheStatus) DeepCopy() *RedisCacheStatus { - if in == nil { - return nil - } - out := new(RedisCacheStatus) - in.DeepCopyInto(out) - return out -} diff --git a/config/crd/bases/service.azure_rediscaches.yaml b/config/crd/bases/azure.microsoft.com_rediscaches.yaml similarity index 98% rename from config/crd/bases/service.azure_rediscaches.yaml rename to config/crd/bases/azure.microsoft.com_rediscaches.yaml index 66430e765e9..ede9cc107fa 100644 --- a/config/crd/bases/service.azure_rediscaches.yaml +++ b/config/crd/bases/azure.microsoft.com_rediscaches.yaml @@ -4,9 +4,9 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: creationTimestamp: null - name: rediscaches.service.azure + name: rediscaches.azure.microsoft.com spec: - group: service.azure + group: azure.microsoft.com names: kind: RedisCache plural: rediscaches @@ -430,6 +430,7 @@ spec: description: RedisCacheSku the SKU of the Redis Cache. properties: capacity: + format: int32 type: integer family: type: string @@ -441,21 +442,24 @@ spec: type: string type: object type: object + resourceGroup: + type: string + required: + - resourceGroup type: object status: description: RedisCacheStatus defines the observed state of RedisCache properties: - deploymentName: - type: string - generation: - format: int64 - type: integer - provisioningState: - type: string + provisioned: + type: boolean + provisioning: + description: DeploymentName string `json:"deploymentName,omitempty"` + ProvisioningState string `json:"provisioningState,omitempty"` Generation int64 `json:"generation,omitempty"` + type: boolean type: object type: object versions: - - name: v1alpha1 + - name: v1 served: true storage: true status: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 2b7d235432e..9f275c1a521 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -7,9 +7,9 @@ metadata: name: manager-role rules: - apiGroups: - - service.azure + - azure.microsoft.com resources: - - rediscaches + - resourcegroups verbs: - create - delete @@ -21,27 +21,26 @@ rules: - apiGroups: - azure.microsoft.com resources: - - resourcegroups + - events verbs: - create - - delete - - get - - list - patch - - update - - watch - apiGroups: - - azure.microsoft.com + - service.azure resources: - - eventhubs/status + - cosmosdbs verbs: + - create + - delete - get + - list - patch - update + - watch - apiGroups: - azure.microsoft.com resources: - - eventhubnamespaces + - eventhubs verbs: - create - delete @@ -53,23 +52,23 @@ rules: - apiGroups: - azure.microsoft.com resources: - - eventhubnamespaces/status + - eventhubs/status verbs: - get - patch - update - apiGroups: - - service.azure + - azure.microsoft.com resources: - - storages/status + - keyvaults/status verbs: - get - patch - update - apiGroups: - - "" + - service.azure resources: - - secrets + - rediscaches verbs: - create - delete @@ -79,29 +78,25 @@ rules: - update - watch - apiGroups: - - azure.microsoft.com + - service.azure resources: - - eventhubs + - rediscaches/status verbs: - - create - - delete - get - - list - patch - update - - watch - apiGroups: - azure.microsoft.com resources: - - consumergroups/status + - resourcegroups/status verbs: - get - patch - update - apiGroups: - - azure.microsoft.com + - service.azure resources: - - keyvaults + - storages verbs: - create - delete @@ -111,25 +106,25 @@ rules: - update - watch - apiGroups: - - azure.microsoft.com + - service.azure resources: - - keyvaults/status + - storages/status verbs: - get - patch - update - 
apiGroups: - - azure.microsoft.com + - service.azure resources: - - resourcegroups/status + - cosmosdbs/status verbs: - get - patch - update - apiGroups: - - service.azure + - azure.microsoft.com resources: - - storages + - eventhubnamespaces verbs: - create - delete @@ -141,17 +136,23 @@ rules: - apiGroups: - "" resources: - - events + - secrets verbs: - create + - delete + - get + - list + - patch + - update - watch - apiGroups: - - azure.microsoft.com + - apps resources: - - events + - deployments/status verbs: - - create + - get - patch + - update - apiGroups: - azure.microsoft.com resources: @@ -165,45 +166,44 @@ rules: - update - watch - apiGroups: - - apps + - azure.microsoft.com resources: - - deployments/status + - consumergroups/status verbs: - get - patch - update - apiGroups: - - service.azure + - azure.microsoft.com resources: - - cosmosdbs/status + - eventhubnamespaces/status verbs: - get - patch - update - apiGroups: - - service.azure + - azure.microsoft.com resources: - - rediscaches/status + - keyvaults verbs: + - create + - delete - get + - list - patch - update + - watch - apiGroups: - - apps + - "" resources: - - deployments + - events verbs: - create - - delete - - get - - list - - patch - - update - watch - apiGroups: - - service.azure + - apps resources: - - cosmosdbs + - deployments verbs: - create - delete diff --git a/config/samples/rediscache.yaml b/config/samples/azure_v1_rediscache.yaml similarity index 50% rename from config/samples/rediscache.yaml rename to config/samples/azure_v1_rediscache.yaml index 9dc728d775e..54e680889a8 100644 --- a/config/samples/rediscache.yaml +++ b/config/samples/azure_v1_rediscache.yaml @@ -1,9 +1,10 @@ -apiVersion: service.azure/v1alpha1 +apiVersion: azure.microsoft.com/v1 kind: RedisCache metadata: - name: rediscache-sample + name: rediscache-sample123xyzkj spec: - location: eastus2 + location: westus + resourceGroup: resourcegroup-sample-1907 properties: sku: name: Basic diff --git a/controllers/rediscache_controller.go b/controllers/rediscache_controller.go index 444eeeaa25c..210a226d060 100644 --- a/controllers/rediscache_controller.go +++ b/controllers/rediscache_controller.go @@ -26,25 +26,31 @@ package controllers import ( "context" + "fmt" + "os" + "strconv" "time" "github.com/go-logr/logr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - servicev1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" - "github.com/Azure/azure-service-operator/pkg/client/deployment" - "github.com/Azure/azure-service-operator/pkg/client/group" - "github.com/Azure/azure-service-operator/pkg/config" + azurev1 "github.com/Azure/azure-service-operator/api/v1" + "github.com/Azure/azure-service-operator/pkg/errhelp" "github.com/Azure/azure-service-operator/pkg/helpers" - redisCacheTemplate "github.com/Azure/azure-service-operator/pkg/rediscache" - "github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/rediscaches" + //"github.com/Azure/go-autorest/autorest/to" + "k8s.io/client-go/tools/record" ) +const redisCacheFinalizerName = "rediscache.finalizers.azure.com" + // RedisCacheReconciler reconciles a RedisCache object type RedisCacheReconciler struct { client.Client - Log logr.Logger + Log logr.Logger + Recorder record.EventRecorder + RequeueTime time.Duration } // +kubebuilder:rbac:groups=service.azure,resources=rediscaches,verbs=get;list;watch;create;update;patch;delete @@ -55,19 +61,25 @@ func (r *RedisCacheReconciler) Reconcile(req ctrl.Request) 
(ctrl.Result, error) log := r.Log.WithValues("rediscache", req.NamespacedName) // Fetch the Redis Cache instance - instance := &servicev1alpha1.RedisCache{} - err := r.Get(ctx, req.NamespacedName, instance) + //instance := &servicev1alpha1.RedisCache{} + var instance azurev1.RedisCache + + requeueAfter, err := strconv.Atoi(os.Getenv("REQUEUE_AFTER")) if err != nil { + requeueAfter = 30 + } + + if err := r.Get(ctx, req.NamespacedName, &instance); err != nil { log.Error(err, "unable to fetch RedisCache") // we'll ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them // on deleted requests. - return ctrl.Result{}, helpers.IgnoreKubernetesResourceNotFound(err) + return ctrl.Result{}, client.IgnoreNotFound(err) } log.Info("Getting Redis Cache", "RedisCache.Namespace", instance.Namespace, "RedisCache.Name", instance.Name) log.V(1).Info("Describing Redis Cache", "RedisCache", instance) - redisCacheFinalizerName := "redisCache.finalizers.azure" + /* Pre refactor // examine DeletionTimestamp to determine if object is under deletion if instance.ObjectMeta.DeletionTimestamp.IsZero() { // The object is not being deleted, so if it does not have our finalizer, @@ -97,6 +109,7 @@ func (r *RedisCacheReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) } return ctrl.Result{}, err + } resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "redisCache", instance.Name, instance.Namespace) @@ -152,8 +165,125 @@ func (r *RedisCacheReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) // Redis Cache created successfully - don't requeue return ctrl.Result{}, nil + */ + + if helpers.IsBeingDeleted(&instance) { + if helpers.HasFinalizer(&instance, redisCacheFinalizerName) { + if err := r.deleteExternal(&instance); err != nil { + log.Info("Delete Redis Cache failed with ", err.Error()) + return ctrl.Result{}, err + } + + helpers.RemoveFinalizer(&instance, redisCacheFinalizerName) + if err := r.Update(context.Background(), &instance); err != nil { + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil + } + + if !helpers.HasFinalizer(&instance, redisCacheFinalizerName) { + if err := r.addFinalizer(&instance); err != nil { + log.Info("Adding redis cache finalizer failed with ", err.Error()) + return ctrl.Result{}, err + } + } + + if !instance.IsSubmitted() { + if err := r.reconcileExternal(&instance); err != nil { + if errhelp.IsAsynchronousOperationNotComplete(err) || errhelp.IsGroupNotFound(err) { + log.Info("Requeuing as the async operation is not complete") + return ctrl.Result{ + Requeue: true, + RequeueAfter: time.Second * time.Duration(requeueAfter), + }, nil + } + return ctrl.Result{}, fmt.Errorf("error reconciling redis cache in azure: %v", err) + } + return ctrl.Result{}, nil + } + + r.Recorder.Event(&instance, "Normal", "Provisioned", "RedisCache "+instance.ObjectMeta.Name+" provisioned ") + return ctrl.Result{}, nil } +func (r *RedisCacheReconciler) addFinalizer(instance *azurev1.RedisCache) error { + helpers.AddFinalizer(instance, redisCacheFinalizerName) + err := r.Update(context.Background(), instance) + if err != nil { + return fmt.Errorf("failed to update finalizer: %v", err) + } + r.Recorder.Event(instance, "Normal", "Updated", fmt.Sprintf("finalizer %s added", redisCacheFinalizerName)) + return nil +} + +func (r *RedisCacheReconciler) reconcileExternal(instance *azurev1.RedisCache) error { + ctx := context.Background() + location 
:= instance.Spec.Location + name := instance.ObjectMeta.Name + groupName := instance.Spec.ResourceGroupName + sku := instance.Spec.Properties.Sku + enableNonSSLPort := instance.Spec.Properties.EnableNonSslPort + + // write information back to instance + instance.Status.Provisioning = true + + if err := r.Status().Update(ctx, instance); err != nil { + r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance") + } + + _, err := rediscaches.CreateRedisCache(ctx, groupName, name, location, sku, enableNonSSLPort, nil) + if err != nil { + if errhelp.IsAsynchronousOperationNotComplete(err) || errhelp.IsGroupNotFound(err) { + r.Recorder.Event(instance, "Normal", "Provisioning", name+" provisioning") + return err + } + r.Recorder.Event(instance, "Warning", "Failed", "Couldn't create resource in azure") + instance.Status.Provisioning = false + errUpdate := r.Status().Update(ctx, instance) + if errUpdate != nil { + r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance") + } + return err + } + + instance.Status.Provisioning = false + instance.Status.Provisioned = true + + if err = r.Status().Update(ctx, instance); err != nil { + r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance") + } + + return nil +} + +func (r *RedisCacheReconciler) deleteExternal(instance *azurev1.RedisCache) error { + ctx := context.Background() + name := instance.ObjectMeta.Name + groupName := instance.Spec.ResourceGroupName + _, err := rediscaches.DeleteRedisCache(ctx, groupName, name) + if err != nil { + if errhelp.IsStatusCode204(err) { + r.Recorder.Event(instance, "Warning", "DoesNotExist", "Resource to delete does not exist") + return nil + } + + r.Recorder.Event(instance, "Warning", "Failed", "Couldn't delete resouce in azure") + return err + } + + r.Recorder.Event(instance, "Normal", "Deleted", name+" deleted") + return nil +} + +// SetupWithManager sets up the controller functions +func (r *RedisCacheReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&azurev1.RedisCache{}). + Complete(r) +} + +/* Pre Refactor func (r *RedisCacheReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&servicev1alpha1.RedisCache{}). 
@@ -245,3 +375,4 @@ func (r *RedisCacheReconciler) syncAdditionalResourcesAndOutput(req ctrl.Request return nil } +*/ diff --git a/main.go b/main.go index 6ae04cb84f1..c7cdeadeff7 100644 --- a/main.go +++ b/main.go @@ -99,8 +99,9 @@ func main() { os.Exit(1) } if err = (&controllers.RedisCacheReconciler{ - Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("RedisCache"), + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("RedisCache"), + Recorder: mgr.GetEventRecorderFor("RedisCache-controller"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "RedisCache") os.Exit(1) diff --git a/pkg/rediscache/rediscache_template.go b/pkg/rediscache/rediscache_template.go deleted file mode 100644 index 7c1fdf9d6db..00000000000 --- a/pkg/rediscache/rediscache_template.go +++ /dev/null @@ -1,51 +0,0 @@ -package rediscache - -import ( - "context" - "encoding/json" - - uuid "github.com/satori/go.uuid" - - azureV1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" - "github.com/Azure/azure-service-operator/pkg/client/deployment" - "github.com/Azure/azure-service-operator/pkg/template" -) - -// New generates a new object -func New(redisCache *azureV1alpha1.RedisCache) *Template { - return &Template{ - RedisCache: redisCache, - } -} - -// Template defines the dynamodb cfts -type Template struct { - RedisCache *azureV1alpha1.RedisCache -} - -func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (string, error) { - deploymentName := uuid.NewV4().String() - asset, err := template.Asset("rediscache.json") - templateContents := make(map[string]interface{}) - json.Unmarshal(asset, &templateContents) - params := map[string]interface{}{ - "location": map[string]interface{}{ - "value": t.RedisCache.Spec.Location, - }, - "properties.sku.name": map[string]interface{}{ - "value": t.RedisCache.Spec.Properties.Sku.Name, - }, - "properties.sku.family": map[string]interface{}{ - "value": t.RedisCache.Spec.Properties.Sku.Family, - }, - "properties.sku.capacity": map[string]interface{}{ - "value": t.RedisCache.Spec.Properties.Sku.Capacity, - }, - "properties.enableNonSslPort": map[string]interface{}{ - "value": t.RedisCache.Spec.Properties.EnableNonSslPort, - }, - } - - err = deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, &templateContents, &params) - return deploymentName, err -} diff --git a/pkg/resourcemanager/rediscaches/rediscaches.go b/pkg/resourcemanager/rediscaches/rediscaches.go new file mode 100644 index 00000000000..77f3483333a --- /dev/null +++ b/pkg/resourcemanager/rediscaches/rediscaches.go @@ -0,0 +1,132 @@ +package rediscaches + +import ( + "context" + //"encoding/json" + "errors" + "fmt" + "log" + + //uuid "github.com/satori/go.uuid" + + "github.com/Azure/azure-sdk-for-go/services/redis/mgmt/2018-03-01/redis" + azurev1 "github.com/Azure/azure-service-operator/api/v1" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/config" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/iam" + "github.com/Azure/go-autorest/autorest/to" +) + +func getRedisCacheClient() redis.Client { + redisClient := redis.NewClient(config.SubscriptionID()) + a, err := iam.GetResourceManagementAuthorizer() + if err != nil { + log.Fatalf("failed to initialize authorizer: %v\n", err) + } + redisClient.Authorizer = a + redisClient.AddToUserAgent(config.UserAgent()) + return redisClient +} + +// CreateRedisCache creates a new RedisCache +func CreateRedisCache(ctx 
context.Context, + groupName string, + redisCacheName string, + location string, + sku azurev1.RedisCacheSku, + enableNonSSLPort bool, + // kind azurev1.CosmosDBKind, + // dbType azurev1.CosmosDBDatabaseAccountOfferType, + tags map[string]*string) (redis.ResourceType, error) { + redisClient := getRedisCacheClient() + + log.Println("RedisCache:CacheName" + redisCacheName) + + //Check if name is available + redisType := "Microsoft.Cache/redis" + checkNameParams := redis.CheckNameAvailabilityParameters{ + Name: &redisCacheName, + Type: &redisType, + } + result, err := redisClient.CheckNameAvailability(ctx, checkNameParams) + if err != nil { + return redis.ResourceType{}, err + } + + if result.StatusCode != 200 { + log.Fatalf("redis cache name (%s) not available: %v\n", redisCacheName, result.Status) + return redis.ResourceType{}, errors.New("redis cache name not available") + } + + log.Println(fmt.Sprintf("creating rediscache '%s' in resource group '%s' and location: %v", redisCacheName, groupName, location)) + + redisSku := redis.Sku{ + Name: redis.SkuName(sku.Name), + Family: redis.SkuFamily(sku.Family), + Capacity: to.Int32Ptr(sku.Capacity), + } + + createParams := redis.CreateParameters{ + Location: to.StringPtr(location), + Tags: tags, + CreateProperties: &redis.CreateProperties{ + EnableNonSslPort: &enableNonSSLPort, + Sku: &redisSku, + }, + } + + future, err := redisClient.Create( + ctx, groupName, redisCacheName, createParams) + if err != nil { + log.Println(fmt.Sprintf("ERROR creating redisCache '%s' in resource group '%s' and location: %v", redisCacheName, groupName, location)) + log.Println(fmt.Printf("failed to initialize redis Cache: %v\n", err)) + } + return future.Result(redisClient) +} + +// DeleteRedisCache removes the resource group named by env var +func DeleteRedisCache(ctx context.Context, groupName string, redisCacheName string) (result redis.DeleteFuture, err error) { + redisClient := getRedisCacheClient() + return redisClient.Delete(ctx, groupName, redisCacheName) +} + +/* Before Refactor +// New generates a new object +func New(redisCache *azureV1alpha1.RedisCache) *Template { + return &Template{ + RedisCache: redisCache, + } +} + +// Template defines the dynamodb cfts +type Template struct { + RedisCache *azureV1alpha1.RedisCache +} + +func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (string, error) { + deploymentName := uuid.NewV4().String() + asset, err := template.Asset("rediscache.json") + templateContents := make(map[string]interface{}) + json.Unmarshal(asset, &templateContents) + params := map[string]interface{}{ + "location": map[string]interface{}{ + "value": t.RedisCache.Spec.Location, + }, + "properties.sku.name": map[string]interface{}{ + "value": t.RedisCache.Spec.Properties.Sku.Name, + }, + "properties.sku.family": map[string]interface{}{ + "value": t.RedisCache.Spec.Properties.Sku.Family, + }, + "properties.sku.capacity": map[string]interface{}{ + "value": t.RedisCache.Spec.Properties.Sku.Capacity, + }, + "properties.enableNonSslPort": map[string]interface{}{ + "value": t.RedisCache.Spec.Properties.EnableNonSslPort, + }, + } + + err = deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, &templateContents, &params) + return deploymentName, err +} + +*/ From 9c4d2e99ea521f4772b4d31feb89e7ecad705323 Mon Sep 17 00:00:00 2001 From: Chris Risner Date: Wed, 28 Aug 2019 16:48:57 -0600 Subject: [PATCH 33/34] Cleaned up code and removed references to v1alpha1 --- api/v1alpha1/groupversion_info.go | 44 ------ 
api/v1alpha1/suite_test.go | 83 ----------- config/rbac/role.yaml | 100 ++++++------- config/samples/azure_v1_cosmosdb.yaml | 4 +- config/samples/azure_v1_rediscache.yaml | 4 +- config/samples/azure_v1_resourcegroup.yaml | 2 +- config/samples/azure_v1_storage.yaml | 4 +- controllers/cosmosdb_controller.go | 123 +--------------- controllers/rediscache_controller.go | 122 +--------------- controllers/storage_controller.go | 134 +----------------- controllers/suite_test.go | 10 -- main.go | 4 - pkg/resourcemanager/cosmosdbs/cosmosdbs.go | 43 +----- .../rediscaches/rediscaches.go | 47 ------ pkg/resourcemanager/storages/storages.go | 60 +------- 15 files changed, 70 insertions(+), 714 deletions(-) delete mode 100644 api/v1alpha1/groupversion_info.go delete mode 100644 api/v1alpha1/suite_test.go diff --git a/api/v1alpha1/groupversion_info.go b/api/v1alpha1/groupversion_info.go deleted file mode 100644 index e76004295af..00000000000 --- a/api/v1alpha1/groupversion_info.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -MIT License - -Copyright (c) Microsoft Corporation. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE -*/ - -// Package v1alpha1 contains API Schema definitions for the service v1alpha1 API group -// +kubebuilder:object:generate=true -// +groupName=service.azure -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -var ( - // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "service.azure", Version: "v1alpha1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} - - // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme -) diff --git a/api/v1alpha1/suite_test.go b/api/v1alpha1/suite_test.go deleted file mode 100644 index 6c8a9507359..00000000000 --- a/api/v1alpha1/suite_test.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -MIT License - -Copyright (c) Microsoft Corporation. All rights reserved. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE -*/ - -package v1alpha1 - -import ( - "path/filepath" - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" -) - -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. - -var cfg *rest.Config -var k8sClient client.Client -var testEnv *envtest.Environment - -func TestAPIs(t *testing.T) { - RegisterFailHandler(Fail) - - RunSpecsWithDefaultAndCustomReporters(t, - "v1alpha1 Suite", - []Reporter{envtest.NewlineReporter{}}) -} - -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - - By("bootstrapping test environment") - testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, - } - - err := SchemeBuilder.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - cfg, err = testEnv.Start() - Expect(err).ToNot(HaveOccurred()) - Expect(cfg).ToNot(BeNil()) - - k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) - Expect(err).ToNot(HaveOccurred()) - Expect(k8sClient).ToNot(BeNil()) - - close(done) -}, 60) - -var _ = AfterSuite(func() { - By("tearing down the test environment") - err := testEnv.Stop() - Expect(err).ToNot(HaveOccurred()) -}) diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 9f275c1a521..23851d4e7eb 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -7,9 +7,9 @@ metadata: name: manager-role rules: - apiGroups: - - azure.microsoft.com + - apps resources: - - resourcegroups + - deployments verbs: - create - delete @@ -19,16 +19,17 @@ rules: - update - watch - apiGroups: - - azure.microsoft.com + - apps resources: - - events + - deployments/status verbs: - - create + - get - patch + - update - apiGroups: - - service.azure + - azure.microsoft.com resources: - - cosmosdbs + - consumergroups verbs: - create - delete @@ -40,7 +41,7 @@ rules: - apiGroups: - azure.microsoft.com resources: - - eventhubs + - eventhubnamespaces verbs: - create - delete @@ -52,7 +53,7 @@ rules: - apiGroups: - azure.microsoft.com resources: - - eventhubs/status + - keyvaults/status verbs: - get - patch @@ -60,15 +61,15 @@ rules: - apiGroups: - azure.microsoft.com resources: - - keyvaults/status + - 
resourcegroups/status verbs: - get - patch - update - apiGroups: - - service.azure + - "" resources: - - rediscaches + - secrets verbs: - create - delete @@ -77,18 +78,10 @@ rules: - patch - update - watch -- apiGroups: - - service.azure - resources: - - rediscaches/status - verbs: - - get - - patch - - update - apiGroups: - azure.microsoft.com resources: - - resourcegroups/status + - consumergroups/status verbs: - get - patch @@ -96,7 +89,7 @@ rules: - apiGroups: - service.azure resources: - - storages + - cosmosdbs verbs: - create - delete @@ -106,25 +99,29 @@ rules: - update - watch - apiGroups: - - service.azure + - azure.microsoft.com resources: - - storages/status + - eventhubs verbs: + - create + - delete - get + - list - patch - update + - watch - apiGroups: - service.azure resources: - - cosmosdbs/status + - rediscaches/status verbs: - get - patch - update - apiGroups: - - azure.microsoft.com + - service.azure resources: - - eventhubnamespaces + - storages verbs: - create - delete @@ -134,41 +131,31 @@ rules: - update - watch - apiGroups: - - "" + - azure.microsoft.com resources: - - secrets + - events verbs: - create - - delete - - get - - list - patch - - update - - watch - apiGroups: - - apps + - "" resources: - - deployments/status + - events verbs: - - get - - patch - - update + - create + - watch - apiGroups: - - azure.microsoft.com + - service.azure resources: - - consumergroups + - cosmosdbs/status verbs: - - create - - delete - get - - list - patch - update - - watch - apiGroups: - azure.microsoft.com resources: - - consumergroups/status + - eventhubs/status verbs: - get - patch @@ -194,16 +181,21 @@ rules: - update - watch - apiGroups: - - "" + - service.azure resources: - - events + - rediscaches verbs: - create + - delete + - get + - list + - patch + - update - watch - apiGroups: - - apps + - azure.microsoft.com resources: - - deployments + - resourcegroups verbs: - create - delete @@ -212,3 +204,11 @@ rules: - patch - update - watch +- apiGroups: + - service.azure + resources: + - storages/status + verbs: + - get + - patch + - update diff --git a/config/samples/azure_v1_cosmosdb.yaml b/config/samples/azure_v1_cosmosdb.yaml index b9ebff1c4bf..0741c205517 100644 --- a/config/samples/azure_v1_cosmosdb.yaml +++ b/config/samples/azure_v1_cosmosdb.yaml @@ -1,10 +1,10 @@ apiVersion: azure.microsoft.com/v1 kind: CosmosDB metadata: - name: cosmosdb-sample123xyzkj + name: cosmosdb-sample1908xyzkj spec: kind: GlobalDocumentDB location: westus - resourceGroup: resourcegroup-sample-1907 + resourceGroup: resourcegroup-sample-1908 properties: databaseAccountOfferType: Standard diff --git a/config/samples/azure_v1_rediscache.yaml b/config/samples/azure_v1_rediscache.yaml index 54e680889a8..a78959b3dfd 100644 --- a/config/samples/azure_v1_rediscache.yaml +++ b/config/samples/azure_v1_rediscache.yaml @@ -1,10 +1,10 @@ apiVersion: azure.microsoft.com/v1 kind: RedisCache metadata: - name: rediscache-sample123xyzkj + name: rediscache-sample1908xyzkj spec: location: westus - resourceGroup: resourcegroup-sample-1907 + resourceGroup: resourcegroup-sample-1908 properties: sku: name: Basic diff --git a/config/samples/azure_v1_resourcegroup.yaml b/config/samples/azure_v1_resourcegroup.yaml index fe67d8499f0..1f2009eaec1 100644 --- a/config/samples/azure_v1_resourcegroup.yaml +++ b/config/samples/azure_v1_resourcegroup.yaml @@ -1,6 +1,6 @@ apiVersion: azure.microsoft.com/v1 kind: ResourceGroup metadata: - name: resourcegroup-sample-1907 + name: resourcegroup-sample-1908 spec: location: "westus" 
diff --git a/config/samples/azure_v1_storage.yaml b/config/samples/azure_v1_storage.yaml index df8ba9d465e..88a1d9c1671 100644 --- a/config/samples/azure_v1_storage.yaml +++ b/config/samples/azure_v1_storage.yaml @@ -1,10 +1,10 @@ apiVersion: azure.microsoft.com/v1 kind: Storage metadata: - name: storagesample123xyzkj + name: storagesample1908xyzkj spec: location: westus - resourceGroup: resourcegroup-sample-1907 + resourceGroup: resourcegroup-sample-1908 sku: name: Standard_RAGRS kind: StorageV2 diff --git a/controllers/cosmosdb_controller.go b/controllers/cosmosdb_controller.go index 21c708369bc..181ac877935 100644 --- a/controllers/cosmosdb_controller.go +++ b/controllers/cosmosdb_controller.go @@ -39,7 +39,6 @@ import ( "github.com/Azure/azure-service-operator/pkg/errhelp" "github.com/Azure/azure-service-operator/pkg/helpers" "github.com/Azure/azure-service-operator/pkg/resourcemanager/cosmosdbs" - //"github.com/Azure/go-autorest/autorest/to" "k8s.io/client-go/tools/record" ) @@ -62,7 +61,6 @@ func (r *CosmosDBReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { log := r.Log.WithValues("cosmosdb", req.NamespacedName) // Fetch the CosmosDB instance - //instance := &servicev1alpha1.CosmosDB{} var instance azurev1.CosmosDB requeueAfter, err := strconv.Atoi(os.Getenv("REQUEUE_AFTER")) @@ -78,39 +76,7 @@ func (r *CosmosDBReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { return ctrl.Result{}, client.IgnoreNotFound(err) } log.Info("Getting CosmosDB Account", "CosmosDB.Namespace", instance.Namespace, "CosmosDB.Name", instance.Name) - log.V(1).Info("Describing CosmosDB Account", "CosmosDB", instance) - - /* Before Refactor - // examine DeletionTimestamp to determine if object is under deletion - if instance.ObjectMeta.DeletionTimestamp.IsZero() { - // The object is not being deleted, so if it does not have our finalizer, - // then lets add the finalizer and update the object. This is equivalent - // registering our finalizer. - if !helpers.ContainsString(instance.ObjectMeta.Finalizers, cosmosDBFinalizerName) { - instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, cosmosDBFinalizerName) - if err := r.Update(ctx, instance); err != nil { - return ctrl.Result{}, err - } - } - } else { - // The object is being deleted - if helpers.ContainsString(instance.ObjectMeta.Finalizers, cosmosDBFinalizerName) { - // our finalizer is present, so lets handle any external dependency - if err := r.deleteExternalResources(instance); err != nil { - // if fail to delete the external dependency here, return with error - // so that it can be retried - return ctrl.Result{}, err - } - - // remove our finalizer from the list and update it. 
- instance.ObjectMeta.Finalizers = helpers.RemoveString(instance.ObjectMeta.Finalizers, cosmosDBFinalizerName) - if err := r.Update(ctx, instance); err != nil { - return ctrl.Result{}, err - } - } - - return ctrl.Result{}, err - */ + log.V(1).Info("Describing CosmosDB Account", "CosmosDB", instance) if helpers.IsBeingDeleted(&instance) { if helpers.HasFinalizer(&instance, cosmosDBFinalizerName) { @@ -228,65 +194,10 @@ func (r *CosmosDBReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -/* Before Refactor - resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "cosmosdb", instance.Name, instance.Namespace) - deploymentName := instance.Status.DeploymentName - if deploymentName != "" { - log.Info("Checking deployment", "ResourceGroupName", resourceGroupName, "DeploymentName", deploymentName) - de, _ := deployment.GetDeployment(ctx, resourceGroupName, deploymentName) - provisioningState := *de.Properties.ProvisioningState - if helpers.IsDeploymentComplete(provisioningState) { - log.Info("Deployment is complete", "ProvisioningState", provisioningState) - _, err = r.updateStatus(req, resourceGroupName, deploymentName, provisioningState, de.Properties.Outputs) - if err != nil { - return ctrl.Result{}, err - } - if instance.Status.Generation == instance.ObjectMeta.Generation { - return ctrl.Result{}, nil - } - } else { - log.Info("Requeue the request", "ProvisioningState", provisioningState) - return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil - } - } - - instance.Status.Generation = instance.ObjectMeta.Generation - if err := r.Status().Update(ctx, instance); err != nil { - return ctrl.Result{}, err - } - - log.Info("Creating a new resource group", "ResourceGroupName", resourceGroupName) - tags := map[string]*string{ - "name": to.StringPtr(instance.Name), - "namespace": to.StringPtr(instance.Namespace), - "kind": to.StringPtr("cosmosdb"), - } - group.CreateGroup(ctx, resourceGroupName, instance.Spec.Location, tags) - - log.Info("Reconciling CosmosDB", "CosmosDB.Namespace", instance.Namespace, "CosmosDB.Name", instance.Name) - template := cosmosdbtemplate.New(instance) - deploymentName, err = template.CreateDeployment(ctx, resourceGroupName) - if err != nil { - log.Error(err, "Failed to reconcile CosmosDB") - return ctrl.Result{}, err - } - - de, _ := deployment.GetDeployment(ctx, resourceGroupName, deploymentName) - _, err = r.updateStatus(req, resourceGroupName, deploymentName, *de.Properties.ProvisioningState, nil) - if err != nil { - return ctrl.Result{}, err - } - - // CosmosDB created successfully - don't requeue - return ctrl.Result{}, nil -} - -func (r *CosmosDBReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&servicev1alpha1.CosmosDB{}). - Complete(r) -} - +/* Below code was from prior to refactor. + Left here for future reference for pulling out values post deployment. 
+ + func (r *CosmosDBReconciler) updateStatus(req ctrl.Request, resourceGroupName, deploymentName, provisioningState string, outputs interface{}) (*servicev1alpha1.CosmosDB, error) { ctx := context.Background() log := r.Log.WithValues("cosmosdb", req.NamespacedName) @@ -323,30 +234,6 @@ func (r *CosmosDBReconciler) updateStatus(req ctrl.Request, resourceGroupName, d return resourceCopy, nil } -func (r *CosmosDBReconciler) deleteExternalResources(instance *servicev1alpha1.CosmosDB) error { - // - // delete any external resources associated with the cosmosdb - // - // Ensure that delete implementation is idempotent and safe to invoke - // multiple types for same object. - ctx := context.Background() - log := r.Log.WithValues("CosmosDB.Namespace", instance.Namespace, "CosmosDB.Name", instance.Name) - - resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "cosmosdb", instance.Name, instance.Namespace) - log.Info("Deleting CosmosDB Account", "ResourceGroupName", resourceGroupName) - _, err := group.DeleteGroup(ctx, resourceGroupName) - if err != nil && helpers.IgnoreAzureResourceNotFound(err) != nil { - return err - } - - err = helpers.DeleteSecret(instance.Name, instance.Namespace) - if err != nil && helpers.IgnoreKubernetesResourceNotFound(err) != nil { - return err - } - - return nil -} - func (r *CosmosDBReconciler) syncAdditionalResourcesAndOutput(req ctrl.Request, s *servicev1alpha1.CosmosDB) (err error) { ctx := context.Background() log := r.Log.WithValues("cosmosdb", req.NamespacedName) diff --git a/controllers/rediscache_controller.go b/controllers/rediscache_controller.go index 210a226d060..24f922e5071 100644 --- a/controllers/rediscache_controller.go +++ b/controllers/rediscache_controller.go @@ -39,7 +39,6 @@ import ( "github.com/Azure/azure-service-operator/pkg/errhelp" "github.com/Azure/azure-service-operator/pkg/helpers" "github.com/Azure/azure-service-operator/pkg/resourcemanager/rediscaches" - //"github.com/Azure/go-autorest/autorest/to" "k8s.io/client-go/tools/record" ) @@ -61,7 +60,6 @@ func (r *RedisCacheReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) log := r.Log.WithValues("rediscache", req.NamespacedName) // Fetch the Redis Cache instance - //instance := &servicev1alpha1.RedisCache{} var instance azurev1.RedisCache requeueAfter, err := strconv.Atoi(os.Getenv("REQUEUE_AFTER")) @@ -79,94 +77,6 @@ func (r *RedisCacheReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) log.Info("Getting Redis Cache", "RedisCache.Namespace", instance.Namespace, "RedisCache.Name", instance.Name) log.V(1).Info("Describing Redis Cache", "RedisCache", instance) - /* Pre refactor - // examine DeletionTimestamp to determine if object is under deletion - if instance.ObjectMeta.DeletionTimestamp.IsZero() { - // The object is not being deleted, so if it does not have our finalizer, - // then lets add the finalizer and update the object. This is equivalent - // registering our finalizer. 
- if !helpers.ContainsString(instance.ObjectMeta.Finalizers, redisCacheFinalizerName) { - instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, redisCacheFinalizerName) - if err := r.Update(ctx, instance); err != nil { - return ctrl.Result{}, err - } - } - } else { - // The object is being deleted - if helpers.ContainsString(instance.ObjectMeta.Finalizers, redisCacheFinalizerName) { - // our finalizer is present, so lets handle any external dependency - if err := r.deleteExternalResources(instance); err != nil { - // if fail to delete the external dependency here, return with error - // so that it can be retried - return ctrl.Result{}, err - } - - // remove our finalizer from the list and update it. - instance.ObjectMeta.Finalizers = helpers.RemoveString(instance.ObjectMeta.Finalizers, redisCacheFinalizerName) - if err := r.Update(ctx, instance); err != nil { - return ctrl.Result{}, err - } - } - - return ctrl.Result{}, err - - } - - resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "redisCache", instance.Name, instance.Namespace) - deploymentName := instance.Status.DeploymentName - if deploymentName != "" { - log.Info("Checking deployment", "ResourceGroupName", resourceGroupName, "DeploymentName", deploymentName) - de, _ := deployment.GetDeployment(ctx, resourceGroupName, deploymentName) - if de.Properties == nil || de.Properties.ProvisioningState == nil { - return ctrl.Result{}, nil - } - provisioningState := *de.Properties.ProvisioningState - if helpers.IsDeploymentComplete(provisioningState) { - log.Info("Deployment is complete", "ProvisioningState", provisioningState) - _, err = r.updateStatus(req, resourceGroupName, deploymentName, provisioningState, de.Properties.Outputs) - if err != nil { - return ctrl.Result{}, err - } - if instance.Status.Generation == instance.ObjectMeta.Generation { - return ctrl.Result{}, nil - } - } else { - log.Info("Requeue the request", "ProvisioningState", provisioningState) - return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil - } - } - - instance.Status.Generation = instance.ObjectMeta.Generation - if err := r.Status().Update(ctx, instance); err != nil { - return ctrl.Result{}, err - } - - log.Info("Creating a new resource group", "ResourceGroupName", resourceGroupName) - tags := map[string]*string{ - "name": to.StringPtr(instance.Name), - "namespace": to.StringPtr(instance.Namespace), - "kind": to.StringPtr("redisCache"), - } - group.CreateGroup(ctx, resourceGroupName, instance.Spec.Location, tags) - - log.Info("Reconciling Redis Cache", "RedisCache.Namespace", instance.Namespace, "RedisCache.Name", instance.Name) - template := redisCacheTemplate.New(instance) - deploymentName, err = template.CreateDeployment(ctx, resourceGroupName) - if err != nil { - log.Error(err, "Failed to reconcile Redis Cache") - return ctrl.Result{}, err - } - - de, _ := deployment.GetDeployment(ctx, resourceGroupName, deploymentName) - _, err = r.updateStatus(req, resourceGroupName, deploymentName, *de.Properties.ProvisioningState, nil) - if err != nil { - return ctrl.Result{}, err - } - - // Redis Cache created successfully - don't requeue - return ctrl.Result{}, nil - */ - if helpers.IsBeingDeleted(&instance) { if helpers.HasFinalizer(&instance, redisCacheFinalizerName) { if err := r.deleteExternal(&instance); err != nil { @@ -283,12 +193,8 @@ func (r *RedisCacheReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -/* Pre Refactor -func (r 
*RedisCacheReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&servicev1alpha1.RedisCache{}). - Complete(r) -} +/* Below code was from prior to refactor. +Left here for future reference for pulling out values post deployment. func (r *RedisCacheReconciler) updateStatus(req ctrl.Request, resourceGroupName, deploymentName, provisioningState string, outputs interface{}) (*servicev1alpha1.RedisCache, error) { ctx := context.Background() @@ -327,30 +233,6 @@ func (r *RedisCacheReconciler) updateStatus(req ctrl.Request, resourceGroupName, return resourceCopy, nil } -func (r *RedisCacheReconciler) deleteExternalResources(instance *servicev1alpha1.RedisCache) error { - // - // delete any external resources associated with the storage - // - // Ensure that delete implementation is idempotent and safe to invoke - // multiple types for same object. - ctx := context.Background() - log := r.Log.WithValues("RedisCache.Namespace", instance.Namespace, "RedisCache.Name", instance.Name) - - resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "redisCache", instance.Name, instance.Namespace) - log.Info("Deleting Redis Cache", "ResourceGroupName", resourceGroupName) - _, err := group.DeleteGroup(ctx, resourceGroupName) - if err != nil && helpers.IgnoreAzureResourceNotFound(err) != nil { - return err - } - - err = helpers.DeleteSecret(instance.Name, instance.Namespace) - if err != nil && helpers.IgnoreKubernetesResourceNotFound(err) != nil { - return err - } - - return nil -} - func (r *RedisCacheReconciler) syncAdditionalResourcesAndOutput(req ctrl.Request, s *servicev1alpha1.RedisCache) (err error) { ctx := context.Background() log := r.Log.WithValues("redisCache", req.NamespacedName) diff --git a/controllers/storage_controller.go b/controllers/storage_controller.go index dbb40772712..e0af79dd5ee 100644 --- a/controllers/storage_controller.go +++ b/controllers/storage_controller.go @@ -31,23 +31,15 @@ import ( "strconv" "time" - //resoucegroupsresourcemanager "github.com/Azure/azure-service-operator/pkg/resourcemanager/resourcegroups" "github.com/go-logr/logr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" azurev1 "github.com/Azure/azure-service-operator/api/v1" - //servicev1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" - //"github.com/Azure/azure-service-operator/pkg/client/deployment" - //"github.com/Azure/azure-service-operator/pkg/client/group" - //"github.com/Azure/azure-service-operator/pkg/config" "github.com/Azure/azure-service-operator/pkg/errhelp" helpers "github.com/Azure/azure-service-operator/pkg/helpers" "github.com/Azure/azure-service-operator/pkg/resourcemanager/storages" "k8s.io/client-go/tools/record" - // "github.com/Azure/azure-service-operator/pkg/helpers" - //storagetemplate "github.com/Azure/azure-service-operator/pkg/storage" - // "github.com/Azure/go-autorest/autorest/to" ) const storageFinalizerName = "storage.finalizers.azure.com" @@ -69,7 +61,6 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { log := r.Log.WithValues("storage", req.NamespacedName) // Fetch the Storage instance - //instance := &servicev1alpha1.Storage{} var instance azurev1.Storage requeueAfter, err := strconv.Atoi(os.Getenv("REQUEUE_AFTER")) @@ -78,7 +69,6 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { } if err := r.Get(ctx, req.NamespacedName, &instance); err != nil { - // if err != nil 
{ log.Error(err, "unable to retrieve storage resource", "err", err.Error()) // we'll ignore not-found errors, since they can't be fixed by an immediate // requeue (we'll need to wait for a new notification), and we can get them @@ -90,37 +80,6 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { log.Info("Getting Storage Account", "Storage.Namespace", instance.Namespace, "Storage.Name", instance.Name) log.V(1).Info("Describing Storage Account", "Storage", instance) - //storageFinalizerName := "storage.finalizers.azure" - // examine DeletionTimestamp to determine if object is under deletion - /*if instance.ObjectMeta.DeletionTimestamp.IsZero() { - // The object is not being deleted, so if it does not have our finalizer, - // then lets add the finalizer and update the object. This is equivalent - // registering our finalizer. - if !helpers.ContainsString(instance.ObjectMeta.Finalizers, storageFinalizerName) { - instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, storageFinalizerName) - if err := r.Update(ctx, instance); err != nil { - return ctrl.Result{}, err - } - } - } else { - // The object is being deleted - if helpers.ContainsString(instance.ObjectMeta.Finalizers, storageFinalizerName) { - // our finalizer is present, so lets handle any external dependency - if err := r.deleteExternalResources(instance); err != nil { - // if fail to delete the external dependency here, return with error - // so that it can be retried - return ctrl.Result{}, err - } - - // remove our finalizer from the list and update it. - instance.ObjectMeta.Finalizers = helpers.RemoveString(instance.ObjectMeta.Finalizers, storageFinalizerName) - if err := r.Update(ctx, instance); err != nil { - return ctrl.Result{}, err - } - } - - return ctrl.Result{}, err - }*/ if helpers.IsBeingDeleted(&instance) { if helpers.HasFinalizer(&instance, storageFinalizerName) { if err := r.deleteExternal(&instance); err != nil { @@ -159,70 +118,6 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { r.Recorder.Event(&instance, "Normal", "Provisioned", "Storage "+instance.ObjectMeta.Name+" provisioned ") return ctrl.Result{}, nil - - //resourcegroupName := instance.ObjectMeta.Name - // log.Info("SubscriptionId: ", config.Instance.SubscriptionID) - // log.Info("ClusterName:", config.Instance.ClusterName) - // log.Info("instance.Name: ", instance.Name) - // log.Info("instance namespace: ", instance.Namespace) - //resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "storage", instance.Name, instance.Namespace) - /*esourceGroupName := helpers.AzrueResourceGroupName("b840fb3a-097c-462e-b658-5c6364683ae2", "myAKSCluster", "storage", "myinstancename", "mynamespace") - resourceGroupLocation := instance.Spec.Location - log.Info("storage controller", "rgn: ", resourceGroupName) - deploymentName := instance.Status.DeploymentName - if deploymentName != "" { - log.Info("Checking deployment", "ResourceGroupName", resourceGroupName, "DeploymentName", deploymentName) - de, _ := deployment.GetDeployment(ctx, resourceGroupName, deploymentName) - if de.Properties == nil || de.Properties.ProvisioningState == nil { - return ctrl.Result{}, nil - } - provisioningState := *de.Properties.ProvisioningState - if helpers.IsDeploymentComplete(provisioningState) { - log.Info("Deployment is complete", "ProvisioningState", provisioningState) - _, err = r.updateStatus(req, resourceGroupName, deploymentName, provisioningState, 
de.Properties.Outputs) - if err != nil { - return ctrl.Result{}, err - } - if instance.Status.Generation == instance.ObjectMeta.Generation { - return ctrl.Result{}, nil - } - } else { - log.Info("Requeue the request", "ProvisioningState", provisioningState) - return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil - } - } - instance.Status.Generation = instance.ObjectMeta.Generation - if err := r.Status().Update(ctx, instance); err != nil { - return ctrl.Result{}, err - } - - log.Info("Creating a new resource group", "ResourceGroupName", resourceGroupName) - // tags := map[string]*string{ - // "name": to.StringPtr(instance.Name), - // "namespace": to.StringPtr(instance.Namespace), - // "kind": to.StringPtr("storage"), - // } - //group.CreateGroup(ctx, resourceGroupName, resourcegroupLocation, tags) - //from RGController _, err = resoucegroupsresourcemanager.CreateGroup(ctx, resourcegroupName, resourcegroupLocation) - _, err = resoucegroupsresourcemanager.CreateGroup(ctx, resourceGroupName, resourceGroupLocation) - - log.Info("Reconciling Storage", "Storage.Namespace", instance.Namespace, "Storage.Name", instance.Name) - template := storagetemplate.New(instance) - deploymentName, err = template.CreateDeployment(ctx, resourceGroupName) - if err != nil { - log.Error(err, "Failed to reconcile Storage") - return ctrl.Result{}, err - } - - de, _ := deployment.GetDeployment(ctx, resourceGroupName, deploymentName) - _, err = r.updateStatus(req, resourceGroupName, deploymentName, *de.Properties.ProvisioningState, nil) - if err != nil { - return ctrl.Result{}, err - } - - // Storage created successfully - don't requeue - return ctrl.Result{}, nil - */ } func (r *StorageReconciler) addFinalizer(instance *azurev1.Storage) error { @@ -244,7 +139,6 @@ func (r *StorageReconciler) reconcileExternal(instance *azurev1.Storage) error { kind := instance.Spec.Kind accessTier := instance.Spec.AccessTier enableHTTPSTrafficOnly := instance.Spec.EnableHTTPSTrafficOnly - //sku, kind, tags, accesstier enabblehttpstraffice // write information back to instance instance.Status.Provisioning = true @@ -304,7 +198,9 @@ func (r *StorageReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -/* +/* Below code was from prior to refactor. + Left here for future reference for pulling out values post deployment. + func (r *StorageReconciler) updateStatus(req ctrl.Request, resourceGroupName, deploymentName, provisioningState string, outputs interface{}) (*servicev1alpha1.Storage, error) { ctx := context.Background() log := r.Log.WithValues("storage", req.NamespacedName) @@ -344,30 +240,6 @@ func (r *StorageReconciler) updateStatus(req ctrl.Request, resourceGroupName, de return resourceCopy, nil } -func (r *StorageReconciler) deleteExternalResources(instance *servicev1alpha1.Storage) error { - // - // delete any external resources associated with the storage - // - // Ensure that delete implementation is idempotent and safe to invoke - // multiple types for same object. 
- ctx := context.Background() - log := r.Log.WithValues("Storage.Namespace", instance.Namespace, "Storage.Name", instance.Name) - - resourceGroupName := helpers.AzrueResourceGroupName(config.Instance.SubscriptionID, config.Instance.ClusterName, "storage", instance.Name, instance.Namespace) - log.Info("Deleting Storage Account", "ResourceGroupName", resourceGroupName) - _, err := group.DeleteGroup(ctx, resourceGroupName) - if err != nil && helpers.IgnoreAzureResourceNotFound(err) != nil { - return err - } - - err = helpers.DeleteSecret(instance.Name, instance.Namespace) - if err != nil && helpers.IgnoreKubernetesResourceNotFound(err) != nil { - return err - } - - return nil -} - func (r *StorageReconciler) syncAdditionalResourcesAndOutput(req ctrl.Request, s *servicev1alpha1.Storage) (err error) { ctx := context.Background() log := r.Log.WithValues("storage", req.NamespacedName) diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 4d005a85d6b..4f9f94f799e 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -22,7 +22,6 @@ import ( "testing" azurev1 "github.com/Azure/azure-service-operator/api/v1" - servicev1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" resourcemanagerconfig "github.com/Azure/azure-service-operator/pkg/resourcemanager/config" eventhubs "github.com/Azure/azure-service-operator/pkg/resourcemanager/eventhubs" @@ -94,15 +93,6 @@ var _ = BeforeSuite(func(done Done) { Expect(err).ToNot(HaveOccurred()) Expect(cfg).ToNot(BeNil()) - err = servicev1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = servicev1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = servicev1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - err = azurev1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) diff --git a/main.go b/main.go index c7cdeadeff7..d5ef167cc56 100644 --- a/main.go +++ b/main.go @@ -19,9 +19,6 @@ import ( "flag" "os" - //"github.com/spf13/pflag" - - servicev1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" "k8s.io/apimachinery/pkg/runtime" azurev1 "github.com/Azure/azure-service-operator/api/v1" @@ -46,7 +43,6 @@ var ( func init() { - _ = servicev1alpha1.AddToScheme(scheme) azurev1.AddToScheme(scheme) kscheme.AddToScheme(scheme) _ = azurev1.AddToScheme(scheme) diff --git a/pkg/resourcemanager/cosmosdbs/cosmosdbs.go b/pkg/resourcemanager/cosmosdbs/cosmosdbs.go index 560ba420e29..72ab24b39eb 100644 --- a/pkg/resourcemanager/cosmosdbs/cosmosdbs.go +++ b/pkg/resourcemanager/cosmosdbs/cosmosdbs.go @@ -2,18 +2,13 @@ package cosmosdbs import ( "context" - //"encoding/json" - //"errors" "fmt" "log" - //uuid "github.com/satori/go.uuid" - "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2015-04-08/documentdb" azurev1 "github.com/Azure/azure-service-operator/api/v1" "github.com/Azure/azure-service-operator/pkg/resourcemanager/config" "github.com/Azure/azure-service-operator/pkg/resourcemanager/iam" - //"github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/to" ) @@ -51,7 +46,6 @@ func CreateCosmosDB(ctx context.Context, groupName string, }*/ dbKind := documentdb.DatabaseAccountKind(kind) - sDBType := string(dbType) /* @@ -107,39 +101,4 @@ func CreateCosmosDB(ctx context.Context, groupName string, func DeleteCosmosDB(ctx context.Context, groupName string, cosmosDBName string) (result documentdb.DatabaseAccountsDeleteFuture, err error) { cosmosDBClient := getCosmosDBClient() return cosmosDBClient.Delete(ctx, 
groupName, cosmosDBName) -} - -/* Pre-Refactor -// New generates a new object -func New(cosmosdb *azureV1alpha1.CosmosDB) *Template { - return &Template{ - CosmosDB: cosmosdb, - } -} - -// Template defines the dynamodb cfts -type Template struct { - CosmosDB *azureV1alpha1.CosmosDB -} - -func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (string, error) { - deploymentName := uuid.NewV4().String() - asset, err := template.Asset("cosmosdb.json") - templateContents := make(map[string]interface{}) - json.Unmarshal(asset, &templateContents) - params := map[string]interface{}{ - "location": map[string]interface{}{ - "value": t.CosmosDB.Spec.Location, - }, - "kind": map[string]interface{}{ - "value": t.CosmosDB.Spec.Kind, - }, - "properties": map[string]interface{}{ - "value": t.CosmosDB.Spec.Properties, - }, - } - - err = deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, &templateContents, &params) - return deploymentName, err -} -*/ +} \ No newline at end of file diff --git a/pkg/resourcemanager/rediscaches/rediscaches.go b/pkg/resourcemanager/rediscaches/rediscaches.go index 77f3483333a..8ebe0e2df29 100644 --- a/pkg/resourcemanager/rediscaches/rediscaches.go +++ b/pkg/resourcemanager/rediscaches/rediscaches.go @@ -2,13 +2,10 @@ package rediscaches import ( "context" - //"encoding/json" "errors" "fmt" "log" - - //uuid "github.com/satori/go.uuid" - "github.com/Azure/azure-sdk-for-go/services/redis/mgmt/2018-03-01/redis" azurev1 "github.com/Azure/azure-service-operator/api/v1" "github.com/Azure/azure-service-operator/pkg/resourcemanager/config" @@ -34,8 +31,6 @@ func CreateRedisCache(ctx context.Context, location string, sku azurev1.RedisCacheSku, enableNonSSLPort bool, - // kind azurev1.CosmosDBKind, - // dbType azurev1.CosmosDBDatabaseAccountOfferType, tags map[string]*string) (redis.ResourceType, error) { redisClient := getRedisCacheClient() @@ -88,45 +83,3 @@ func DeleteRedisCache(ctx context.Context, groupName string, redisCacheName stri redisClient := getRedisCacheClient() return redisClient.Delete(ctx, groupName, redisCacheName) } - -/* Before Refactor -// New generates a new object -func New(redisCache *azureV1alpha1.RedisCache) *Template { - return &Template{ - RedisCache: redisCache, - } -} - -// Template defines the dynamodb cfts -type Template struct { - RedisCache *azureV1alpha1.RedisCache -} - -func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (string, error) { - deploymentName := uuid.NewV4().String() - asset, err := template.Asset("rediscache.json") - templateContents := make(map[string]interface{}) - json.Unmarshal(asset, &templateContents) - params := map[string]interface{}{ - "location": map[string]interface{}{ - "value": t.RedisCache.Spec.Location, - }, - "properties.sku.name": map[string]interface{}{ - "value": t.RedisCache.Spec.Properties.Sku.Name, - }, - "properties.sku.family": map[string]interface{}{ - "value": t.RedisCache.Spec.Properties.Sku.Family, - }, - "properties.sku.capacity": map[string]interface{}{ - "value": t.RedisCache.Spec.Properties.Sku.Capacity, - }, - "properties.enableNonSslPort": map[string]interface{}{ - "value": t.RedisCache.Spec.Properties.EnableNonSslPort, - }, - } - - err = deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, &templateContents, &params) - return deploymentName, err -} - -*/ diff --git a/pkg/resourcemanager/storages/storages.go b/pkg/resourcemanager/storages/storages.go index 7f52314463e..153348fef20 100644 --- 
a/pkg/resourcemanager/storages/storages.go +++ b/pkg/resourcemanager/storages/storages.go @@ -2,17 +2,11 @@ package storages import ( "context" - //"encoding/json" "errors" "fmt" "log" - //uuid "github.com/satori/go.uuid" - "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage" - //azureV1alpha1 "github.com/Azure/azure-service-operator/api/v1alpha1" - //"github.com/Azure/azure-service-operator/pkg/client/deployment" - //"github.com/Azure/azure-service-operator/pkg/template" azurev1 "github.com/Azure/azure-service-operator/api/v1" "github.com/Azure/azure-service-operator/pkg/resourcemanager/config" "github.com/Azure/azure-service-operator/pkg/resourcemanager/iam" @@ -20,18 +14,6 @@ import ( "github.com/Azure/go-autorest/autorest/to" ) -// New generates a new object -// func New(storage *azureV1alpha1.Storage) *Template { -// return &Template{ -// Storage: storage, -// } -// } - -// // Template defines the dynamodb cfts -// type Template struct { -// Storage *azureV1alpha1.Storage -// } - func getStoragesClient() storage.AccountsClient { storagesClient := storage.NewAccountsClient(config.SubscriptionID()) a, err := iam.GetResourceManagementAuthorizer() @@ -53,10 +35,7 @@ func CreateStorage(ctx context.Context, groupName string, accessTier azurev1.StorageAccessTier, enableHTTPsTrafficOnly *bool) (storage.Account, error) { storagesClient := getStoragesClient() - // id, err := uuid.FromString(config.TenantID()) - // if err != nil { - // return storage.Account{}, err - // } + log.Println("Storage:AccountName" + storageAccountName) storageType := "Microsoft.Storage/storageAccounts" checkAccountParams := storage.AccountCheckNameAvailabilityParameters{Name: &storageAccountName, Type: &storageType} @@ -69,21 +48,14 @@ func CreateStorage(ctx context.Context, groupName string, log.Fatalf("storage account not available: %v\n", result.Reason) return storage.Account{}, errors.New("storage account not available") } - //mskuname := string(sku.Name) - //test := storage.Sku{Name{string(sku.Name)} - //msku := storage.Sku{Name : mskuname}//"test"} - //t2 := storage.Sku(sku) + sSku := storage.Sku{Name: storage.SkuName(sku.Name)} - //sKind := string(kind) - //sKind2 := storage.Kind{storage.Kind(kind)} sKind := storage.Kind(kind) - //sKind := storage.Kind{Kind : kind} sAccessTier := storage.AccessTier(accessTier) params := storage.AccountCreateParameters{ Location: to.StringPtr(location), Sku: &sSku, - //storage.Sku{Name{sku.Name}}, Kind: sKind, Tags: tags, Identity: nil, @@ -103,31 +75,3 @@ func DeleteStorage(ctx context.Context, groupName string, storageAccountName str storagesClient := getStoragesClient() return storagesClient.Delete(ctx, groupName, storageAccountName) } - -// Pre-Refactor -// func (t *Template) CreateDeployment(ctx context.Context, resourceGroupName string) (string, error) { -// deploymentName := uuid.NewV4().String() -// asset, err := template.Asset("storage.json") -// templateContents := make(map[string]interface{}) -// json.Unmarshal(asset, &templateContents) -// params := map[string]interface{}{ -// "location": map[string]interface{}{ -// "value": t.Storage.Spec.Location, -// }, -// "accountType": map[string]interface{}{ -// "value": t.Storage.Spec.Sku.Name, -// }, -// "kind": map[string]interface{}{ -// "value": t.Storage.Spec.Kind, -// }, -// "accessTier": map[string]interface{}{ -// "value": t.Storage.Spec.AccessTier, -// }, -// "supportsHttpsTrafficOnly": map[string]interface{}{ -// "value": *t.Storage.Spec.EnableHTTPSTrafficOnly, -// }, -// } - -// err = 
deployment.CreateDeployment(ctx, resourceGroupName, deploymentName, &templateContents, &params) -// return deploymentName, err -// } From 11a20aa8dc12df8b55ff093ad5712c6c199a60c6 Mon Sep 17 00:00:00 2001 From: Chris Risner Date: Thu, 29 Aug 2019 12:46:14 -0600 Subject: [PATCH 34/34] Updating controllers logging calls --- controllers/cosmosdb_controller.go | 12 ++++++------ controllers/rediscache_controller.go | 6 +++--- controllers/storage_controller.go | 6 +++--- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/controllers/cosmosdb_controller.go b/controllers/cosmosdb_controller.go index 181ac877935..28d019538c7 100644 --- a/controllers/cosmosdb_controller.go +++ b/controllers/cosmosdb_controller.go @@ -76,12 +76,12 @@ func (r *CosmosDBReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { return ctrl.Result{}, client.IgnoreNotFound(err) } log.Info("Getting CosmosDB Account", "CosmosDB.Namespace", instance.Namespace, "CosmosDB.Name", instance.Name) - log.V(1).Info("Describing CosmosDB Account", "CosmosDB", instance) + log.V(1).Info("Describing CosmosDB Account", "CosmosDB", instance) if helpers.IsBeingDeleted(&instance) { if helpers.HasFinalizer(&instance, cosmosDBFinalizerName) { if err := r.deleteExternal(&instance); err != nil { - log.Info("Delete CosmosDB failed with ", err.Error()) + log.Info("Error", "Delete CosmosDB failed with ", err) return ctrl.Result{}, err } @@ -95,7 +95,7 @@ func (r *CosmosDBReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { if !helpers.HasFinalizer(&instance, cosmosDBFinalizerName) { if err := r.addFinalizer(&instance); err != nil { - log.Info("Adding cosmosDB finalizer failed with ", err.Error()) + log.Info("Error", "Adding cosmosDB finalizer failed with ", err) return ctrl.Result{}, err } } @@ -194,10 +194,10 @@ func (r *CosmosDBReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -/* Below code was from prior to refactor. +/* Below code was from prior to refactor. Left here for future reference for pulling out values post deployment. - - + + func (r *CosmosDBReconciler) updateStatus(req ctrl.Request, resourceGroupName, deploymentName, provisioningState string, outputs interface{}) (*servicev1alpha1.CosmosDB, error) { ctx := context.Background() log := r.Log.WithValues("cosmosdb", req.NamespacedName) diff --git a/controllers/rediscache_controller.go b/controllers/rediscache_controller.go index 24f922e5071..b8cfef9064c 100644 --- a/controllers/rediscache_controller.go +++ b/controllers/rediscache_controller.go @@ -80,7 +80,7 @@ func (r *RedisCacheReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) if helpers.IsBeingDeleted(&instance) { if helpers.HasFinalizer(&instance, redisCacheFinalizerName) { if err := r.deleteExternal(&instance); err != nil { - log.Info("Delete Redis Cache failed with ", err.Error()) + log.Info("Error", "Delete Redis Cache failed with ", err) return ctrl.Result{}, err } @@ -94,7 +94,7 @@ func (r *RedisCacheReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) if !helpers.HasFinalizer(&instance, redisCacheFinalizerName) { if err := r.addFinalizer(&instance); err != nil { - log.Info("Adding redis cache finalizer failed with ", err.Error()) + log.Info("Error", "Adding redis cache finalizer failed with ", err) return ctrl.Result{}, err } } @@ -193,7 +193,7 @@ func (r *RedisCacheReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -/* Below code was from prior to refactor. 
Left here for future reference for pulling out values post deployment. func (r *RedisCacheReconciler) updateStatus(req ctrl.Request, resourceGroupName, deploymentName, provisioningState string, outputs interface{}) (*servicev1alpha1.RedisCache, error) { diff --git a/controllers/storage_controller.go b/controllers/storage_controller.go index e0af79dd5ee..6408f88709f 100644 --- a/controllers/storage_controller.go +++ b/controllers/storage_controller.go @@ -83,7 +83,7 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { if helpers.IsBeingDeleted(&instance) { if helpers.HasFinalizer(&instance, storageFinalizerName) { if err := r.deleteExternal(&instance); err != nil { - log.Info("Delete Storage failed with ", err.Error()) + log.Info("Error", "Delete Storage failed with ", err) return ctrl.Result{}, err } @@ -97,7 +97,7 @@ func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { if !helpers.HasFinalizer(&instance, storageFinalizerName) { if err := r.addFinalizer(&instance); err != nil { - log.Info("Adding storage finalizer failed with ", err.Error()) + log.Info("Error", "Adding storage finalizer failed with ", err) return ctrl.Result{}, err } } @@ -198,7 +198,7 @@ func (r *StorageReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -/* Below code was from prior to refactor. +/* Below code was from prior to refactor. Left here for future reference for pulling out values post deployment. func (r *StorageReconciler) updateStatus(req ctrl.Request, resourceGroupName, deploymentName, provisioningState string, outputs interface{}) (*servicev1alpha1.Storage, error) {
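
For quick reference, the sketch below shows how the refactored rediscaches resource manager introduced in this series could be exercised directly, outside the controllers, for example from a throwaway harness. It is a minimal sketch, not part of the patches: it assumes the CreateRedisCache/DeleteRedisCache signatures from pkg/resourcemanager/rediscaches/rediscaches.go, that subscription credentials are configured for pkg/resourcemanager/config and pkg/resourcemanager/iam, and that the resource group from config/samples already exists; the sku family "C" and capacity 1 are illustrative assumptions rather than values taken from the samples.

package main

import (
	"context"
	"log"

	azurev1 "github.com/Azure/azure-service-operator/api/v1"
	"github.com/Azure/azure-service-operator/pkg/resourcemanager/rediscaches"
)

func main() {
	ctx := context.Background()

	// SKU fields mirror how rediscaches.go consumes them (Name/Family/Capacity).
	// "Basic" matches config/samples/azure_v1_rediscache.yaml; "C" and 1 are
	// assumed example values.
	sku := azurev1.RedisCacheSku{
		Name:     "Basic",
		Family:   "C",
		Capacity: 1,
	}

	group := "resourcegroup-sample-1908"
	name := "rediscache-sample1908xyzkj"

	// Kick off creation; an "asynchronous operation not complete" style error
	// only means provisioning is still running, which the reconciler handles
	// by requeueing after REQUEUE_AFTER seconds.
	if _, err := rediscaches.CreateRedisCache(ctx, group, name, "westus", sku, false, nil); err != nil {
		log.Println("create returned:", err)
	}

	// Tear the cache down again.
	if _, err := rediscaches.DeleteRedisCache(ctx, group, name); err != nil {
		log.Println("delete returned:", err)
	}
}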