diff --git a/.envrc b/.envrc new file mode 100644 index 00000000000..37b10962dbc --- /dev/null +++ b/.envrc @@ -0,0 +1 @@ +export GO111MODULE=on diff --git a/.gitignore b/.gitignore index 12c5e121e94..53a14efc09c 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,9 @@ default.etcd # Output of the go coverage tool, specifically when used with LiteIDE *.out + +bin/ +vendor/ coverage.txt report.xml cover.html diff --git a/Makefile b/Makefile index 610cbfccf89..b85d526cf90 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,13 @@ # Image URL to use all building/pushing image targets + + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + IMG ?= controller:latest # Produce CRDs that work back to Kubernetes 1.11 (no version conversion) CRD_OPTIONS ?= "crd:trivialVersions=true" @@ -70,6 +79,9 @@ docker-build: docker-push: docker push ${IMG} +# Build and Push the docker image +build-and-push: docker-build docker-push + # find or download controller-gen # download controller-gen if necessary controller-gen: @@ -80,6 +92,14 @@ else CONTROLLER_GEN=$(shell which controller-gen) endif +.PHONY: install-bindata +install-bindata: + go get -u github.com/jteeuwen/go-bindata/... 
+ +.PHONY: generate-template +generate-template: + go-bindata -pkg template -prefix pkg/template/assets/ -o pkg/template/templates.go pkg/template/assets/ + create-kindcluster: ifeq (,$(shell kind get clusters)) @echo "no kind cluster" diff --git a/PROJECT b/PROJECT index 40562d2d55c..bf33b59a4d7 100644 --- a/PROJECT +++ b/PROJECT @@ -2,6 +2,15 @@ version: "2" domain: microsoft.com repo: github.com/Azure/azure-service-operator resources: +- group: service + version: v1 + kind: Storage +- group: service + version: v1 + kind: CosmosDB +- group: service + version: v1 + kind: RedisCache - group: azure version: v1 kind: Eventhub diff --git a/api/v1/cosmosdb_types.go b/api/v1/cosmosdb_types.go new file mode 100644 index 00000000000..292e54e3529 --- /dev/null +++ b/api/v1/cosmosdb_types.go @@ -0,0 +1,136 @@ +/* +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// CosmosDBSpec defines the desired state of CosmosDB +type CosmosDBSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // +kubebuilder:validation:MinLength=0 + + Location string `json:"location,omitempty"` + ResourceGroupName string `json:"resourceGroup"` + Kind CosmosDBKind `json:"kind,omitempty"` + Properties CosmosDBProperties `json:"properties,omitempty"` +} + +// CosmosDBKind enumerates the values for kind. +// Only one of the following kinds may be specified. +// If none of the following kinds is specified, the default one +// is GlobalDocumentDBKind. +// +kubebuilder:validation:Enum=GlobalDocumentDB;MongoDB +type CosmosDBKind string + +const ( + CosmosDBKindGlobalDocumentDB CosmosDBKind = "GlobalDocumentDB" + CosmosDBKindMongoDB CosmosDBKind = "MongoDB" +) + +// CosmosDBProperties the CosmosDBProperties of CosmosDB. +type CosmosDBProperties struct { + // CosmosDBDatabaseAccountOfferType - The offer type for the Cosmos DB database account. 
+ DatabaseAccountOfferType CosmosDBDatabaseAccountOfferType `json:"databaseAccountOfferType,omitempty"` + //Locations []CosmosDBLocation `json:"locations,omitempty"` +} + +// +kubebuilder:validation:Enum=Standard +type CosmosDBDatabaseAccountOfferType string + +const ( + CosmosDBDatabaseAccountOfferTypeStandard CosmosDBDatabaseAccountOfferType = "Standard" +) + +/* +type CosmosDBLocation struct { + FailoverPriority int `json:"failoverPriority,omitempty"` + LocationName string `json:"locationName,omitempty"` + IsZoneRedundant bool `json:"isZoneRedundant,omitempty"` +} +*/ + +// CosmosDBStatus defines the observed state of CosmosDB +type CosmosDBStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // DeploymentName string `json:"deploymentName,omitempty"` + // ProvisioningState string `json:"provisioningState,omitempty"` + // Generation int64 `json:"generation,omitempty"` + Provisioning bool `json:"provisioning,omitempty"` + Provisioned bool `json:"provisioned,omitempty"` +} + +type CosmosDBOutput struct { + CosmosDBName string `json:"cosmosDBName,omitempty"` + PrimaryMasterKey string `json:"primaryMasterKey,omitempty"` + //SecondaryMasterKey string `json:"secondaryMasterKey,omitempty"` + //PrimaryReadonlyMasterKey string `json:"primaryReadonlyMasterKey,omitempty"` + //SecondaryReadonlyMasterKey string `json:"secondaryReadonlyMasterKey,omitempty"` +} + +// CosmosDBAdditionalResources holds the additional resources +type CosmosDBAdditionalResources struct { + Secrets []string `json:"secrets,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CosmosDB is the Schema for the cosmosdbs API +type CosmosDB struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CosmosDBSpec `json:"spec,omitempty"` + Status CosmosDBStatus `json:"status,omitempty"` + Output CosmosDBOutput 
`json:"output,omitempty"` + AdditionalResources CosmosDBAdditionalResources `json:"additionalResources,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// CosmosDBList contains a list of CosmosDB +type CosmosDBList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CosmosDB `json:"items"` +} + +func init() { + SchemeBuilder.Register(&CosmosDB{}, &CosmosDBList{}) +} + +func (cosmosDB *CosmosDB) IsSubmitted() bool { + return cosmosDB.Status.Provisioning || cosmosDB.Status.Provisioned +} diff --git a/api/v1/rediscache_types.go b/api/v1/rediscache_types.go new file mode 100644 index 00000000000..826d42ed068 --- /dev/null +++ b/api/v1/rediscache_types.go @@ -0,0 +1,131 @@ +/* +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! 
THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// RedisCacheSpec defines the desired state of RedisCache +type RedisCacheSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // +kubebuilder:validation:MinLength=0 + + Location string `json:"location,omitempty"` + ResourceGroupName string `json:"resourceGroup"` + Properties RedisCacheProperties `json:"properties,omitempty"` +} + +// RedisCacheProperties the properties of the Redis Cache. +type RedisCacheProperties struct { + Sku RedisCacheSku `json:"sku,omitempty"` + + EnableNonSslPort bool `json:"enableNonSslPort,omitempty"` +} + +// RedisCacheSku the SKU of the Redis Cache. +type RedisCacheSku struct { + // Name - The SKU name. Required for cache creation; optional for update. + // Possible values include: 'Basic', 'Standard', 'Premium' + Name RedisCacheSkuName `json:"name,omitempty"` + + Family RedisCacheSkuFamily `json:"family,omitempty"` + + Capacity int32 `json:"capacity,omitempty"` +} + +type RedisCacheSkuName string + +const ( + Basic RedisCacheSkuName = "Basic" + Premium RedisCacheSkuName = "Premium" + Standard RedisCacheSkuName = "Standard" +) + +type RedisCacheSkuFamily string + +const ( + C RedisCacheSkuFamily = "C" + P RedisCacheSkuFamily = "P" +) + +// RedisCacheStatus defines the observed state of RedisCache +type RedisCacheStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // DeploymentName string `json:"deploymentName,omitempty"` + // ProvisioningState string `json:"provisioningState,omitempty"` + // Generation int64 `json:"generation,omitempty"` + Provisioning bool `json:"provisioning,omitempty"` + 
Provisioned bool `json:"provisioned,omitempty"` +} + +type RedisCacheOutput struct { + RedisCacheName string `json:"redisCacheName,omitempty"` + PrimaryKey string `json:"primaryKey,omitempty"` + SecondaryKey string `json:"secondaryKey,omitempty"` +} + +// RedisCacheAdditionalResources holds the additional resources +type RedisCacheAdditionalResources struct { + Secrets []string `json:"secrets,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// RedisCache is the Schema for the rediscaches API +type RedisCache struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec RedisCacheSpec `json:"spec,omitempty"` + Status RedisCacheStatus `json:"status,omitempty"` + Output RedisCacheOutput `json:"output,omitempty"` + AdditionalResources RedisCacheAdditionalResources `json:"additionalResources,omitempty"` +} + +// +kubebuilder:object:root=true + +// RedisCacheList contains a list of RedisCache +type RedisCacheList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RedisCache `json:"items"` +} + +func init() { + SchemeBuilder.Register(&RedisCache{}, &RedisCacheList{}) +} + +func (redisCache *RedisCache) IsSubmitted() bool { + return redisCache.Status.Provisioning || redisCache.Status.Provisioned +} diff --git a/api/v1/storage_types.go b/api/v1/storage_types.go new file mode 100644 index 00000000000..4a32f9f737b --- --- /dev/null +++ b/api/v1/storage_types.go @@ -0,0 +1,136 @@ +/* +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// StorageSpec defines the desired state of Storage +type StorageSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // +kubebuilder:validation:MinLength=0 + + Location string `json:"location,omitempty"` + ResourceGroupName string `json:"resourceGroup"` + + Sku StorageSku `json:"sku,omitempty"` + + Kind StorageKind `json:"kind,omitempty"` + + AccessTier StorageAccessTier `json:"accessTier,omitempty"` + + EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` +} + +// Sku the SKU of the storage account. +type StorageSku struct { + // Name - The SKU name. 
Required for account creation; optional for update. + // Possible values include: 'StandardLRS', 'StandardGRS', 'StandardRAGRS', 'StandardZRS', 'PremiumLRS', 'PremiumZRS', 'StandardGZRS', 'StandardRAGZRS' + Name StorageSkuName `json:"name,omitempty"` +} + +// StorageSkuName enumerates the values for sku name. +// Only one of the following sku names may be specified. +// If none of the following sku names is specified, the default one +// is StorageV2. +// +kubebuilder:validation:Enum=Premium_LRS;Premium_ZRS;Standard_GRS;Standard_GZRS;Standard_LRS;Standard_RAGRS;Standard_RAGZRS;Standard_ZRS +type StorageSkuName string + +// StorageKind enumerates the values for kind. +// Only one of the following kinds may be specified. +// If none of the following kinds is specified, the default one +// is StorageV2. +// +kubebuilder:validation:Enum=BlobStorage;BlockBlobStorage;FileStorage;Storage;StorageV2 +type StorageKind string + +// AccessTier enumerates the values for access tier. +// Only one of the following access tiers may be specified. +// If none of the following access tiers is specified, the default one +// is Hot. 
+// +kubebuilder:validation:Enum=Cool;Hot +type StorageAccessTier string + +// StorageStatus defines the observed state of Storage +type StorageStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // DeploymentName string `json:"deploymentName,omitempty"` + // ProvisioningState string `json:"provisioningState,omitempty"` + // Generation int64 `json:"generation,omitempty"` + Provisioning bool `json:"provisioning,omitempty"` + Provisioned bool `json:"provisioned,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Storage is the Schema for the storages API +type Storage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec StorageSpec `json:"spec,omitempty"` + Status StorageStatus `json:"status,omitempty"` + Output StorageOutput `json:"output,omitempty"` + AdditionalResources StorageAdditionalResources `json:"additionalResources,omitempty"` +} + +type StorageOutput struct { + StorageAccountName string `json:"storageAccountName,omitempty"` + Key1 string `json:"key1,omitempty"` + Key2 string `json:"key2,omitempty"` + ConnectionString1 string `json:"connectionString1,omitempty"` + ConnectionString2 string `json:"connectionString2,omitempty"` +} + +// StorageAdditionalResources holds the additional resources +type StorageAdditionalResources struct { + Secrets []string `json:"secrets,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// StorageList contains a list of Storage +type StorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Storage `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Storage{}, &StorageList{}) +} + +func (storage *Storage) IsSubmitted() bool { + return storage.Status.Provisioning || storage.Status.Provisioned +} diff --git 
a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 760242cc0f6..2f5e28346aa 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -112,6 +112,148 @@ func (in *ConsumerGroupStatus) DeepCopy() *ConsumerGroupStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDB) DeepCopyInto(out *CosmosDB) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + out.Output = in.Output + in.AdditionalResources.DeepCopyInto(&out.AdditionalResources) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDB. +func (in *CosmosDB) DeepCopy() *CosmosDB { + if in == nil { + return nil + } + out := new(CosmosDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CosmosDB) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBAdditionalResources) DeepCopyInto(out *CosmosDBAdditionalResources) { + *out = *in + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBAdditionalResources. +func (in *CosmosDBAdditionalResources) DeepCopy() *CosmosDBAdditionalResources { + if in == nil { + return nil + } + out := new(CosmosDBAdditionalResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CosmosDBList) DeepCopyInto(out *CosmosDBList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CosmosDB, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBList. +func (in *CosmosDBList) DeepCopy() *CosmosDBList { + if in == nil { + return nil + } + out := new(CosmosDBList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CosmosDBList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBOutput) DeepCopyInto(out *CosmosDBOutput) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBOutput. +func (in *CosmosDBOutput) DeepCopy() *CosmosDBOutput { + if in == nil { + return nil + } + out := new(CosmosDBOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBProperties) DeepCopyInto(out *CosmosDBProperties) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBProperties. +func (in *CosmosDBProperties) DeepCopy() *CosmosDBProperties { + if in == nil { + return nil + } + out := new(CosmosDBProperties) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CosmosDBSpec) DeepCopyInto(out *CosmosDBSpec) { + *out = *in + out.Properties = in.Properties +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBSpec. +func (in *CosmosDBSpec) DeepCopy() *CosmosDBSpec { + if in == nil { + return nil + } + out := new(CosmosDBSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CosmosDBStatus) DeepCopyInto(out *CosmosDBStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CosmosDBStatus. +func (in *CosmosDBStatus) DeepCopy() *CosmosDBStatus { + if in == nil { + return nil + } + out := new(CosmosDBStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Eventhub) DeepCopyInto(out *Eventhub) { *out = *in @@ -465,6 +607,164 @@ func (in *KeyVaultStatus) DeepCopy() *KeyVaultStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCache) DeepCopyInto(out *RedisCache) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + out.Output = in.Output + in.AdditionalResources.DeepCopyInto(&out.AdditionalResources) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCache. +func (in *RedisCache) DeepCopy() *RedisCache { + if in == nil { + return nil + } + out := new(RedisCache) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *RedisCache) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheAdditionalResources) DeepCopyInto(out *RedisCacheAdditionalResources) { + *out = *in + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheAdditionalResources. +func (in *RedisCacheAdditionalResources) DeepCopy() *RedisCacheAdditionalResources { + if in == nil { + return nil + } + out := new(RedisCacheAdditionalResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheList) DeepCopyInto(out *RedisCacheList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RedisCache, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheList. +func (in *RedisCacheList) DeepCopy() *RedisCacheList { + if in == nil { + return nil + } + out := new(RedisCacheList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RedisCacheList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedisCacheOutput) DeepCopyInto(out *RedisCacheOutput) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheOutput. +func (in *RedisCacheOutput) DeepCopy() *RedisCacheOutput { + if in == nil { + return nil + } + out := new(RedisCacheOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheProperties) DeepCopyInto(out *RedisCacheProperties) { + *out = *in + out.Sku = in.Sku +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheProperties. +func (in *RedisCacheProperties) DeepCopy() *RedisCacheProperties { + if in == nil { + return nil + } + out := new(RedisCacheProperties) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheSku) DeepCopyInto(out *RedisCacheSku) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheSku. +func (in *RedisCacheSku) DeepCopy() *RedisCacheSku { + if in == nil { + return nil + } + out := new(RedisCacheSku) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedisCacheSpec) DeepCopyInto(out *RedisCacheSpec) { + *out = *in + out.Properties = in.Properties +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheSpec. +func (in *RedisCacheSpec) DeepCopy() *RedisCacheSpec { + if in == nil { + return nil + } + out := new(RedisCacheSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RedisCacheStatus) DeepCopyInto(out *RedisCacheStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisCacheStatus. +func (in *RedisCacheStatus) DeepCopy() *RedisCacheStatus { + if in == nil { + return nil + } + out := new(RedisCacheStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceGroup) DeepCopyInto(out *ResourceGroup) { *out = *in @@ -553,3 +853,150 @@ func (in *ResourceGroupStatus) DeepCopy() *ResourceGroupStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Storage) DeepCopyInto(out *Storage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + out.Output = in.Output + in.AdditionalResources.DeepCopyInto(&out.AdditionalResources) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. +func (in *Storage) DeepCopy() *Storage { + if in == nil { + return nil + } + out := new(Storage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Storage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageAdditionalResources) DeepCopyInto(out *StorageAdditionalResources) { + *out = *in + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageAdditionalResources. +func (in *StorageAdditionalResources) DeepCopy() *StorageAdditionalResources { + if in == nil { + return nil + } + out := new(StorageAdditionalResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageList) DeepCopyInto(out *StorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Storage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageList. +func (in *StorageList) DeepCopy() *StorageList { + if in == nil { + return nil + } + out := new(StorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageOutput) DeepCopyInto(out *StorageOutput) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOutput. +func (in *StorageOutput) DeepCopy() *StorageOutput { + if in == nil { + return nil + } + out := new(StorageOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *StorageSku) DeepCopyInto(out *StorageSku) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSku. +func (in *StorageSku) DeepCopy() *StorageSku { + if in == nil { + return nil + } + out := new(StorageSku) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { + *out = *in + out.Sku = in.Sku + if in.EnableHTTPSTrafficOnly != nil { + in, out := &in.EnableHTTPSTrafficOnly, &out.EnableHTTPSTrafficOnly + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. +func (in *StorageSpec) DeepCopy() *StorageSpec { + if in == nil { + return nil + } + out := new(StorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageStatus) DeepCopyInto(out *StorageStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStatus. 
+func (in *StorageStatus) DeepCopy() *StorageStatus { + if in == nil { + return nil + } + out := new(StorageStatus) + in.DeepCopyInto(out) + return out +} diff --git a/config/crd/bases/azure.microsoft.com_cosmosdbs.yaml b/config/crd/bases/azure.microsoft.com_cosmosdbs.yaml new file mode 100644 index 00000000000..ed4c39e607a --- /dev/null +++ b/config/crd/bases/azure.microsoft.com_cosmosdbs.yaml @@ -0,0 +1,465 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: cosmosdbs.azure.microsoft.com +spec: + group: azure.microsoft.com + names: + kind: CosmosDB + plural: cosmosdbs + scope: "" + subresources: + status: {} + validation: + openAPIV3Schema: + description: CosmosDB is the Schema for the cosmosdbs API + properties: + additionalResources: + description: CosmosDBAdditionalResources holds the additional resources + properties: + secrets: + items: + type: string + type: array + type: object + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + description: ObjectMeta is metadata that all persisted resources must have, + which includes all objects users must create. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with + a resource that may be set by external tools to store and retrieve + arbitrary metadata. 
They are not queryable and should be preserved + when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + clusterName: + description: The name of the cluster which the object belongs to. This + is used to distinguish resources with same name and namespace in different + clusters. This field is not set anywhere right now and apiserver is + going to ignore it if set in create or update request. + type: string + creationTimestamp: + description: "CreationTimestamp is a timestamp representing the server + time when this object was created. It is not guaranteed to be set + in happens-before order across separate operations. Clients may not + set this value. It is represented in RFC3339 form and is in UTC. \n + Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" + format: date-time + type: string + deletionGracePeriodSeconds: + description: Number of seconds allowed for this object to gracefully + terminate before it will be removed from the system. Only set when + deletionTimestamp is also set. May only be shortened. Read-only. + format: int64 + type: integer + deletionTimestamp: + description: "DeletionTimestamp is RFC 3339 date and time at which this + resource will be deleted. This field is set by the server when a graceful + deletion is requested by the user, and is not directly settable by + a client. The resource is expected to be deleted (no longer visible + from resource lists, and not reachable by name) after the time in + this field, once the finalizers list is empty. As long as the finalizers + list contains items, deletion is blocked. Once the deletionTimestamp + is set, this value may not be unset or be set further into the future, + although it may be shortened or the resource may be deleted prior + to this time. For example, a user may request that a pod is deleted + in 30 seconds. 
The Kubelet will react by sending a graceful termination + signal to the containers in the pod. After that 30 seconds, the Kubelet + will send a hard termination signal (SIGKILL) to the container and + after cleanup, remove the pod from the API. In the presence of network + partitions, this object may still exist after this timestamp, until + an administrator or automated process can determine the resource is + fully terminated. If not set, graceful deletion of the object has + not been requested. \n Populated by the system when a graceful deletion + is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" + format: date-time + type: string + finalizers: + description: Must be empty before the object is deleted from the registry. + Each entry is an identifier for the responsible component that will + remove the entry from the list. If the deletionTimestamp of the object + is non-nil, entries in this list can only be removed. + items: + type: string + type: array + generateName: + description: "GenerateName is an optional prefix, used by the server, + to generate a unique name ONLY IF the Name field has not been provided. + If this field is used, the name returned to the client will be different + than the name passed. This value will also be combined with a unique + suffix. The provided value has the same validation rules as the Name + field, and may be truncated by the length of the suffix required to + make the value unique on the server. \n If this field is specified + and the generated name exists, the server will NOT return a 409 - + instead, it will either return 201 Created or 500 with Reason ServerTimeout + indicating a unique name could not be found in the time allotted, + and the client should retry (optionally after the time indicated in + the Retry-After header). \n Applied only if Name is not specified. 
+ More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" + type: string + generation: + description: A sequence number representing a specific generation of + the desired state. Populated by the system. Read-only. + format: int64 + type: integer + initializers: + description: "An initializer is a controller which enforces some system + invariant at object creation time. This field is a list of initializers + that have not yet acted on this object. If nil or empty, this object + has been completely initialized. Otherwise, the object is considered + uninitialized and is hidden (in list/watch and get calls) from clients + that haven't explicitly asked to observe uninitialized objects. \n + When an object is created, the system will populate this list with + the current set of initializers. Only privileged users may set or + modify this list. Once it is empty, it may not be modified further + by any user. \n DEPRECATED - initializers are an alpha field and will + be removed in v1.15." + properties: + pending: + description: Pending is a list of initializers that must execute + in order before this object is visible. When the last pending + initializer is removed, and no failing result is set, the initializers + struct will be set to nil and the object is considered as initialized + and visible to all clients. + items: + description: Initializer is information about an initializer that + has not yet completed. + properties: + name: + description: name of the process that is responsible for initializing + this object. + type: string + required: + - name + type: object + type: array + result: + description: If result is set with the Failure field, the object + will be persisted to storage and then deleted, ensuring that other + clients can observe the deletion. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. 
Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + code: + description: Suggested HTTP return code for this status, 0 if + not set. + format: int32 + type: integer + details: + description: Extended data associated with the reason. Each + reason may define its own extended details. This field is + optional and the data returned is not guaranteed to conform + to any schema except that defined by the reason type. + properties: + causes: + description: The Causes array includes more details associated + with the StatusReason failure. Not all StatusReasons may + provide detailed causes. + items: + description: StatusCause provides more information about + an api.Status failure, including cases when multiple + errors are encountered. + properties: + field: + description: "The field of the resource that has caused + this error, as named by its JSON serialization. + May include dot and postfix notation for nested + attributes. Arrays are zero-indexed. Fields may + appear more than once in an array of causes due + to fields having multiple errors. Optional. \n Examples: + \ \"name\" - the field \"name\" on the current + resource \"items[0].name\" - the field \"name\" + on the first array entry in \"items\"" + type: string + message: + description: A human-readable description of the cause + of the error. This field may be presented as-is + to a reader. + type: string + reason: + description: A machine-readable description of the + cause of the error. If this value is empty there + is no information available. + type: string + type: object + type: array + group: + description: The group attribute of the resource associated + with the status StatusReason. + type: string + kind: + description: 'The kind attribute of the resource associated + with the status StatusReason. 
On some operations may differ + from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: The name attribute of the resource associated + with the status StatusReason (when there is a single name + which can be described). + type: string + retryAfterSeconds: + description: If specified, the time in seconds before the + operation should be retried. Some errors may indicate + the client must take an alternate action - for those errors + this field may indicate how long to wait before taking + the alternate action. + format: int32 + type: integer + uid: + description: 'UID of the resource. (when there is a single + resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + type: object + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + message: + description: A human-readable description of the status of this + operation. + type: string + metadata: + description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + properties: + continue: + description: continue may be set if the user set a limit + on the number of items returned, and indicates that the + server has more data available. The value is opaque and + may be used to issue another request to the endpoint that + served this list to retrieve the next set of available + objects. Continuing a consistent list may not be possible + if the server configuration has changed or more than a + few minutes have passed. 
The resourceVersion field returned + when using this continue value will be identical to the + value in the first response, unless you have received + this token from an error message. + type: string + resourceVersion: + description: 'String that identifies the server''s internal + version of this object that can be used by clients to + determine when objects have changed. Value must be treated + as opaque by clients and passed unmodified back to the + server. Populated by the system. Read-only. More info: + https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + selfLink: + description: selfLink is a URL representing this object. + Populated by the system. Read-only. + type: string + type: object + reason: + description: A machine-readable description of why this operation + is in the "Failure" status. If this value is empty there is + no information available. A Reason clarifies an HTTP status + code but does not override it. + type: string + status: + description: 'Status of the operation. One of: "Success" or + "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' + type: string + type: object + required: + - pending + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize + and categorize (scope and select) objects. May match selectors of + replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + managedFields: + description: "ManagedFields maps workflow-id and version to the set + of fields that are managed by that workflow. This is mostly for internal + housekeeping, and users typically shouldn't need to set or understand + this field. A workflow can be the user's name, a controller's name, + or the name of a specific apply path like \"ci-cd\". 
The set of fields + is always in the version that the workflow used when modifying the + object. \n This field is alpha and can be changed or removed without + notice." + items: + description: ManagedFieldsEntry is a workflow-id, a FieldSet and the + group version of the resource that the fieldset applies to. + properties: + apiVersion: + description: APIVersion defines the version of this resource that + this field set applies to. The format is "group/version" just + like the top-level APIVersion field. It is necessary to track + the version of a field set because it cannot be automatically + converted. + type: string + fields: + additionalProperties: true + description: Fields identifies a set of fields. + type: object + manager: + description: Manager is an identifier of the workflow managing + these fields. + type: string + operation: + description: Operation is the type of operation which lead to + this ManagedFieldsEntry being created. The only valid values + for this field are 'Apply' and 'Update'. + type: string + time: + description: Time is timestamp of when these fields were set. + It should always be empty if Operation is 'Apply' + format: date-time + type: string + type: object + type: array + name: + description: 'Name must be unique within a namespace. Is required when + creating resources, although some resources may allow a client to + request the generation of an appropriate name automatically. Name + is primarily intended for creation idempotence and configuration definition. + Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. + An empty namespace is equivalent to the \"default\" namespace, but + \"default\" is the canonical representation. Not all objects are required + to be scoped to a namespace - the value of this field for those objects + will be empty. \n Must be a DNS_LABEL. Cannot be updated. 
More info: + http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects + in the list have been deleted, this object will be garbage collected. + If this object is managed by a controller, then an entry in this list + will point to this controller, with the controller field set to true. + There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information to let you + identify an owning object. An owning object must be in the same + namespace as the dependent, or be cluster-scoped, so there is no + namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from the key-value + store until this reference is removed. Defaults to false. To + set this field, a user needs "delete" permission of the owner, + otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + resourceVersion: + description: "An opaque value that represents the internal version of + this object that can be used by clients to determine when objects + have changed. May be used for optimistic concurrency, change detection, + and the watch operation on a resource or set of resources. 
Clients + must treat these values as opaque and passed unmodified back to the + server. They may only be valid for a particular resource or set of + resources. \n Populated by the system. Read-only. Value must be treated + as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency" + type: string + selfLink: + description: SelfLink is a URL representing this object. Populated by + the system. Read-only. + type: string + uid: + description: "UID is the unique in time and space value for this object. + It is typically generated by the server on successful creation of + a resource and is not allowed to change on PUT operations. \n Populated + by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + type: string + type: object + output: + properties: + cosmosDBName: + type: string + primaryMasterKey: + type: string + type: object + spec: + description: CosmosDBSpec defines the desired state of CosmosDB + properties: + kind: + description: CosmosDBKind enumerates the values for kind. Only one of + the following kinds may be specified. If none of the following kinds + is specified, the default one is GlobalDocumentDBKind. + enum: + - GlobalDocumentDB + - MongoDB + type: string + location: + minLength: 0 + type: string + properties: + description: CosmosDBProperties the CosmosDBProperties of CosmosDB. + properties: + databaseAccountOfferType: + description: CosmosDBDatabaseAccountOfferType - The offer type for + the Cosmos DB database account. 
+ enum: + - Standard + type: string + type: object + resourceGroup: + type: string + required: + - resourceGroup + type: object + status: + description: CosmosDBStatus defines the observed state of CosmosDB + properties: + provisioned: + type: boolean + provisioning: + description: DeploymentName string `json:"deploymentName,omitempty"` + ProvisioningState string `json:"provisioningState,omitempty"` Generation int64 `json:"generation,omitempty"` + type: boolean + type: object + type: object + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/azure.microsoft.com_rediscaches.yaml b/config/crd/bases/azure.microsoft.com_rediscaches.yaml new file mode 100644 index 00000000000..ede9cc107fa --- /dev/null +++ b/config/crd/bases/azure.microsoft.com_rediscaches.yaml @@ -0,0 +1,470 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: rediscaches.azure.microsoft.com +spec: + group: azure.microsoft.com + names: + kind: RedisCache + plural: rediscaches + scope: "" + subresources: + status: {} + validation: + openAPIV3Schema: + description: RedisCache is the Schema for the rediscaches API + properties: + additionalResources: + description: StorageAdditionalResources holds the additional resources + properties: + secrets: + items: + type: string + type: array + type: object + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + description: ObjectMeta is metadata that all persisted resources must have, + which includes all objects users must create. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with + a resource that may be set by external tools to store and retrieve + arbitrary metadata. They are not queryable and should be preserved + when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + clusterName: + description: The name of the cluster which the object belongs to. This + is used to distinguish resources with same name and namespace in different + clusters. This field is not set anywhere right now and apiserver is + going to ignore it if set in create or update request. + type: string + creationTimestamp: + description: "CreationTimestamp is a timestamp representing the server + time when this object was created. It is not guaranteed to be set + in happens-before order across separate operations. Clients may not + set this value. It is represented in RFC3339 form and is in UTC. \n + Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" + format: date-time + type: string + deletionGracePeriodSeconds: + description: Number of seconds allowed for this object to gracefully + terminate before it will be removed from the system. Only set when + deletionTimestamp is also set. May only be shortened. Read-only. + format: int64 + type: integer + deletionTimestamp: + description: "DeletionTimestamp is RFC 3339 date and time at which this + resource will be deleted. This field is set by the server when a graceful + deletion is requested by the user, and is not directly settable by + a client. 
The resource is expected to be deleted (no longer visible + from resource lists, and not reachable by name) after the time in + this field, once the finalizers list is empty. As long as the finalizers + list contains items, deletion is blocked. Once the deletionTimestamp + is set, this value may not be unset or be set further into the future, + although it may be shortened or the resource may be deleted prior + to this time. For example, a user may request that a pod is deleted + in 30 seconds. The Kubelet will react by sending a graceful termination + signal to the containers in the pod. After that 30 seconds, the Kubelet + will send a hard termination signal (SIGKILL) to the container and + after cleanup, remove the pod from the API. In the presence of network + partitions, this object may still exist after this timestamp, until + an administrator or automated process can determine the resource is + fully terminated. If not set, graceful deletion of the object has + not been requested. \n Populated by the system when a graceful deletion + is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" + format: date-time + type: string + finalizers: + description: Must be empty before the object is deleted from the registry. + Each entry is an identifier for the responsible component that will + remove the entry from the list. If the deletionTimestamp of the object + is non-nil, entries in this list can only be removed. + items: + type: string + type: array + generateName: + description: "GenerateName is an optional prefix, used by the server, + to generate a unique name ONLY IF the Name field has not been provided. + If this field is used, the name returned to the client will be different + than the name passed. This value will also be combined with a unique + suffix. 
The provided value has the same validation rules as the Name + field, and may be truncated by the length of the suffix required to + make the value unique on the server. \n If this field is specified + and the generated name exists, the server will NOT return a 409 - + instead, it will either return 201 Created or 500 with Reason ServerTimeout + indicating a unique name could not be found in the time allotted, + and the client should retry (optionally after the time indicated in + the Retry-After header). \n Applied only if Name is not specified. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" + type: string + generation: + description: A sequence number representing a specific generation of + the desired state. Populated by the system. Read-only. + format: int64 + type: integer + initializers: + description: "An initializer is a controller which enforces some system + invariant at object creation time. This field is a list of initializers + that have not yet acted on this object. If nil or empty, this object + has been completely initialized. Otherwise, the object is considered + uninitialized and is hidden (in list/watch and get calls) from clients + that haven't explicitly asked to observe uninitialized objects. \n + When an object is created, the system will populate this list with + the current set of initializers. Only privileged users may set or + modify this list. Once it is empty, it may not be modified further + by any user. \n DEPRECATED - initializers are an alpha field and will + be removed in v1.15." + properties: + pending: + description: Pending is a list of initializers that must execute + in order before this object is visible. When the last pending + initializer is removed, and no failing result is set, the initializers + struct will be set to nil and the object is considered as initialized + and visible to all clients. 
+ items: + description: Initializer is information about an initializer that + has not yet completed. + properties: + name: + description: name of the process that is responsible for initializing + this object. + type: string + required: + - name + type: object + type: array + result: + description: If result is set with the Failure field, the object + will be persisted to storage and then deleted, ensuring that other + clients can observe the deletion. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + code: + description: Suggested HTTP return code for this status, 0 if + not set. + format: int32 + type: integer + details: + description: Extended data associated with the reason. Each + reason may define its own extended details. This field is + optional and the data returned is not guaranteed to conform + to any schema except that defined by the reason type. + properties: + causes: + description: The Causes array includes more details associated + with the StatusReason failure. Not all StatusReasons may + provide detailed causes. + items: + description: StatusCause provides more information about + an api.Status failure, including cases when multiple + errors are encountered. + properties: + field: + description: "The field of the resource that has caused + this error, as named by its JSON serialization. + May include dot and postfix notation for nested + attributes. Arrays are zero-indexed. Fields may + appear more than once in an array of causes due + to fields having multiple errors. Optional. 
\n Examples: + \ \"name\" - the field \"name\" on the current + resource \"items[0].name\" - the field \"name\" + on the first array entry in \"items\"" + type: string + message: + description: A human-readable description of the cause + of the error. This field may be presented as-is + to a reader. + type: string + reason: + description: A machine-readable description of the + cause of the error. If this value is empty there + is no information available. + type: string + type: object + type: array + group: + description: The group attribute of the resource associated + with the status StatusReason. + type: string + kind: + description: 'The kind attribute of the resource associated + with the status StatusReason. On some operations may differ + from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: The name attribute of the resource associated + with the status StatusReason (when there is a single name + which can be described). + type: string + retryAfterSeconds: + description: If specified, the time in seconds before the + operation should be retried. Some errors may indicate + the client must take an alternate action - for those errors + this field may indicate how long to wait before taking + the alternate action. + format: int32 + type: integer + uid: + description: 'UID of the resource. (when there is a single + resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + type: object + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + message: + description: A human-readable description of the status of this + operation. 
+ type: string + metadata: + description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + properties: + continue: + description: continue may be set if the user set a limit + on the number of items returned, and indicates that the + server has more data available. The value is opaque and + may be used to issue another request to the endpoint that + served this list to retrieve the next set of available + objects. Continuing a consistent list may not be possible + if the server configuration has changed or more than a + few minutes have passed. The resourceVersion field returned + when using this continue value will be identical to the + value in the first response, unless you have received + this token from an error message. + type: string + resourceVersion: + description: 'String that identifies the server''s internal + version of this object that can be used by clients to + determine when objects have changed. Value must be treated + as opaque by clients and passed unmodified back to the + server. Populated by the system. Read-only. More info: + https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + selfLink: + description: selfLink is a URL representing this object. + Populated by the system. Read-only. + type: string + type: object + reason: + description: A machine-readable description of why this operation + is in the "Failure" status. If this value is empty there is + no information available. A Reason clarifies an HTTP status + code but does not override it. + type: string + status: + description: 'Status of the operation. One of: "Success" or + "Failure". 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' + type: string + type: object + required: + - pending + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize + and categorize (scope and select) objects. May match selectors of + replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + managedFields: + description: "ManagedFields maps workflow-id and version to the set + of fields that are managed by that workflow. This is mostly for internal + housekeeping, and users typically shouldn't need to set or understand + this field. A workflow can be the user's name, a controller's name, + or the name of a specific apply path like \"ci-cd\". The set of fields + is always in the version that the workflow used when modifying the + object. \n This field is alpha and can be changed or removed without + notice." + items: + description: ManagedFieldsEntry is a workflow-id, a FieldSet and the + group version of the resource that the fieldset applies to. + properties: + apiVersion: + description: APIVersion defines the version of this resource that + this field set applies to. The format is "group/version" just + like the top-level APIVersion field. It is necessary to track + the version of a field set because it cannot be automatically + converted. + type: string + fields: + additionalProperties: true + description: Fields identifies a set of fields. + type: object + manager: + description: Manager is an identifier of the workflow managing + these fields. + type: string + operation: + description: Operation is the type of operation which lead to + this ManagedFieldsEntry being created. The only valid values + for this field are 'Apply' and 'Update'. + type: string + time: + description: Time is timestamp of when these fields were set. 
+ It should always be empty if Operation is 'Apply' + format: date-time + type: string + type: object + type: array + name: + description: 'Name must be unique within a namespace. Is required when + creating resources, although some resources may allow a client to + request the generation of an appropriate name automatically. Name + is primarily intended for creation idempotence and configuration definition. + Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. + An empty namespace is equivalent to the \"default\" namespace, but + \"default\" is the canonical representation. Not all objects are required + to be scoped to a namespace - the value of this field for those objects + will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: + http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects + in the list have been deleted, this object will be garbage collected. + If this object is managed by a controller, then an entry in this list + will point to this controller, with the controller field set to true. + There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information to let you + identify an owning object. An owning object must be in the same + namespace as the dependent, or be cluster-scoped, so there is no + namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from the key-value + store until this reference is removed. Defaults to false. To + set this field, a user needs "delete" permission of the owner, + otherwise 422 (Unprocessable Entity) will be returned. 
+ type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + resourceVersion: + description: "An opaque value that represents the internal version of + this object that can be used by clients to determine when objects + have changed. May be used for optimistic concurrency, change detection, + and the watch operation on a resource or set of resources. Clients + must treat these values as opaque and passed unmodified back to the + server. They may only be valid for a particular resource or set of + resources. \n Populated by the system. Read-only. Value must be treated + as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency" + type: string + selfLink: + description: SelfLink is a URL representing this object. Populated by + the system. Read-only. + type: string + uid: + description: "UID is the unique in time and space value for this object. + It is typically generated by the server on successful creation of + a resource and is not allowed to change on PUT operations. \n Populated + by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + type: string + type: object + output: + properties: + primaryKey: + type: string + redisCacheName: + type: string + secondaryKey: + type: string + type: object + spec: + description: RedisCacheSpec defines the desired state of RedisCache + properties: + location: + minLength: 0 + type: string + properties: + description: RedisCacheProperties the properties of the Redis Cache. + properties: + enableNonSslPort: + type: boolean + sku: + description: RedisCacheSku the SKU of the Redis Cache. + properties: + capacity: + format: int32 + type: integer + family: + type: string + name: + description: 'Name - The SKU name. Required for account creation; + optional for update. Possible values include: ''StandardLRS'', + ''StandardGRS'', ''StandardRAGRS'', ''StandardZRS'', ''PremiumLRS'', + ''PremiumZRS'', ''StandardGZRS'', ''StandardRAGZRS''' + type: string + type: object + type: object + resourceGroup: + type: string + required: + - resourceGroup + type: object + status: + description: RedisCacheStatus defines the observed state of RedisCache + properties: + provisioned: + type: boolean + provisioning: + description: DeploymentName string `json:"deploymentName,omitempty"` + ProvisioningState string `json:"provisioningState,omitempty"` Generation int64 `json:"generation,omitempty"` + type: boolean + type: object + type: object + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/azure.microsoft.com_storages.yaml b/config/crd/bases/azure.microsoft.com_storages.yaml new file mode 100644 index 00000000000..36bd1ce2c1f --- /dev/null +++ b/config/crd/bases/azure.microsoft.com_storages.yaml @@ -0,0 +1,493 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: storages.azure.microsoft.com +spec: + group: azure.microsoft.com 
+ names: + kind: Storage + plural: storages + scope: "" + subresources: + status: {} + validation: + openAPIV3Schema: + description: Storage is the Schema for the storages API + properties: + additionalResources: + description: StorageAdditionalResources holds the additional resources + properties: + secrets: + items: + type: string + type: array + type: object + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + description: ObjectMeta is metadata that all persisted resources must have, + which includes all objects users must create. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with + a resource that may be set by external tools to store and retrieve + arbitrary metadata. They are not queryable and should be preserved + when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + clusterName: + description: The name of the cluster which the object belongs to. This + is used to distinguish resources with same name and namespace in different + clusters. This field is not set anywhere right now and apiserver is + going to ignore it if set in create or update request. + type: string + creationTimestamp: + description: "CreationTimestamp is a timestamp representing the server + time when this object was created. 
It is not guaranteed to be set + in happens-before order across separate operations. Clients may not + set this value. It is represented in RFC3339 form and is in UTC. \n + Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" + format: date-time + type: string + deletionGracePeriodSeconds: + description: Number of seconds allowed for this object to gracefully + terminate before it will be removed from the system. Only set when + deletionTimestamp is also set. May only be shortened. Read-only. + format: int64 + type: integer + deletionTimestamp: + description: "DeletionTimestamp is RFC 3339 date and time at which this + resource will be deleted. This field is set by the server when a graceful + deletion is requested by the user, and is not directly settable by + a client. The resource is expected to be deleted (no longer visible + from resource lists, and not reachable by name) after the time in + this field, once the finalizers list is empty. As long as the finalizers + list contains items, deletion is blocked. Once the deletionTimestamp + is set, this value may not be unset or be set further into the future, + although it may be shortened or the resource may be deleted prior + to this time. For example, a user may request that a pod is deleted + in 30 seconds. The Kubelet will react by sending a graceful termination + signal to the containers in the pod. After that 30 seconds, the Kubelet + will send a hard termination signal (SIGKILL) to the container and + after cleanup, remove the pod from the API. In the presence of network + partitions, this object may still exist after this timestamp, until + an administrator or automated process can determine the resource is + fully terminated. If not set, graceful deletion of the object has + not been requested. \n Populated by the system when a graceful deletion + is requested. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" + format: date-time + type: string + finalizers: + description: Must be empty before the object is deleted from the registry. + Each entry is an identifier for the responsible component that will + remove the entry from the list. If the deletionTimestamp of the object + is non-nil, entries in this list can only be removed. + items: + type: string + type: array + generateName: + description: "GenerateName is an optional prefix, used by the server, + to generate a unique name ONLY IF the Name field has not been provided. + If this field is used, the name returned to the client will be different + than the name passed. This value will also be combined with a unique + suffix. The provided value has the same validation rules as the Name + field, and may be truncated by the length of the suffix required to + make the value unique on the server. \n If this field is specified + and the generated name exists, the server will NOT return a 409 - + instead, it will either return 201 Created or 500 with Reason ServerTimeout + indicating a unique name could not be found in the time allotted, + and the client should retry (optionally after the time indicated in + the Retry-After header). \n Applied only if Name is not specified. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" + type: string + generation: + description: A sequence number representing a specific generation of + the desired state. Populated by the system. Read-only. + format: int64 + type: integer + initializers: + description: "An initializer is a controller which enforces some system + invariant at object creation time. This field is a list of initializers + that have not yet acted on this object. If nil or empty, this object + has been completely initialized. 
Otherwise, the object is considered + uninitialized and is hidden (in list/watch and get calls) from clients + that haven't explicitly asked to observe uninitialized objects. \n + When an object is created, the system will populate this list with + the current set of initializers. Only privileged users may set or + modify this list. Once it is empty, it may not be modified further + by any user. \n DEPRECATED - initializers are an alpha field and will + be removed in v1.15." + properties: + pending: + description: Pending is a list of initializers that must execute + in order before this object is visible. When the last pending + initializer is removed, and no failing result is set, the initializers + struct will be set to nil and the object is considered as initialized + and visible to all clients. + items: + description: Initializer is information about an initializer that + has not yet completed. + properties: + name: + description: name of the process that is responsible for initializing + this object. + type: string + required: + - name + type: object + type: array + result: + description: If result is set with the Failure field, the object + will be persisted to storage and then deleted, ensuring that other + clients can observe the deletion. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + code: + description: Suggested HTTP return code for this status, 0 if + not set. + format: int32 + type: integer + details: + description: Extended data associated with the reason. Each + reason may define its own extended details. This field is + optional and the data returned is not guaranteed to conform + to any schema except that defined by the reason type. 
+ properties: + causes: + description: The Causes array includes more details associated + with the StatusReason failure. Not all StatusReasons may + provide detailed causes. + items: + description: StatusCause provides more information about + an api.Status failure, including cases when multiple + errors are encountered. + properties: + field: + description: "The field of the resource that has caused + this error, as named by its JSON serialization. + May include dot and postfix notation for nested + attributes. Arrays are zero-indexed. Fields may + appear more than once in an array of causes due + to fields having multiple errors. Optional. \n Examples: + \ \"name\" - the field \"name\" on the current + resource \"items[0].name\" - the field \"name\" + on the first array entry in \"items\"" + type: string + message: + description: A human-readable description of the cause + of the error. This field may be presented as-is + to a reader. + type: string + reason: + description: A machine-readable description of the + cause of the error. If this value is empty there + is no information available. + type: string + type: object + type: array + group: + description: The group attribute of the resource associated + with the status StatusReason. + type: string + kind: + description: 'The kind attribute of the resource associated + with the status StatusReason. On some operations may differ + from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: The name attribute of the resource associated + with the status StatusReason (when there is a single name + which can be described). + type: string + retryAfterSeconds: + description: If specified, the time in seconds before the + operation should be retried. Some errors may indicate + the client must take an alternate action - for those errors + this field may indicate how long to wait before taking + the alternate action. 
+ format: int32 + type: integer + uid: + description: 'UID of the resource. (when there is a single + resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + type: object + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + message: + description: A human-readable description of the status of this + operation. + type: string + metadata: + description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + properties: + continue: + description: continue may be set if the user set a limit + on the number of items returned, and indicates that the + server has more data available. The value is opaque and + may be used to issue another request to the endpoint that + served this list to retrieve the next set of available + objects. Continuing a consistent list may not be possible + if the server configuration has changed or more than a + few minutes have passed. The resourceVersion field returned + when using this continue value will be identical to the + value in the first response, unless you have received + this token from an error message. + type: string + resourceVersion: + description: 'String that identifies the server''s internal + version of this object that can be used by clients to + determine when objects have changed. Value must be treated + as opaque by clients and passed unmodified back to the + server. Populated by the system. Read-only. More info: + https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + selfLink: + description: selfLink is a URL representing this object. + Populated by the system. 
Read-only. + type: string + type: object + reason: + description: A machine-readable description of why this operation + is in the "Failure" status. If this value is empty there is + no information available. A Reason clarifies an HTTP status + code but does not override it. + type: string + status: + description: 'Status of the operation. One of: "Success" or + "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' + type: string + type: object + required: + - pending + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize + and categorize (scope and select) objects. May match selectors of + replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + managedFields: + description: "ManagedFields maps workflow-id and version to the set + of fields that are managed by that workflow. This is mostly for internal + housekeeping, and users typically shouldn't need to set or understand + this field. A workflow can be the user's name, a controller's name, + or the name of a specific apply path like \"ci-cd\". The set of fields + is always in the version that the workflow used when modifying the + object. \n This field is alpha and can be changed or removed without + notice." + items: + description: ManagedFieldsEntry is a workflow-id, a FieldSet and the + group version of the resource that the fieldset applies to. + properties: + apiVersion: + description: APIVersion defines the version of this resource that + this field set applies to. The format is "group/version" just + like the top-level APIVersion field. It is necessary to track + the version of a field set because it cannot be automatically + converted. + type: string + fields: + additionalProperties: true + description: Fields identifies a set of fields. 
+ type: object + manager: + description: Manager is an identifier of the workflow managing + these fields. + type: string + operation: + description: Operation is the type of operation which lead to + this ManagedFieldsEntry being created. The only valid values + for this field are 'Apply' and 'Update'. + type: string + time: + description: Time is timestamp of when these fields were set. + It should always be empty if Operation is 'Apply' + format: date-time + type: string + type: object + type: array + name: + description: 'Name must be unique within a namespace. Is required when + creating resources, although some resources may allow a client to + request the generation of an appropriate name automatically. Name + is primarily intended for creation idempotence and configuration definition. + Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. + An empty namespace is equivalent to the \"default\" namespace, but + \"default\" is the canonical representation. Not all objects are required + to be scoped to a namespace - the value of this field for those objects + will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: + http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects + in the list have been deleted, this object will be garbage collected. + If this object is managed by a controller, then an entry in this list + will point to this controller, with the controller field set to true. + There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information to let you + identify an owning object. An owning object must be in the same + namespace as the dependent, or be cluster-scoped, so there is no + namespace field. 
+ properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from the key-value + store until this reference is removed. Defaults to false. To + set this field, a user needs "delete" permission of the owner, + otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + resourceVersion: + description: "An opaque value that represents the internal version of + this object that can be used by clients to determine when objects + have changed. May be used for optimistic concurrency, change detection, + and the watch operation on a resource or set of resources. Clients + must treat these values as opaque and passed unmodified back to the + server. They may only be valid for a particular resource or set of + resources. \n Populated by the system. Read-only. Value must be treated + as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency" + type: string + selfLink: + description: SelfLink is a URL representing this object. Populated by + the system. Read-only. + type: string + uid: + description: "UID is the unique in time and space value for this object. 
+ It is typically generated by the server on successful creation of + a resource and is not allowed to change on PUT operations. \n Populated + by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" + type: string + type: object + output: + properties: + connectionString1: + type: string + connectionString2: + type: string + key1: + type: string + key2: + type: string + storageAccountName: + type: string + type: object + spec: + description: StorageSpec defines the desired state of Storage + properties: + accessTier: + description: AccessTier enumerates the values for access tier. Only + one of the following access tiers may be specified. If none of the + following access tiers is specified, the default one is Hot. + enum: + - Cool + - Hot + type: string + kind: + description: StorageKind enumerates the values for kind. Only one of + the following kinds may be specified. If none of the following kinds + is specified, the default one is StorageV2. + enum: + - BlobStorage + - BlockBlobStorage + - FileStorage + - Storage + - StorageV2 + type: string + location: + minLength: 0 + type: string + resourceGroup: + type: string + sku: + description: Sku the SKU of the storage account. + properties: + name: + description: 'Name - The SKU name. Required for account creation; + optional for update. 
Possible values include: ''StandardLRS'', + ''StandardGRS'', ''StandardRAGRS'', ''StandardZRS'', ''PremiumLRS'', + ''PremiumZRS'', ''StandardGZRS'', ''StandardRAGZRS''' + enum: + - Premium_LRS + - Premium_ZRS + - Standard_GRS + - Standard_GZRS + - Standard_LRS + - Standard_RAGRS + - Standard_RAGZRS + - Standard_ZRS + type: string + type: object + supportsHttpsTrafficOnly: + type: boolean + required: + - resourceGroup + type: object + status: + description: StorageStatus defines the observed state of Storage + properties: + provisioned: + type: boolean + provisioning: + description: DeploymentName string `json:"deploymentName,omitempty"` + ProvisioningState string `json:"provisioningState,omitempty"` Generation int64 `json:"generation,omitempty"` + type: boolean + type: object + type: object + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 24e4d6bcd14..d2d171c55e8 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -2,6 +2,9 @@ # since it depends on service name and namespace that are out of this kustomize package. # It should be run by config/default resources: +- bases/service.azure_storages.yaml +- bases/service.azure_cosmosdbs.yaml +- bases/service.azure_rediscaches.yaml - bases/azure.microsoft.com_eventhubs.yaml - bases/azure.microsoft.com_resourcegroups.yaml - bases/azure.microsoft.com_eventhubnamespaces.yaml diff --git a/config/crd/patches/cainjection_in_cosmosdbs.yaml b/config/crd/patches/cainjection_in_cosmosdbs.yaml new file mode 100644 index 00000000000..d14bfce13fd --- /dev/null +++ b/config/crd/patches/cainjection_in_cosmosdbs.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME) + name: cosmosdbs.service.azure diff --git a/config/crd/patches/cainjection_in_rediscaches.yaml b/config/crd/patches/cainjection_in_rediscaches.yaml new file mode 100644 index 00000000000..56a5e83784e --- /dev/null +++ b/config/crd/patches/cainjection_in_rediscaches.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: rediscaches.service.azure diff --git a/config/crd/patches/cainjection_in_storages.yaml b/config/crd/patches/cainjection_in_storages.yaml new file mode 100644 index 00000000000..e371d696a51 --- /dev/null +++ b/config/crd/patches/cainjection_in_storages.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME) + name: storages.service.azure diff --git a/config/crd/patches/webhook_in_cosmosdbs.yaml b/config/crd/patches/webhook_in_cosmosdbs.yaml new file mode 100644 index 00000000000..7ecf9d9fe29 --- /dev/null +++ b/config/crd/patches/webhook_in_cosmosdbs.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: cosmosdbs.service.azure +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_rediscaches.yaml b/config/crd/patches/webhook_in_rediscaches.yaml new file mode 100644 index 00000000000..3a0e04368dc --- /dev/null +++ b/config/crd/patches/webhook_in_rediscaches.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: rediscaches.service.azure +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_storages.yaml b/config/crd/patches/webhook_in_storages.yaml new file mode 100644 index 00000000000..5811750b289 --- /dev/null +++ b/config/crd/patches/webhook_in_storages.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: storages.service.azure +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/default/manager_role_patch.yaml b/config/default/manager_role_patch.yaml new file mode 100644 index 00000000000..ecf4ba85b76 --- /dev/null +++ b/config/default/manager_role_patch.yaml @@ -0,0 +1,80 @@ + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: manager-role +rules: +- apiGroups: + - service.azure + resources: + - cosmosdbs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - service.azure + resources: + - cosmosdbs/status + verbs: + - get + - patch + - update +- apiGroups: + - service.azure + resources: + - rediscaches + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - service.azure + resources: + - rediscaches/status + verbs: + - get + - patch + - update +- apiGroups: + - service.azure + resources: + - storages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - service.azure + resources: + - storages/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch \ No newline at end of file diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 54f2aeb0bff..49d64a51cd5 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -152,3 +152,63 @@ rules: - patch - update - watch +- apiGroups: + - service.azure + resources: + - rediscaches/status + 
verbs: + - get + - patch + - update +- apiGroups: + - service.azure + resources: + - rediscaches + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - service.azure + resources: + - storages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - service.azure + resources: + - storages/status + verbs: + - get + - patch + - update +- apiGroups: + - service.azure + resources: + - cosmosdbs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - service.azure + resources: + - cosmosdbs/status + verbs: + - get + - patch + - update \ No newline at end of file diff --git a/config/samples/.gitkeep b/config/samples/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/config/samples/azure_v1_cosmosdb.yaml b/config/samples/azure_v1_cosmosdb.yaml new file mode 100644 index 00000000000..0741c205517 --- /dev/null +++ b/config/samples/azure_v1_cosmosdb.yaml @@ -0,0 +1,10 @@ +apiVersion: azure.microsoft.com/v1 +kind: CosmosDB +metadata: + name: cosmosdb-sample1908xyzkj +spec: + kind: GlobalDocumentDB + location: westus + resourceGroup: resourcegroup-sample-1908 + properties: + databaseAccountOfferType: Standard diff --git a/config/samples/azure_v1_rediscache.yaml b/config/samples/azure_v1_rediscache.yaml new file mode 100644 index 00000000000..a78959b3dfd --- /dev/null +++ b/config/samples/azure_v1_rediscache.yaml @@ -0,0 +1,13 @@ +apiVersion: azure.microsoft.com/v1 +kind: RedisCache +metadata: + name: rediscache-sample1908xyzkj +spec: + location: westus + resourceGroup: resourcegroup-sample-1908 + properties: + sku: + name: Basic + family: C + capacity: 1 + enableNonSslPort: true diff --git a/config/samples/azure_v1_resourcegroup.yaml b/config/samples/azure_v1_resourcegroup.yaml index fe67d8499f0..1f2009eaec1 100644 --- a/config/samples/azure_v1_resourcegroup.yaml +++ b/config/samples/azure_v1_resourcegroup.yaml @@ -1,6 +1,6 @@ 
apiVersion: azure.microsoft.com/v1 kind: ResourceGroup metadata: - name: resourcegroup-sample-1907 + name: resourcegroup-sample-1908 spec: location: "westus" diff --git a/config/samples/azure_v1_storage.yaml b/config/samples/azure_v1_storage.yaml new file mode 100644 index 00000000000..88a1d9c1671 --- /dev/null +++ b/config/samples/azure_v1_storage.yaml @@ -0,0 +1,12 @@ +apiVersion: azure.microsoft.com/v1 +kind: Storage +metadata: + name: storagesample1908xyzkj +spec: + location: westus + resourceGroup: resourcegroup-sample-1908 + sku: + name: Standard_RAGRS + kind: StorageV2 + accessTier: Hot + supportsHttpsTrafficOnly: true diff --git a/controllers/cosmosdb_controller.go b/controllers/cosmosdb_controller.go new file mode 100644 index 00000000000..28d019538c7 --- /dev/null +++ b/controllers/cosmosdb_controller.go @@ -0,0 +1,259 @@ +/* +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE +*/ + +package controllers + +import ( + "context" + "fmt" + "os" + "strconv" + "time" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + azurev1 "github.com/Azure/azure-service-operator/api/v1" + "github.com/Azure/azure-service-operator/pkg/errhelp" + "github.com/Azure/azure-service-operator/pkg/helpers" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/cosmosdbs" + "k8s.io/client-go/tools/record" +) + +const cosmosDBFinalizerName = "cosmosdb.finalizers.azure.com" + +// CosmosDBReconciler reconciles a CosmosDB object +type CosmosDBReconciler struct { + client.Client + Log logr.Logger + Recorder record.EventRecorder + RequeueTime time.Duration +} + +// +kubebuilder:rbac:groups=service.azure,resources=cosmosdbs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=service.azure,resources=cosmosdbs/status,verbs=get;update;patch + +// Reconcile function does the main reconciliation loop of the operator +func (r *CosmosDBReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + ctx := context.Background() + log := r.Log.WithValues("cosmosdb", req.NamespacedName) + + // Fetch the CosmosDB instance + var instance azurev1.CosmosDB + + requeueAfter, err := strconv.Atoi(os.Getenv("REQUEUE_AFTER")) + if err != nil { + requeueAfter = 30 + } + + if err := r.Get(ctx, req.NamespacedName, &instance); err != nil { + log.Error(err, "unable to fetch CosmosDB") + // we'll ignore not-found errors, since they can't be fixed by an immediate + // requeue (we'll need to wait for a new notification), and we can get them + // on deleted requests. 
+ return ctrl.Result{}, client.IgnoreNotFound(err) + } + log.Info("Getting CosmosDB Account", "CosmosDB.Namespace", instance.Namespace, "CosmosDB.Name", instance.Name) + log.V(1).Info("Describing CosmosDB Account", "CosmosDB", instance) + + if helpers.IsBeingDeleted(&instance) { + if helpers.HasFinalizer(&instance, cosmosDBFinalizerName) { + if err := r.deleteExternal(&instance); err != nil { + log.Info("Error", "Delete CosmosDB failed with ", err) + return ctrl.Result{}, err + } + + helpers.RemoveFinalizer(&instance, cosmosDBFinalizerName) + if err := r.Update(context.Background(), &instance); err != nil { + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil + } + + if !helpers.HasFinalizer(&instance, cosmosDBFinalizerName) { + if err := r.addFinalizer(&instance); err != nil { + log.Info("Error", "Adding cosmosDB finalizer failed with ", err) + return ctrl.Result{}, err + } + } + + if !instance.IsSubmitted() { + if err := r.reconcileExternal(&instance); err != nil { + if errhelp.IsAsynchronousOperationNotComplete(err) || errhelp.IsGroupNotFound(err) { + log.Info("Requeuing as the async operation is not complete") + return ctrl.Result{ + Requeue: true, + RequeueAfter: time.Second * time.Duration(requeueAfter), + }, nil + } + return ctrl.Result{}, fmt.Errorf("error reconciling cosmosdb in azure: %v", err) + } + return ctrl.Result{}, nil + } + + r.Recorder.Event(&instance, "Normal", "Provisioned", "CosmosDB "+instance.ObjectMeta.Name+" provisioned ") + return ctrl.Result{}, nil +} + +func (r *CosmosDBReconciler) addFinalizer(instance *azurev1.CosmosDB) error { + helpers.AddFinalizer(instance, cosmosDBFinalizerName) + err := r.Update(context.Background(), instance) + if err != nil { + return fmt.Errorf("failed to update finalizer: %v", err) + } + r.Recorder.Event(instance, "Normal", "Updated", fmt.Sprintf("finalizer %s added", cosmosDBFinalizerName)) + return nil +} + +func (r *CosmosDBReconciler) reconcileExternal(instance *azurev1.CosmosDB) error { + ctx 
:= context.Background()
+	location := instance.Spec.Location
+	name := instance.ObjectMeta.Name
+	groupName := instance.Spec.ResourceGroupName
+	kind := instance.Spec.Kind
+	dbType := instance.Spec.Properties.DatabaseAccountOfferType
+
+	// write information back to instance
+	instance.Status.Provisioning = true
+
+	if err := r.Status().Update(ctx, instance); err != nil {
+		r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance")
+	}
+
+	_, err := cosmosdbs.CreateCosmosDB(ctx, groupName, name, location, kind, dbType, nil)
+	if err != nil {
+		if errhelp.IsAsynchronousOperationNotComplete(err) || errhelp.IsGroupNotFound(err) {
+			r.Recorder.Event(instance, "Normal", "Provisioning", name+" provisioning")
+			return err
+		}
+		r.Recorder.Event(instance, "Warning", "Failed", "Couldn't create resource in azure")
+		instance.Status.Provisioning = false
+		errUpdate := r.Status().Update(ctx, instance)
+		if errUpdate != nil {
+			r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance")
+		}
+		return err
+	}
+
+	instance.Status.Provisioning = false
+	instance.Status.Provisioned = true
+
+	if err = r.Status().Update(ctx, instance); err != nil {
+		r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance")
+	}
+
+	return nil
+}
+
+func (r *CosmosDBReconciler) deleteExternal(instance *azurev1.CosmosDB) error {
+	ctx := context.Background()
+	name := instance.ObjectMeta.Name
+	groupName := instance.Spec.ResourceGroupName
+	_, err := cosmosdbs.DeleteCosmosDB(ctx, groupName, name)
+	if err != nil {
+		if errhelp.IsStatusCode204(err) {
+			r.Recorder.Event(instance, "Warning", "DoesNotExist", "Resource to delete does not exist")
+			return nil
+		}
+
+		r.Recorder.Event(instance, "Warning", "Failed", "Couldn't delete resource in azure")
+		return err
+	}
+
+	r.Recorder.Event(instance, "Normal", "Deleted", name+" deleted")
+	return nil
+}
+
+// SetupWithManager sets up the controller functions
+func (r *CosmosDBReconciler) SetupWithManager(mgr 
ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&azurev1.CosmosDB{}). + Complete(r) +} + +/* Below code was from prior to refactor. + Left here for future reference for pulling out values post deployment. + + +func (r *CosmosDBReconciler) updateStatus(req ctrl.Request, resourceGroupName, deploymentName, provisioningState string, outputs interface{}) (*servicev1alpha1.CosmosDB, error) { + ctx := context.Background() + log := r.Log.WithValues("cosmosdb", req.NamespacedName) + + resource := &servicev1alpha1.CosmosDB{} + r.Get(ctx, req.NamespacedName, resource) + log.Info("Getting CosmosDB Account", "CosmosDB.Namespace", resource.Namespace, "CosmosDB.Name", resource.Name) + + resourceCopy := resource.DeepCopy() + resourceCopy.Status.DeploymentName = deploymentName + resourceCopy.Status.ProvisioningState = provisioningState + + err := r.Status().Update(ctx, resourceCopy) + if err != nil { + log.Error(err, "unable to update CosmosDB status") + return nil, err + } + log.V(1).Info("Updated Status", "CosmosDB.Namespace", resourceCopy.Namespace, "CosmosDB.Name", resourceCopy.Name, "CosmosDB.Status", resourceCopy.Status) + + if helpers.IsDeploymentComplete(provisioningState) { + if outputs != nil { + resourceCopy.Output.CosmosDBName = helpers.GetOutput(outputs, "cosmosDBName") + resourceCopy.Output.PrimaryMasterKey = helpers.GetOutput(outputs, "primaryMasterKey") + } + + err := r.syncAdditionalResourcesAndOutput(req, resourceCopy) + if err != nil { + log.Error(err, "error syncing resources") + return nil, err + } + log.V(1).Info("Updated additional resources", "CosmosDB.Namespace", resourceCopy.Namespace, "CosmosDB.Name", resourceCopy.Name, "CosmosDB.AdditionalResources", resourceCopy.AdditionalResources, "CosmosDB.Output", resourceCopy.Output) + } + + return resourceCopy, nil +} + +func (r *CosmosDBReconciler) syncAdditionalResourcesAndOutput(req ctrl.Request, s *servicev1alpha1.CosmosDB) (err error) { + ctx := context.Background() + log := 
r.Log.WithValues("cosmosdb", req.NamespacedName) + + secrets := []string{} + secretData := map[string]string{ + "cosmosDBName": "{{.Obj.Output.CosmosDBName}}", + "primaryMasterKey": "{{.Obj.Output.PrimaryMasterKey}}", + } + secret := helpers.CreateSecret(s, s.Name, s.Namespace, secretData) + secrets = append(secrets, secret) + + resourceCopy := s.DeepCopy() + resourceCopy.AdditionalResources.Secrets = secrets + + err = r.Update(ctx, resourceCopy) + if err != nil { + log.Error(err, "unable to update CosmosDB status") + return err + } + + return nil +}*/ diff --git a/controllers/rediscache_controller.go b/controllers/rediscache_controller.go new file mode 100644 index 00000000000..b8cfef9064c --- /dev/null +++ b/controllers/rediscache_controller.go @@ -0,0 +1,260 @@ +/* +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE +*/ + +package controllers + +import ( + "context" + "fmt" + "os" + "strconv" + "time" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + azurev1 "github.com/Azure/azure-service-operator/api/v1" + "github.com/Azure/azure-service-operator/pkg/errhelp" + "github.com/Azure/azure-service-operator/pkg/helpers" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/rediscaches" + "k8s.io/client-go/tools/record" +) + +const redisCacheFinalizerName = "rediscache.finalizers.azure.com" + +// RedisCacheReconciler reconciles a RedisCache object +type RedisCacheReconciler struct { + client.Client + Log logr.Logger + Recorder record.EventRecorder + RequeueTime time.Duration +} + +// +kubebuilder:rbac:groups=service.azure,resources=rediscaches,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=service.azure,resources=rediscaches/status,verbs=get;update;patch + +func (r *RedisCacheReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + ctx := context.Background() + log := r.Log.WithValues("rediscache", req.NamespacedName) + + // Fetch the Redis Cache instance + var instance azurev1.RedisCache + + requeueAfter, err := strconv.Atoi(os.Getenv("REQUEUE_AFTER")) + if err != nil { + requeueAfter = 30 + } + + if err := r.Get(ctx, req.NamespacedName, &instance); err != nil { + log.Error(err, "unable to fetch RedisCache") + // we'll ignore not-found errors, since they can't be fixed by an immediate + // requeue (we'll need to wait for a new notification), and we can get them + // on deleted requests. 
+ return ctrl.Result{}, client.IgnoreNotFound(err) + } + log.Info("Getting Redis Cache", "RedisCache.Namespace", instance.Namespace, "RedisCache.Name", instance.Name) + log.V(1).Info("Describing Redis Cache", "RedisCache", instance) + + if helpers.IsBeingDeleted(&instance) { + if helpers.HasFinalizer(&instance, redisCacheFinalizerName) { + if err := r.deleteExternal(&instance); err != nil { + log.Info("Error", "Delete Redis Cache failed with ", err) + return ctrl.Result{}, err + } + + helpers.RemoveFinalizer(&instance, redisCacheFinalizerName) + if err := r.Update(context.Background(), &instance); err != nil { + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil + } + + if !helpers.HasFinalizer(&instance, redisCacheFinalizerName) { + if err := r.addFinalizer(&instance); err != nil { + log.Info("Error", "Adding redis cache finalizer failed with ", err) + return ctrl.Result{}, err + } + } + + if !instance.IsSubmitted() { + if err := r.reconcileExternal(&instance); err != nil { + if errhelp.IsAsynchronousOperationNotComplete(err) || errhelp.IsGroupNotFound(err) { + log.Info("Requeuing as the async operation is not complete") + return ctrl.Result{ + Requeue: true, + RequeueAfter: time.Second * time.Duration(requeueAfter), + }, nil + } + return ctrl.Result{}, fmt.Errorf("error reconciling redis cache in azure: %v", err) + } + return ctrl.Result{}, nil + } + + r.Recorder.Event(&instance, "Normal", "Provisioned", "RedisCache "+instance.ObjectMeta.Name+" provisioned ") + return ctrl.Result{}, nil +} + +func (r *RedisCacheReconciler) addFinalizer(instance *azurev1.RedisCache) error { + helpers.AddFinalizer(instance, redisCacheFinalizerName) + err := r.Update(context.Background(), instance) + if err != nil { + return fmt.Errorf("failed to update finalizer: %v", err) + } + r.Recorder.Event(instance, "Normal", "Updated", fmt.Sprintf("finalizer %s added", redisCacheFinalizerName)) + return nil +} + +func (r *RedisCacheReconciler) reconcileExternal(instance 
*azurev1.RedisCache) error {
+	ctx := context.Background()
+	location := instance.Spec.Location
+	name := instance.ObjectMeta.Name
+	groupName := instance.Spec.ResourceGroupName
+	sku := instance.Spec.Properties.Sku
+	enableNonSSLPort := instance.Spec.Properties.EnableNonSslPort
+
+	// write information back to instance
+	instance.Status.Provisioning = true
+
+	if err := r.Status().Update(ctx, instance); err != nil {
+		r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance")
+	}
+
+	_, err := rediscaches.CreateRedisCache(ctx, groupName, name, location, sku, enableNonSSLPort, nil)
+	if err != nil {
+		if errhelp.IsAsynchronousOperationNotComplete(err) || errhelp.IsGroupNotFound(err) {
+			r.Recorder.Event(instance, "Normal", "Provisioning", name+" provisioning")
+			return err
+		}
+		r.Recorder.Event(instance, "Warning", "Failed", "Couldn't create resource in azure")
+		instance.Status.Provisioning = false
+		errUpdate := r.Status().Update(ctx, instance)
+		if errUpdate != nil {
+			r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance")
+		}
+		return err
+	}
+
+	instance.Status.Provisioning = false
+	instance.Status.Provisioned = true
+
+	if err = r.Status().Update(ctx, instance); err != nil {
+		r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance")
+	}
+
+	return nil
+}
+
+func (r *RedisCacheReconciler) deleteExternal(instance *azurev1.RedisCache) error {
+	ctx := context.Background()
+	name := instance.ObjectMeta.Name
+	groupName := instance.Spec.ResourceGroupName
+	_, err := rediscaches.DeleteRedisCache(ctx, groupName, name)
+	if err != nil {
+		if errhelp.IsStatusCode204(err) {
+			r.Recorder.Event(instance, "Warning", "DoesNotExist", "Resource to delete does not exist")
+			return nil
+		}
+
+		r.Recorder.Event(instance, "Warning", "Failed", "Couldn't delete resource in azure")
+		return err
+	}
+
+	r.Recorder.Event(instance, "Normal", "Deleted", name+" deleted")
+	return nil
+}
+
+// SetupWithManager sets up the 
controller functions +func (r *RedisCacheReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&azurev1.RedisCache{}). + Complete(r) +} + +/* Below code was from prior to refactor. +Left here for future reference for pulling out values post deployment. + +func (r *RedisCacheReconciler) updateStatus(req ctrl.Request, resourceGroupName, deploymentName, provisioningState string, outputs interface{}) (*servicev1alpha1.RedisCache, error) { + ctx := context.Background() + log := r.Log.WithValues("Redis Cache", req.NamespacedName) + + resource := &servicev1alpha1.RedisCache{} + r.Get(ctx, req.NamespacedName, resource) + log.Info("Getting Redis Cache", "RedisCache.Namespace", resource.Namespace, "RedisCache.Name", resource.Name) + + resourceCopy := resource.DeepCopy() + resourceCopy.Status.DeploymentName = deploymentName + resourceCopy.Status.ProvisioningState = provisioningState + + err := r.Status().Update(ctx, resourceCopy) + if err != nil { + log.Error(err, "unable to update Redis Cache status") + return nil, err + } + log.V(1).Info("Updated Status", "Redis Cache.Namespace", resourceCopy.Namespace, "RedisCache.Name", resourceCopy.Name, "RedisCache.Status", resourceCopy.Status) + + if helpers.IsDeploymentComplete(provisioningState) { + if outputs != nil { + resourceCopy.Output.RedisCacheName = helpers.GetOutput(outputs, "redisCacheName") + resourceCopy.Output.PrimaryKey = helpers.GetOutput(outputs, "primaryKey") + resourceCopy.Output.SecondaryKey = helpers.GetOutput(outputs, "secondaryKey") + } + + err := r.syncAdditionalResourcesAndOutput(req, resourceCopy) + if err != nil { + log.Error(err, "error syncing resources") + return nil, err + } + log.V(1).Info("Updated additional resources", "Storage.Namespace", resourceCopy.Namespace, "RedisCache.Name", resourceCopy.Name, "RedisCache.AdditionalResources", resourceCopy.AdditionalResources, "RedisCache.Output", resourceCopy.Output) + } + + return resourceCopy, nil +} + +func 
(r *RedisCacheReconciler) syncAdditionalResourcesAndOutput(req ctrl.Request, s *servicev1alpha1.RedisCache) (err error) { + ctx := context.Background() + log := r.Log.WithValues("redisCache", req.NamespacedName) + + secrets := []string{} + secretData := map[string]string{ + "redisCacheName": "{{.Obj.Output.RedisCacheName}}", + "primaryKey": "{{.Obj.Output.PrimaryKey}}", + "secondaryKey": "{{.Obj.Output.SecondaryKey}}", + } + secret := helpers.CreateSecret(s, s.Name, s.Namespace, secretData) + secrets = append(secrets, secret) + + resourceCopy := s.DeepCopy() + resourceCopy.AdditionalResources.Secrets = secrets + + err = r.Update(ctx, resourceCopy) + if err != nil { + log.Error(err, "unable to update Redis Cache status") + return err + } + + return nil +} +*/ diff --git a/controllers/storage_controller.go b/controllers/storage_controller.go new file mode 100644 index 00000000000..6408f88709f --- /dev/null +++ b/controllers/storage_controller.go @@ -0,0 +1,269 @@ +/* +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE +*/ + +package controllers + +import ( + "context" + "fmt" + "os" + "strconv" + "time" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + azurev1 "github.com/Azure/azure-service-operator/api/v1" + "github.com/Azure/azure-service-operator/pkg/errhelp" + helpers "github.com/Azure/azure-service-operator/pkg/helpers" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/storages" + "k8s.io/client-go/tools/record" +) + +const storageFinalizerName = "storage.finalizers.azure.com" + +// StorageReconciler reconciles a Storage object +type StorageReconciler struct { + client.Client + Log logr.Logger + Recorder record.EventRecorder + RequeueTime time.Duration +} + +// +kubebuilder:rbac:groups=service.azure,resources=storages,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=service.azure,resources=storages/status,verbs=get;update;patch + +// Reconcile function does the main reconciliation loop of the operator +func (r *StorageReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + ctx := context.Background() + log := r.Log.WithValues("storage", req.NamespacedName) + + // Fetch the Storage instance + var instance azurev1.Storage + + requeueAfter, err := strconv.Atoi(os.Getenv("REQUEUE_AFTER")) + if err != nil { + requeueAfter = 30 + } + + if err := r.Get(ctx, req.NamespacedName, &instance); err != nil { + log.Error(err, "unable to retrieve storage resource", "err", err.Error()) + // we'll ignore not-found errors, since they can't be fixed by an immediate + // requeue (we'll need to wait for a new notification), and we can get them + // on deleted requests. 
+ //return ctrl.Result{}, helpers.IgnoreKubernetesResourceNotFound(err) + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + log.Info("Getting Storage Account", "Storage.Namespace", instance.Namespace, "Storage.Name", instance.Name) + log.V(1).Info("Describing Storage Account", "Storage", instance) + + if helpers.IsBeingDeleted(&instance) { + if helpers.HasFinalizer(&instance, storageFinalizerName) { + if err := r.deleteExternal(&instance); err != nil { + log.Info("Error", "Delete Storage failed with ", err) + return ctrl.Result{}, err + } + + helpers.RemoveFinalizer(&instance, storageFinalizerName) + if err := r.Update(context.Background(), &instance); err != nil { + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil + } + + if !helpers.HasFinalizer(&instance, storageFinalizerName) { + if err := r.addFinalizer(&instance); err != nil { + log.Info("Error", "Adding storage finalizer failed with ", err) + return ctrl.Result{}, err + } + } + + if !instance.IsSubmitted() { + if err := r.reconcileExternal(&instance); err != nil { + if errhelp.IsAsynchronousOperationNotComplete(err) || errhelp.IsGroupNotFound(err) { + log.Info("Requeuing as the async operation is not complete") + return ctrl.Result{ + Requeue: true, + RequeueAfter: time.Second * time.Duration(requeueAfter), + }, nil + } + return ctrl.Result{}, fmt.Errorf("error reconciling storage in azure: %v", err) + } + return ctrl.Result{}, nil + } + + r.Recorder.Event(&instance, "Normal", "Provisioned", "Storage "+instance.ObjectMeta.Name+" provisioned ") + return ctrl.Result{}, nil +} + +func (r *StorageReconciler) addFinalizer(instance *azurev1.Storage) error { + helpers.AddFinalizer(instance, storageFinalizerName) + err := r.Update(context.Background(), instance) + if err != nil { + return fmt.Errorf("failed to update finalizer: %v", err) + } + r.Recorder.Event(instance, "Normal", "Updated", fmt.Sprintf("finalizer %s added", storageFinalizerName)) + return nil +} + +func (r *StorageReconciler) 
reconcileExternal(instance *azurev1.Storage) error {
+	ctx := context.Background()
+	location := instance.Spec.Location
+	name := instance.ObjectMeta.Name
+	groupName := instance.Spec.ResourceGroupName
+	sku := instance.Spec.Sku
+	kind := instance.Spec.Kind
+	accessTier := instance.Spec.AccessTier
+	enableHTTPSTrafficOnly := instance.Spec.EnableHTTPSTrafficOnly
+
+	// write information back to instance
+	instance.Status.Provisioning = true
+
+	if err := r.Status().Update(ctx, instance); err != nil {
+		r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance")
+	}
+
+	_, err := storages.CreateStorage(ctx, groupName, name, location, sku, kind, nil, accessTier, enableHTTPSTrafficOnly)
+	if err != nil {
+		if errhelp.IsAsynchronousOperationNotComplete(err) || errhelp.IsGroupNotFound(err) {
+			r.Recorder.Event(instance, "Normal", "Provisioning", name+" provisioning")
+			return err
+		}
+		r.Recorder.Event(instance, "Warning", "Failed", "Couldn't create resource in azure")
+		instance.Status.Provisioning = false
+		errUpdate := r.Status().Update(ctx, instance)
+		if errUpdate != nil {
+			r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance")
+		}
+		return err
+	}
+
+	instance.Status.Provisioning = false
+	instance.Status.Provisioned = true
+
+	if err = r.Status().Update(ctx, instance); err != nil {
+		r.Recorder.Event(instance, "Warning", "Failed", "Unable to update instance")
+	}
+
+	return nil
+}
+
+func (r *StorageReconciler) deleteExternal(instance *azurev1.Storage) error {
+	ctx := context.Background()
+	name := instance.ObjectMeta.Name
+	groupName := instance.Spec.ResourceGroupName
+	_, err := storages.DeleteStorage(ctx, groupName, name)
+	if err != nil {
+		if errhelp.IsStatusCode204(err) {
+			r.Recorder.Event(instance, "Warning", "DoesNotExist", "Resource to delete does not exist")
+			return nil
+		}
+
+		r.Recorder.Event(instance, "Warning", "Failed", "Couldn't delete resource in azure")
+		return err
+	}
+
+	r.Recorder.Event(instance, 
"Normal", "Deleted", name+" deleted") + return nil +} + +// SetupWithManager sets up the controller functions +func (r *StorageReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&azurev1.Storage{}). + Complete(r) +} + +/* Below code was from prior to refactor. + Left here for future reference for pulling out values post deployment. + +func (r *StorageReconciler) updateStatus(req ctrl.Request, resourceGroupName, deploymentName, provisioningState string, outputs interface{}) (*servicev1alpha1.Storage, error) { + ctx := context.Background() + log := r.Log.WithValues("storage", req.NamespacedName) + + resource := &servicev1alpha1.Storage{} + r.Get(ctx, req.NamespacedName, resource) + log.Info("Getting Storage Account", "Storage.Namespace", resource.Namespace, "Storage.Name", resource.Name) + + resourceCopy := resource.DeepCopy() + resourceCopy.Status.DeploymentName = deploymentName + resourceCopy.Status.ProvisioningState = provisioningState + + err := r.Status().Update(ctx, resourceCopy) + if err != nil { + log.Error(err, "unable to update Storage status") + return nil, err + } + log.V(1).Info("Updated Status", "Storage.Namespace", resourceCopy.Namespace, "Storage.Name", resourceCopy.Name, "Storage.Status", resourceCopy.Status) + + if helpers.IsDeploymentComplete(provisioningState) { + if outputs != nil { + resourceCopy.Output.StorageAccountName = helpers.GetOutput(outputs, "storageAccountName") + resourceCopy.Output.Key1 = helpers.GetOutput(outputs, "key1") + resourceCopy.Output.Key2 = helpers.GetOutput(outputs, "key2") + resourceCopy.Output.ConnectionString1 = helpers.GetOutput(outputs, "connectionString1") + resourceCopy.Output.ConnectionString2 = helpers.GetOutput(outputs, "connectionString2") + } + + err := r.syncAdditionalResourcesAndOutput(req, resourceCopy) + if err != nil { + log.Error(err, "error syncing resources") + return nil, err + } + log.V(1).Info("Updated additional resources", "Storage.Namespace", 
resourceCopy.Namespace, "Storage.Name", resourceCopy.Name, "Storage.AdditionalResources", resourceCopy.AdditionalResources, "Storage.Output", resourceCopy.Output) + } + + return resourceCopy, nil +} + +func (r *StorageReconciler) syncAdditionalResourcesAndOutput(req ctrl.Request, s *servicev1alpha1.Storage) (err error) { + ctx := context.Background() + log := r.Log.WithValues("storage", req.NamespacedName) + + secrets := []string{} + secretData := map[string]string{ + "storageAccountName": "{{.Obj.Output.StorageAccountName}}", + "key1": "{{.Obj.Output.Key1}}", + "key2": "{{.Obj.Output.Key2}}", + "connectionString1": "{{.Obj.Output.ConnectionString1}}", + "connectionString2": "{{.Obj.Output.ConnectionString2}}", + } + secret := helpers.CreateSecret(s, s.Name, s.Namespace, secretData) + secrets = append(secrets, secret) + + resourceCopy := s.DeepCopy() + resourceCopy.AdditionalResources.Secrets = secrets + + err = r.Update(ctx, resourceCopy) + if err != nil { + log.Error(err, "unable to update Storage status") + return err + } + + return nil +} +*/ diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 30be158aed9..7b51ded2ab3 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -45,7 +45,9 @@ import ( var cfg *rest.Config var k8sClient client.Client + var k8sManager ctrl.Manager + var testEnv *envtest.Environment var resourceGroupName string var resourcegroupLocation string @@ -71,6 +73,9 @@ var _ = BeforeSuite(func(done Done) { logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + } if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { t := true @@ -141,8 +146,10 @@ var _ = BeforeSuite(func(done Done) { Expect(err).ToNot(HaveOccurred()) }() - k8sClient = k8sManager.GetClient() - Expect(k8sClient).ToNot(BeNil()) + //k8sClient = k8sManager.GetClient() + k8sClient, err = client.New(cfg, 
client.Options{Scheme: scheme.Scheme}) + Expect(err).ToNot(HaveOccurred()) + //Expect(k8sClient).ToNot(BeNil()) // Create the Resourcegroup resource result, _ := resoucegroupsresourcemanager.CheckExistence(context.Background(), resourceGroupName) @@ -162,6 +169,7 @@ var _ = BeforeSuite(func(done Done) { var _ = AfterSuite(func(done Done) { //clean up the resources created for test + //clean up the resources created for test By("tearing down the test environment") _, _ = resoucegroupsresourcemanager.DeleteGroup(context.Background(), resourceGroupName) diff --git a/docs/development.md b/docs/development.md new file mode 100644 index 00000000000..c06ba3be01e --- /dev/null +++ b/docs/development.md @@ -0,0 +1,122 @@ +# Development + +## Prerequisites + +* a Kubernetes cluster to run against. You can use [KIND](https://sigs.k8s.io/kind) to get a local cluster for testing, or run against a remote cluster, e.g. [Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough). +* [kubebuilder](https://book.kubebuilder.io/quick-start.html#installation) +* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) + +## Deploy Operator and Test + +### Test it locally + +1. Create Cluster. + + ``` + kind create cluster + export KUBECONFIG="$(kind get kubeconfig-path --name="kind")" + kubectl cluster-info + ``` + +1. Install CRDs. + + ``` + make install + ``` + +1. Run Controller. + + Setup the environment variables: + + ``` + export CLOUD_NAME=AzurePublicCloud + export TENANT_ID= + export SUBSCRIPTION_ID= + export CLIENT_ID= + export CLIENT_SECRET= + ``` + + Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running): + + ``` + make run + ``` + + Refer to [kubebuilder's doc](https://book.kubebuilder.io/quick-start.html#test-it-out-locally). + +1. Create a Custom Resource. + + Create your CR (make sure to edit them first to specify the fields). 
Example: + + ``` + kubectl apply -f examples/service/v1alpha1/storage.yaml + ``` + +### Test it on a remote cluster + +1. Create Cluster. + + ``` + az aks create -g -n + az aks get-credentials -g -n + kubectl cluster-info + ``` + +1. Install CRDs. + + ``` + make install + ``` + +1. Build and Push the image. + + ``` + IMG= make build-and-push + ``` + + Update kustomize image patch file `config/default/manager_image_patch.yaml` for manager resource manually. + +1. Run Controller. + + Update `config/manager/manager.yaml` with your service principal. + + ``` + make deploy + ``` + +1. Create a Custom Resource. + + Create your CR (make sure to edit them first to specify the fields). Example: + + ``` + kubectl apply -f examples/service/v1alpha1/storage.yaml + ``` + +## Add a New Custom Resource + +### 1. Add a New API + +``` +kubebuilder create api --group service --version v1alpha1 --kind +``` + +Refer to [kubebuilder's doc](https://book.kubebuilder.io/cronjob-tutorial/new-api.html) + +### 2. Design an API + +1. Try to create the specific Azure service, and download the template in the `Review+Create` step. +2. Upload the template to a storage account. For now, we can use the storage account `azureserviceoperator`. +3. Based on the template, we can figure out what the `Spec` should be like. +4. The `Status` should contain the resource group name, which can be used to delete the resource. + +Refer to [kubebuilder's doc](https://book.kubebuilder.io/cronjob-tutorial/api-design.html) + +Note: + +* Don't forget to add `// +kubebuilder:subresource:status` if we want a status subresource. + +* Run `make manifests` if you find the property you add doesn't work. + +### 3. 
Delete external resource + +[Using Finalizers](https://book.kubebuilder.io/reference/using-finalizers.html) diff --git a/examples/demo/azure-vote-app-redis.yaml b/examples/demo/azure-vote-app-redis.yaml new file mode 100644 index 00000000000..6609a498a14 --- /dev/null +++ b/examples/demo/azure-vote-app-redis.yaml @@ -0,0 +1,65 @@ +apiVersion: service.azure/v1alpha1 +kind: RedisCache +metadata: + name: azure-redis +spec: + location: eastus2 + properties: + sku: + name: Basic + family: C + capacity: 1 + enableNonSslPort: true +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: azure-vote-front +spec: + replicas: 1 + selector: + matchLabels: + app: azure-vote-front + template: + metadata: + labels: + app: azure-vote-front + spec: + nodeSelector: + "beta.kubernetes.io/os": linux + containers: + - name: azure-vote-front + image: microsoft/azure-vote-front:v1 + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 250m + memory: 256Mi + ports: + - containerPort: 80 + env: + - name: REDIS_NAME + valueFrom: + secretKeyRef: + name: azure-redis + key: redisCacheName + - name: REDIS + value: $(REDIS_NAME).redis.cache.windows.net + - name: REDIS_PWD + valueFrom: + secretKeyRef: + name: azure-redis + key: primaryKey +--- +apiVersion: v1 +kind: Service +metadata: + name: azure-vote-front +spec: + type: LoadBalancer + ports: + - port: 80 + selector: + app: azure-vote-front diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index b92001fb4ed..d82aa77e076 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -11,4 +11,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/ \ No newline at end of file +*/ diff --git a/main.go b/main.go index 1ed35c2930f..d5ef167cc56 100644 --- a/main.go +++ b/main.go @@ -19,11 +19,12 @@ import ( "flag" "os" + "k8s.io/apimachinery/pkg/runtime" + azurev1 "github.com/Azure/azure-service-operator/api/v1" "github.com/Azure/azure-service-operator/controllers" resourcemanagerconfig "github.com/Azure/azure-service-operator/pkg/resourcemanager/config" - "k8s.io/apimachinery/pkg/runtime" kscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" @@ -32,6 +33,10 @@ import ( ) var ( + masterURL, kubeconfig, resources, clusterName string + cloudName, tenantID, subscriptionID, clientID, clientSecret string + useAADPodIdentity bool + scheme = runtime.NewScheme() setupLog = ctrl.Log.WithName("setup") ) @@ -71,6 +76,33 @@ func main() { os.Exit(1) } + err = (&controllers.StorageReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Storage"), + Recorder: mgr.GetEventRecorderFor("Storage-controller"), + }).SetupWithManager(mgr) + if err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Storage") + os.Exit(1) + } + err = (&controllers.CosmosDBReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("CosmosDB"), + Recorder: mgr.GetEventRecorderFor("CosmosDB-controller"), + }).SetupWithManager(mgr) + if err != nil { + setupLog.Error(err, "unable to create controller", "controller", "CosmosDB") + os.Exit(1) + } + if err = (&controllers.RedisCacheReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("RedisCache"), + Recorder: mgr.GetEventRecorderFor("RedisCache-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "RedisCache") + os.Exit(1) + } + err = resourcemanagerconfig.LoadSettings() if err != nil { setupLog.Error(err, "unable to parse settings required 
to provision resources in Azure") diff --git a/pkg/client/deployment/deployment.go b/pkg/client/deployment/deployment.go new file mode 100644 index 00000000000..1ab6e99e044 --- /dev/null +++ b/pkg/client/deployment/deployment.go @@ -0,0 +1,41 @@ +package deployment + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources" + + "github.com/Azure/azure-service-operator/pkg/config" + "github.com/Azure/azure-service-operator/pkg/iam" +) + +func getDeploymentsClient() resources.DeploymentsClient { + deployClient := resources.NewDeploymentsClient(config.Instance.SubscriptionID) + a, _ := iam.GetResourceManagementAuthorizer() + deployClient.Authorizer = a + return deployClient +} + +// CreateDeployment creates a template deployment using the +// referenced JSON files for the template and its parameters +func CreateDeployment(ctx context.Context, resourceGroupName, deploymentName string, template, params *map[string]interface{}) error { + deployClient := getDeploymentsClient() + _, err := deployClient.CreateOrUpdate( + ctx, + resourceGroupName, + deploymentName, + resources.Deployment{ + Properties: &resources.DeploymentProperties{ + Template: template, + Parameters: params, + Mode: resources.Incremental, + }, + }, + ) + return err +} + +func GetDeployment(ctx context.Context, resourceGroupName, deploymentName string) (de resources.DeploymentExtended, err error) { + deployClient := getDeploymentsClient() + return deployClient.Get(ctx, resourceGroupName, deploymentName) +} diff --git a/pkg/client/group/group.go b/pkg/client/group/group.go new file mode 100644 index 00000000000..c9dd8742f42 --- /dev/null +++ b/pkg/client/group/group.go @@ -0,0 +1,39 @@ +package group + +import ( + "context" + "log" + + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources" + "github.com/Azure/azure-service-operator/pkg/config" + "github.com/Azure/azure-service-operator/pkg/iam" + 
"github.com/Azure/go-autorest/autorest/to" +) + +func getGroupsClient() resources.GroupsClient { + groupsClient := resources.NewGroupsClient(config.Instance.SubscriptionID) + a, err := iam.GetResourceManagementAuthorizer() + if err != nil { + log.Fatalf("failed to initialize authorizer: %v\n", err) + } + groupsClient.Authorizer = a + return groupsClient +} + +// CreateGroup creates a new resource group named by env var +func CreateGroup(ctx context.Context, groupName, location string, tags map[string]*string) (resources.Group, error) { + groupsClient := getGroupsClient() + return groupsClient.CreateOrUpdate( + ctx, + groupName, + resources.Group{ + Location: to.StringPtr(location), + Tags: tags, + }) +} + +// DeleteGroup removes the resource group named by env var +func DeleteGroup(ctx context.Context, groupName string) (result resources.GroupsDeleteFuture, err error) { + groupsClient := getGroupsClient() + return groupsClient.Delete(ctx, groupName) +} diff --git a/pkg/config/config.go b/pkg/config/config.go new file mode 100644 index 00000000000..cd1ff39795c --- /dev/null +++ b/pkg/config/config.go @@ -0,0 +1,56 @@ +package config + +import ( + "fmt" + + "github.com/Azure/go-autorest/autorest/azure" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +var Instance *Config + +type Config struct { + KubeClientset kubernetes.Interface + Resources map[string]bool `json:"resources"` + ClusterName string `json:"clusterName"` + CloudName string `json:"cloudName"` + TenantID string `json:"tenantID"` + SubscriptionID string `json:"subscriptionID"` + ClientID string `json:"clientID"` + ClientSecret string `json:"clientSecret"` + UseAADPodIdentity bool `json:"useAADPodIdentity"` +} + +func getKubeconfig(masterURL, kubeconfig string) (*rest.Config, error) { + if kubeconfig != "" { + return clientcmd.BuildConfigFromFlags(masterURL, kubeconfig) + } + return rest.InClusterConfig() +} + +func CreateKubeClientset(masterURL, 
kubeconfig string) (kubernetes.Interface, error) { + config, err := getKubeconfig(masterURL, kubeconfig) + if err != nil { + return nil, fmt.Errorf("failed to get k8s config. %+v", err) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("failed to get k8s client. %+v", err) + } + + return clientset, nil +} + +// Environment() returns an `azure.Environment{...}` for the current cloud. +func Environment() azure.Environment { + cloudName := Instance.CloudName + env, err := azure.EnvironmentFromName(cloudName) + if err != nil { + panic(fmt.Sprintf( + "invalid cloud name '%s' specified, cannot continue\n", cloudName)) + } + return env +} diff --git a/pkg/helpers/deployment.go b/pkg/helpers/deployment.go new file mode 100644 index 00000000000..46bde127113 --- /dev/null +++ b/pkg/helpers/deployment.go @@ -0,0 +1,36 @@ +package helpers + +import ( + "bytes" + "text/template" +) + +// IsDeploymentComplete will dtermine if the deployment is complete +func IsDeploymentComplete(status string) bool { + switch status { + case "Succeeded": + return true + case "Failed": + return true + case "Canceled": + return true + } + return false +} + +// Templatize returns the proper values based on the templating +func Templatize(tempStr string, data interface{}) (resp string, err error) { + t := template.New("templating") + t, err = t.Parse(string(tempStr)) + if err != nil { + return + } + + var tpl bytes.Buffer + err = t.Execute(&tpl, data) + return tpl.String(), err +} + +func GetOutput(outputs interface{}, key string) string { + return outputs.(map[string]interface{})[key].(map[string]interface{})["value"].(string) +} diff --git a/pkg/helpers/helpers.go b/pkg/helpers/helpers.go new file mode 100644 index 00000000000..e033f019db0 --- /dev/null +++ b/pkg/helpers/helpers.go @@ -0,0 +1,43 @@ +package helpers + +import ( + "crypto/md5" + "fmt" + "io" + "regexp" + "strings" + + "github.com/Azure/go-autorest/autorest" + ctrl 
"sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + log = ctrl.Log.WithName("helpers") +) + +// KubernetesResourceName returns the resource name for other components +func KubernetesResourceName(name string) string { + reg, _ := regexp.Compile("[^a-zA-Z0-9_-]+") + return reg.ReplaceAllString(name, "-") +} + +func AzrueResourceGroupName(subscriptionID, clusterName, resourceType, name, namespace string) string { + nameParts := []string{subscriptionID, clusterName, resourceType, name, namespace} + nameString := strings.Join(nameParts, "-") + log.V(1).Info("Getting Azure Resource Group Name", "nameString", nameString) + hash := md5.New() + io.WriteString(hash, nameString) + return fmt.Sprintf("aso-%x", hash.Sum(nil)) +} + +func IgnoreKubernetesResourceNotFound(err error) error { + return client.IgnoreNotFound(err) +} + +func IgnoreAzureResourceNotFound(err error) error { + if err.(autorest.DetailedError).StatusCode.(int) == 404 { + return nil + } + return err +} diff --git a/pkg/helpers/secret.go b/pkg/helpers/secret.go new file mode 100644 index 00000000000..dc6f7a891fd --- /dev/null +++ b/pkg/helpers/secret.go @@ -0,0 +1,49 @@ +package helpers + +import ( + "github.com/Azure/azure-service-operator/pkg/config" + apiv1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func CreateSecret(resource interface{}, svcName, svcNamespace string, secretTemplate map[string]string) string { + data := map[string]string{} + for key, value := range secretTemplate { + tempValue, err := Templatize(value, Data{Obj: resource}) + if err != nil { + log.Error(err, "error parsing config map template") + return "" + } + data[key] = tempValue + } + + secretName := KubernetesResourceName(svcName) + secretObj := &apiv1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: svcNamespace, + }, + StringData: data, + } + + _, err := 
config.Instance.KubeClientset.CoreV1().Secrets(svcNamespace).Get(secretName, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, err := config.Instance.KubeClientset.CoreV1().Secrets(svcNamespace).Create(secretObj) + if err != nil { + log.Error(err, "error creating Secret") + } + } else { + _, err := config.Instance.KubeClientset.CoreV1().Secrets(svcNamespace).Update(secretObj) + if err != nil { + log.Error(err, "error updating Secret") + } + } + + return secretName +} + +func DeleteSecret(svcName, svcNamespace string) error { + secretName := KubernetesResourceName(svcName) + return config.Instance.KubeClientset.CoreV1().Secrets(svcNamespace).Delete(secretName, &metav1.DeleteOptions{}) +} diff --git a/pkg/helpers/service.go b/pkg/helpers/service.go new file mode 100644 index 00000000000..2cebc8518f8 --- /dev/null +++ b/pkg/helpers/service.go @@ -0,0 +1,57 @@ +package helpers + +import ( + "strconv" + "strings" + + "github.com/Azure/azure-service-operator/pkg/config" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CreateExternalNameService will create a Kubernetes Servic Using ExternalName types +func CreateExternalNameService(resource interface{}, svcName string, svcNamespace string, externalNameTemplate string, svcPortTemplate string) string { + externalName, err := Templatize(externalNameTemplate, Data{Obj: resource}) + if err != nil { + log.Error(err, "error parsing external name template") + return "" + } + + svcPortString, err := Templatize(svcPortTemplate, Data{Obj: resource}) + if err != nil { + log.Error(err, "error parsing service port template") + return "" + } + + svcPortStripSlash := strings.Replace(svcPortString, "\\", "", -1) + + svcPortInt64, err := strconv.ParseInt(svcPortStripSlash, 0, 16) + if err != nil { + log.Error(err, "error converting service port template string to int") + return "" + } + + // ParseInt only returns an int64, must convert to int32 for apiv1.ServicePort field + svcPort := 
int32(svcPortInt64) + + service := &apiv1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: KubernetesResourceName(svcName), + }, + Spec: apiv1.ServiceSpec{ + Type: apiv1.ServiceTypeExternalName, + ExternalName: externalName, + Ports: []apiv1.ServicePort{ + apiv1.ServicePort{ + Port: svcPort, + }, + }, + }, + } + + newService, err := config.Instance.KubeClientset.CoreV1().Services(svcNamespace).Create(service) + if err != nil { + log.Error(err, "error creating service") + } + return newService.Name +} diff --git a/pkg/helpers/types.go b/pkg/helpers/types.go new file mode 100644 index 00000000000..ee69776d1c6 --- /dev/null +++ b/pkg/helpers/types.go @@ -0,0 +1,6 @@ +package helpers + +// Data wrapps the object that is needed for the services +type Data struct { + Obj interface{} +} diff --git a/pkg/iam/authorizers.go b/pkg/iam/authorizers.go new file mode 100644 index 00000000000..3561073ec58 --- /dev/null +++ b/pkg/iam/authorizers.go @@ -0,0 +1,58 @@ +package iam + +import ( + "github.com/Azure/azure-service-operator/pkg/config" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure/auth" +) + +var ( + armAuthorizer autorest.Authorizer +) + +// GetResourceManagementAuthorizer gets an OAuthTokenAuthorizer for Azure Resource Manager +func GetResourceManagementAuthorizer() (autorest.Authorizer, error) { + if armAuthorizer != nil { + return armAuthorizer, nil + } + + var a autorest.Authorizer + var err error + + if config.Instance.UseAADPodIdentity { + a, err = auth.NewAuthorizerFromEnvironment() + } else { + a, err = getAuthorizerForResource(config.Environment().ResourceManagerEndpoint) + } + if err == nil { + // cache + armAuthorizer = a + } else { + // clear cache + armAuthorizer = nil + } + + return armAuthorizer, err +} + +func getAuthorizerForResource(resource string) (autorest.Authorizer, error) { + var a autorest.Authorizer + var err error + + oauthConfig, err := 
adal.NewOAuthConfig( + config.Environment().ActiveDirectoryEndpoint, config.Instance.TenantID) + if err != nil { + return nil, err + } + + token, err := adal.NewServicePrincipalToken( + *oauthConfig, config.Instance.ClientID, config.Instance.ClientSecret, resource) + if err != nil { + return nil, err + } + a = autorest.NewBearerAuthorizer(token) + + return a, err +} diff --git a/pkg/resourcemanager/cosmosdbs/cosmosdbs.go b/pkg/resourcemanager/cosmosdbs/cosmosdbs.go new file mode 100644 index 00000000000..72ab24b39eb --- /dev/null +++ b/pkg/resourcemanager/cosmosdbs/cosmosdbs.go @@ -0,0 +1,104 @@ +package cosmosdbs + +import ( + "context" + "fmt" + "log" + + "github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2015-04-08/documentdb" + azurev1 "github.com/Azure/azure-service-operator/api/v1" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/config" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/iam" + "github.com/Azure/go-autorest/autorest/to" +) + +func getCosmosDBClient() documentdb.DatabaseAccountsClient { + cosmosDBClient := documentdb.NewDatabaseAccountsClient(config.SubscriptionID()) + a, err := iam.GetResourceManagementAuthorizer() + if err != nil { + log.Fatalf("failed to initialize authorizer: %v\n", err) + } + cosmosDBClient.Authorizer = a + cosmosDBClient.AddToUserAgent(config.UserAgent()) + return cosmosDBClient +} + +// CreateCosmosDB creates a new CosmosDB +func CreateCosmosDB(ctx context.Context, groupName string, + cosmosDBName string, + location string, + kind azurev1.CosmosDBKind, + dbType azurev1.CosmosDBDatabaseAccountOfferType, + tags map[string]*string) (documentdb.DatabaseAccount, error) { + cosmosDBClient := getCosmosDBClient() + + log.Println("CosmosDB:CosmosDBName" + cosmosDBName) + + /* Uncomment and update if we should be checking for name exists first + result, err = cosmosDBClient.CheckNameExists(ctx, cosmosDBName) + if err != nil { + return documentdb.DatabaseAccount.{}, err + } + result. 
+ if *result.NameAvailable == false { + log.Fatalf("storage account not available: %v\n", result.Reason) + return storage.Account{}, errors.New("storage account not available") + }*/ + + dbKind := documentdb.DatabaseAccountKind(kind) + sDBType := string(dbType) + + /* + * Current state of Locations and CosmosDB properties: + * Creating a Database account with CosmosDB requires + * that DatabaseAccountCreateUpdateProperties be sent over + * and currently we are not reading most of these values in + * as part of the Spec for CosmosDB. We are currently + * specifying a single Location as part of a location array + * which matches the location set for the overall CosmosDB + * instance. This matches the general behavior of creating + * a CosmosDB instance in the portal where the only + * geo-relicated region is the sole region the CosmosDB + * is created in. + */ + locationObj := documentdb.Location{ + ID: to.StringPtr(fmt.Sprintf("%s-%s", cosmosDBName, location)), + FailoverPriority: to.Int32Ptr(0), + LocationName: to.StringPtr(location), + } + + locationsArray := []documentdb.Location{ + locationObj, + } + + createUpdateParams := documentdb.DatabaseAccountCreateUpdateParameters{ + Location: to.StringPtr(location), + Tags: tags, + Name: &cosmosDBName, + Kind: dbKind, + Type: to.StringPtr("Microsoft.DocumentDb/databaseAccounts"), + ID: &cosmosDBName, + DatabaseAccountCreateUpdateProperties: &documentdb.DatabaseAccountCreateUpdateProperties{ + DatabaseAccountOfferType: &sDBType, + EnableMultipleWriteLocations: to.BoolPtr(false), + IsVirtualNetworkFilterEnabled: to.BoolPtr(false), + Locations: &locationsArray, + }, + } + + log.Println(fmt.Sprintf("creating cosmosDB '%s' in resource group '%s' and location: %v", cosmosDBName, groupName, location)) + + future, err := cosmosDBClient.CreateOrUpdate( + ctx, groupName, cosmosDBName, createUpdateParams) + if err != nil { + log.Println(fmt.Sprintf("ERROR creating cosmosDB '%s' in resource group '%s' and location: %v", 
cosmosDBName, groupName, location)) + log.Println(fmt.Printf("failed to initialize cosmosdb: %v\n", err)) + } + return future.Result(cosmosDBClient) +} + +// DeleteCosmosDB removes the resource group named by env var +func DeleteCosmosDB(ctx context.Context, groupName string, cosmosDBName string) (result documentdb.DatabaseAccountsDeleteFuture, err error) { + cosmosDBClient := getCosmosDBClient() + return cosmosDBClient.Delete(ctx, groupName, cosmosDBName) +} \ No newline at end of file diff --git a/pkg/resourcemanager/rediscaches/rediscaches.go b/pkg/resourcemanager/rediscaches/rediscaches.go new file mode 100644 index 00000000000..8ebe0e2df29 --- /dev/null +++ b/pkg/resourcemanager/rediscaches/rediscaches.go @@ -0,0 +1,85 @@ +package rediscaches + +import ( + "context" + "errors" + "fmt" + "log" + + "github.com/Azure/azure-sdk-for-go/services/redis/mgmt/2018-03-01/redis" + azurev1 "github.com/Azure/azure-service-operator/api/v1" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/config" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/iam" + "github.com/Azure/go-autorest/autorest/to" +) + +func getRedisCacheClient() redis.Client { + redisClient := redis.NewClient(config.SubscriptionID()) + a, err := iam.GetResourceManagementAuthorizer() + if err != nil { + log.Fatalf("failed to initialize authorizer: %v\n", err) + } + redisClient.Authorizer = a + redisClient.AddToUserAgent(config.UserAgent()) + return redisClient +} + +// CreateRedisCache creates a new RedisCache +func CreateRedisCache(ctx context.Context, + groupName string, + redisCacheName string, + location string, + sku azurev1.RedisCacheSku, + enableNonSSLPort bool, + tags map[string]*string) (redis.ResourceType, error) { + redisClient := getRedisCacheClient() + + log.Println("RedisCache:CacheName" + redisCacheName) + + //Check if name is available + redisType := "Microsoft.Cache/redis" + checkNameParams := redis.CheckNameAvailabilityParameters{ + Name: &redisCacheName, + Type: 
&redisType, + } + result, err := redisClient.CheckNameAvailability(ctx, checkNameParams) + if err != nil { + return redis.ResourceType{}, err + } + + if result.StatusCode != 200 { + log.Fatalf("redis cache name (%s) not available: %v\n", redisCacheName, result.Status) + return redis.ResourceType{}, errors.New("redis cache name not available") + } + + log.Println(fmt.Sprintf("creating rediscache '%s' in resource group '%s' and location: %v", redisCacheName, groupName, location)) + + redisSku := redis.Sku{ + Name: redis.SkuName(sku.Name), + Family: redis.SkuFamily(sku.Family), + Capacity: to.Int32Ptr(sku.Capacity), + } + + createParams := redis.CreateParameters{ + Location: to.StringPtr(location), + Tags: tags, + CreateProperties: &redis.CreateProperties{ + EnableNonSslPort: &enableNonSSLPort, + Sku: &redisSku, + }, + } + + future, err := redisClient.Create( + ctx, groupName, redisCacheName, createParams) + if err != nil { + log.Println(fmt.Sprintf("ERROR creating redisCache '%s' in resource group '%s' and location: %v", redisCacheName, groupName, location)) + log.Println(fmt.Printf("failed to initialize redis Cache: %v\n", err)) + } + return future.Result(redisClient) +} + +// DeleteRedisCache removes the resource group named by env var +func DeleteRedisCache(ctx context.Context, groupName string, redisCacheName string) (result redis.DeleteFuture, err error) { + redisClient := getRedisCacheClient() + return redisClient.Delete(ctx, groupName, redisCacheName) +} diff --git a/pkg/resourcemanager/storages/storages.go b/pkg/resourcemanager/storages/storages.go new file mode 100644 index 00000000000..153348fef20 --- /dev/null +++ b/pkg/resourcemanager/storages/storages.go @@ -0,0 +1,77 @@ +package storages + +import ( + "context" + "errors" + "fmt" + "log" + + "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage" + azurev1 "github.com/Azure/azure-service-operator/api/v1" + "github.com/Azure/azure-service-operator/pkg/resourcemanager/config" + 
"github.com/Azure/azure-service-operator/pkg/resourcemanager/iam" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" +) + +func getStoragesClient() storage.AccountsClient { + storagesClient := storage.NewAccountsClient(config.SubscriptionID()) + a, err := iam.GetResourceManagementAuthorizer() + if err != nil { + log.Fatalf("failed to initialize authorizer: %v\n", err) + } + storagesClient.Authorizer = a + storagesClient.AddToUserAgent(config.UserAgent()) + return storagesClient +} + +// CreateStorage creates a new storage account +func CreateStorage(ctx context.Context, groupName string, + storageAccountName string, + location string, + sku azurev1.StorageSku, + kind azurev1.StorageKind, + tags map[string]*string, + accessTier azurev1.StorageAccessTier, + enableHTTPsTrafficOnly *bool) (storage.Account, error) { + storagesClient := getStoragesClient() + + log.Println("Storage:AccountName" + storageAccountName) + storageType := "Microsoft.Storage/storageAccounts" + checkAccountParams := storage.AccountCheckNameAvailabilityParameters{Name: &storageAccountName, Type: &storageType} + result, err := storagesClient.CheckNameAvailability(ctx, checkAccountParams) + if err != nil { + return storage.Account{}, err + } + + if *result.NameAvailable == false { + log.Fatalf("storage account not available: %v\n", result.Reason) + return storage.Account{}, errors.New("storage account not available") + } + + sSku := storage.Sku{Name: storage.SkuName(sku.Name)} + sKind := storage.Kind(kind) + sAccessTier := storage.AccessTier(accessTier) + + params := storage.AccountCreateParameters{ + Location: to.StringPtr(location), + Sku: &sSku, + Kind: sKind, + Tags: tags, + Identity: nil, + AccountPropertiesCreateParameters: &storage.AccountPropertiesCreateParameters{ + AccessTier: sAccessTier, + EnableHTTPSTrafficOnly: enableHTTPsTrafficOnly, + }, + } + + log.Println(fmt.Sprintf("creating storage '%s' in resource group '%s' and location: %v", 
storageAccountName, groupName, location)) + future, err := storagesClient.Create(ctx, groupName, storageAccountName, params) + return future.Result(storagesClient) +} + +// DeleteStorage removes the resource group named by env var +func DeleteStorage(ctx context.Context, groupName string, storageAccountName string) (result autorest.Response, err error) { + storagesClient := getStoragesClient() + return storagesClient.Delete(ctx, groupName, storageAccountName) +} diff --git a/pkg/template/assets/cosmosdb.json b/pkg/template/assets/cosmosdb.json new file mode 100644 index 00000000000..f4feb76cae2 --- /dev/null +++ b/pkg/template/assets/cosmosdb.json @@ -0,0 +1,47 @@ +{ + "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "location": { + "type": "String" + }, + "kind": { + "type": "String" + }, + "properties": { + "type": "Object" + } + }, + "variables": { + "cosmosDBName": "[concat('aso', uniqueString(resourceGroup().id))]" + }, + "resources": [ + { + "type": "Microsoft.DocumentDB/databaseAccounts", + "apiVersion": "2015-04-08", + "name": "[variables('cosmosDBName')]", + "location": "[parameters('location')]", + "dependsOn": [], + "kind": "[parameters('kind')]", + "properties": { + "databaseAccountOfferType": "[parameters('properties').databaseAccountOfferType]", + "locations": [ + { + "locationName": "[parameters('location')]", + "failoverPriority": 0 + } + ] + } + } + ], + "outputs": { + "cosmosDBName": { + "type": "string", + "value": "[variables('cosmosDBName')]" + }, + "primaryMasterKey": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.DocumentDB/databaseAccounts', variables('cosmosDBName')), '2015-04-08').primaryMasterKey]" + } + } +} diff --git a/pkg/template/assets/rediscache.json b/pkg/template/assets/rediscache.json new file mode 100644 index 00000000000..551af6f2ce5 --- /dev/null +++ b/pkg/template/assets/rediscache.json @@ -0,0 +1,55 @@ +{ + 
"$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "location": { + "type": "string" + }, + "properties.sku.name": { + "type": "string" + }, + "properties.sku.family": { + "type": "string" + }, + "properties.sku.capacity": { + "type": "int" + }, + "properties.enableNonSslPort": { + "type": "bool" + } + }, + "variables": { + "redisCacheName": "[concat('aso', uniqueString(resourceGroup().id))]" + }, + "resources": [ + { + "type": "Microsoft.Cache/Redis", + "apiVersion": "2018-03-01", + "name": "[variables('redisCacheName')]", + "location": "[parameters('location')]", + "properties": { + "sku": { + "name": "[parameters('properties.sku.name')]", + "family": "[parameters('properties.sku.family')]", + "capacity": "[parameters('properties.sku.capacity')]" + }, + "enableNonSslPort": "[parameters('properties.enableNonSslPort')]", + "redisConfiguration": {} + } + } + ], + "outputs": { + "redisCacheName": { + "type": "string", + "value": "[variables('redisCacheName')]" + }, + "primaryKey": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Cache/Redis', variables('redisCacheName')), '2018-03-01').primaryKey]" + }, + "secondaryKey": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Cache/Redis', variables('redisCacheName')), '2018-03-01').secondaryKey]" + } + } +} diff --git a/pkg/template/assets/storage.json b/pkg/template/assets/storage.json new file mode 100644 index 00000000000..893f47ae8e7 --- /dev/null +++ b/pkg/template/assets/storage.json @@ -0,0 +1,63 @@ +{ + "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "location": { + "type": "String" + }, + "accountType": { + "type": "String" + }, + "kind": { + "type": "String" + }, + "accessTier": { + "type": "String" + }, + "supportsHttpsTrafficOnly": { + "type": "Bool" + } + }, + "variables": { + 
"storageAccountName": "[concat('aso', uniqueString(resourceGroup().id))]" + }, + "resources": [ + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "2018-07-01", + "name": "[variables('storageAccountName')]", + "location": "[parameters('location')]", + "dependsOn": [], + "sku": { + "name": "[parameters('accountType')]" + }, + "kind": "[parameters('kind')]", + "properties": { + "accessTier": "[parameters('accessTier')]", + "supportsHttpsTrafficOnly": "[parameters('supportsHttpsTrafficOnly')]" + } + } + ], + "outputs": { + "storageAccountName": { + "type": "string", + "value": "[variables('storageAccountName')]" + }, + "key1": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountName')), '2018-07-01').keys[0].value]" + }, + "key2": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountName')), '2018-07-01').keys[1].value]" + }, + "connectionString1": { + "type": "string", + "value": "[concat('DefaultEndpointsProtocol=https;AccountName=',variables('storageAccountName'),';AccountKey=',listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountName')), '2018-07-01').keys[0].value,';EndpointSuffix=core.windows.net')]" + }, + "connectionString2": { + "type": "string", + "value": "[concat('DefaultEndpointsProtocol=https;AccountName=',variables('storageAccountName'),';AccountKey=',listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('storageAccountName')), '2018-07-01').keys[1].value,';EndpointSuffix=core.windows.net')]" + } + } +} diff --git a/pkg/template/templates.go b/pkg/template/templates.go new file mode 100644 index 00000000000..6a9798c4ebc --- /dev/null +++ b/pkg/template/templates.go @@ -0,0 +1,281 @@ +// Code generated by go-bindata. +// sources: +// pkg/template/assets/cosmosdb.json +// pkg/template/assets/rediscache.json +// pkg/template/assets/storage.json +// DO NOT EDIT! 
+ +package template + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _cosmosdbJson = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x54\x4d\x6b\xdb\x40\x10\xbd\xfb\x57\x88\x6d\x41\x12\xc8\xb2\x5c\x5a\x28\xbe\x35\x18\x4a\x09\xa9\x0b\x0d\xbd\x18\x1f\xc6\xab\x71\xb2\xa9\xb4\xb3\xdd\x9d\x35\xa8\xc5\xff\xbd\xc8\xf2\x87\x24\x5b\x49\x88\x7d\x31\x33\xf3\xde\x9b\xd9\x37\xe3\x7f\xa3\x20\x08\x02\xf1\xde\xc9\x47\x2c\x41\xcc\x02\xf1\xc8\x6c\x66\x93\x49\x13\x48\x4b\xd0\xf0\x80\x25\x6a\x4e\xe1\xaf\xb7\x98\x4a\x2a\x0f\x39\x37\xf9\x90\x4d\x3f\x8d\xb3\xe9\x38\x9b\x4e\x72\x34\x05\x55\x75\xdd\x3d\x96\xa6\x00\xc6\xf4\xc9\x91\x7e\x27\x92\x46\x40\x92\x66\xd4\xfc\x0b\xad\x53\xa4\x6b\x9d\x69\x9a\xd5\xdf\x63\x81\x01\x0b\x25\x32\x5a\x27\x66\x41\xd3\xd5\x3e\x5e\x90\x04\x6e\x20\xe7\xe8\x3e\xc3\x95\xc1\x9a\xe8\x27\x5b\xa5\x1f\xc4\x29\xb9\x4b\xce\xe8\xdf\x4a\xe7\x6f\x43\x1a\x4b\x06\x2d\x2b\x74\xc3\xf8\xc5\xfa\x09\x25\xb7\xf0\xa3\x16\x8b\xd8\x82\x55\xb0\x2e\x7a\x04\x42\x92\x2b\xc9\xcd\x6f\xbe\x43\xb9\x27\x59\x4a\xd2\x12\x38\x0a\xc1\x51\x98\x04\x5e\xab\x3f\x1e\x9b\xce\x22\x8b\x8e\xbc\x95\xf8\xd5\x92\x37\x51\x9c\xaa\x3c\x8e\x57\xa2\xa3\x72\x2c\xa9\x55\x96\x27\x95\x81\x86\xef\x94\xb4\xe4\x68\xc3\xe9\x9c\xa4\xaf\xdd\x9a\xdf\x4c\x72\x60\x58\x83\xc3\x2f\x52\x92\xd7\xec\x44\xd2\x05\x83\x51\x2d\xdb\x1a\xcf\x3f\x8e\xb3\xcf\xfd\x3a\x7d\x1c\xe8\x34\x78\x14\xb6\x87\x0d\xe3\x55\x1f\xd2\x32\x57\x2c\xcf\x1b\x10\x85\xc7\xc4\x15\x4c\x8e\x06\x75\xee\x16\x35\x68\xb9\xea\x25\x0f\x7e\x77\xc9\xea\xe0\x15\xa2\x67\x1c\x6e\x84\xba\xef\xb2\xd8\x6c\xd0\xde\x1f\x1e\xb2\xc3\x7f\x26\x0a\xe3\x74\x08\xd5\x97\xef\xcc\xdf\x35\xaf\xfd\xb9\xec\xeb\x02\x7c\x5a\xa4\xd7\x3d\x60\x87\x63\x03\xaa\xa0\x2d\xda\x1f\x56\x91\x55\x5c\x89\x59\x90\x5d\xad\xde\x5d\x44\x57\xa3\xeb\xf9\xe6\xd7\xc1\x18\x41\x9e\x8d\xe7\xe7\x4f\x60\x60\x59\x5d\x73\x9d\x3d\xd7\xb6\x50\xf8\x17\xd7\x6c\xe0\xa2\x55\x09\xb6\xba\x03\xc7\x68\x6f\xb1\x7a\xbb\x72\xa1\x1c\xdf\x62\xe5\x4e\xf7\xf9\x2d\x8f\xc2\x57\xdd\x56\x98\x04\x83\x6d\xc7\x49\x10\x9e\xaf\x2b\x8c\xd3\x7e\xc3\xab\x8b\x7f\x9a\xd1\x6e\xf4\x3f\x00\x00\xff\xff\x6e\x83\x09\x2a\xc1\x05\x00\x00") 
+ +func cosmosdbJsonBytes() ([]byte, error) { + return bindataRead( + _cosmosdbJson, + "cosmosdb.json", + ) +} + +func cosmosdbJson() (*asset, error) { + bytes, err := cosmosdbJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "cosmosdb.json", size: 1473, mode: os.FileMode(420), modTime: time.Unix(1563946123, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _rediscacheJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x54\x4d\x8f\xd3\x30\x10\xbd\xf7\x57\x44\x06\x29\x89\x94\xba\xc9\x22\x24\xd4\x2b\x07\x84\x56\xac\x10\x8b\xb8\x54\x3d\xcc\xba\xd3\xd6\x6c\xe2\x31\xfe\x58\xa9\xa0\xfe\x77\x94\xa6\xf9\x68\x9a\xa4\x1c\xf6\x40\x7b\xf3\x3c\xbf\xf7\x26\xf3\xc6\x7f\x66\x41\x10\x04\xec\xad\x15\x7b\x2c\x80\x2d\x03\xb6\x77\x4e\x2f\x17\x8b\xea\x80\x17\xa0\x60\x87\x05\x2a\xc7\xe1\xb7\x37\xc8\x05\x15\xe7\x9a\x5d\xdc\xa5\xd9\xfb\x79\x9a\xcd\xd3\x6c\xb1\x41\x9d\xd3\xa1\xc4\x7d\xc7\x42\xe7\xe0\x90\xff\xb4\xa4\xde\xb0\xa4\x12\x10\xa4\x1c\x2a\xf7\x03\x8d\x95\xa4\x4a\x9d\x8c\xa7\xe5\xbf\x06\x68\x30\x50\xa0\x43\x63\xd9\x32\xa8\x5c\x9d\xce\x73\x12\xe0\xaa\x2b\xed\xe9\xa9\xe2\x0e\x1a\x4b\x22\xeb\x8c\x54\x3b\xd6\x14\x8f\x49\x7b\x5b\x1b\xd2\x68\x9c\x44\xcb\xed\xb3\xe7\x0a\x0a\x7c\x15\xa2\x2d\x14\x32\x3f\xbc\x0a\x95\x00\x0d\x42\xba\x09\x32\xa9\xdc\x4d\x26\x54\xf0\x94\xe3\x03\xa9\x47\x9b\x7f\x25\xe3\xc6\xd9\x9e\x88\xf2\x0e\xdd\xac\x43\xca\x5e\xc0\xc8\x92\xa7\x37\x03\x83\x1b\x69\x3f\x82\xd8\xe3\x43\xf5\x01\xd9\x4a\x90\x12\xe0\xa2\x10\x2c\x85\x49\xe0\x95\xfc\xe5\xf1\xf1\xd4\x74\x64\xd0\x92\x37\x02\x3f\x19\xf2\x3a\x8a\xb9\xdc\xc4\xf1\x9a\x5d\xe8\xd4\x90\x52\x67\xd5\xe8\x8c\x18\xfe\x22\x85\x21\x4b\x5b\xc7\x4f\x16\x16\xdf\x4a\x37\x2c\xb9\x04\x83\x96\x9d\x6c\xdd\xa5\xd9\x87\x79\xfa\x6e\x9e\x66\x7d\x9c\xaa\x1b\x68\x5a\x8d\xc2\xcb\xf6\xc2\x78\xdd\xbf\xd4\xc9\x20\x5b\xb5\x41\x8d\xc2\xba\x30\x70\xa7\x9d\xcd\xd5\x28\x4e\x75\xfb\xec\x07\x0b\x97\x2e\xbb\x62\x03\x59\xbe\xd6\x6d\x28\x9a\x84\x4e\x92\x54\xa8\x09\x9a\x4e\x3a\x27\x89\x6a\x5c\x58\x8f\x
ba\xfb\x3b\x5e\x93\xb3\x81\xc0\x8e\x0a\xf4\xb1\xc3\x7e\xcf\x31\x25\xb5\x95\x3b\x6f\x9a\x47\xe3\x78\x01\x3c\xf6\x92\xbf\x3e\x27\x92\xbc\xd3\xde\xdd\xca\xfd\xf4\xb6\xf7\x12\xf0\x02\xb9\xff\x87\xac\x8d\x6c\xb6\x2c\xc0\x1c\xee\xf1\xf6\x1b\x33\xaa\x9a\x4b\xeb\xee\xf1\x60\x9b\x85\xfc\xbc\x89\xc2\xc1\x65\x0a\x93\x60\xc2\x62\x9c\x04\x61\xbb\x50\x61\xcc\x5b\x73\x23\xee\x2d\x0a\x52\x9b\xff\xd6\x7f\xd7\xde\xfa\xea\x29\x9c\x1d\x67\x7f\x03\x00\x00\xff\xff\xa7\x3e\x2d\x81\x15\x07\x00\x00") + +func rediscacheJsonBytes() ([]byte, error) { + return bindataRead( + _rediscacheJson, + "rediscache.json", + ) +} + +func rediscacheJson() (*asset, error) { + bytes, err := rediscacheJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "rediscache.json", size: 1813, mode: os.FileMode(420), modTime: time.Unix(1563946509, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _storageJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xdc\x55\x4f\x6f\xd3\x4e\x10\xbd\xe7\x53\x58\xfe\xfd\xa4\x4d\x24\xc7\xb1\x2b\x21\x50\xab\x1c\x40\x20\x40\x15\x14\xa9\x11\x97\x28\x87\x65\x3d\x69\x97\xda\x3b\xcb\xce\x6c\x8b\x41\xfd\xee\xc8\x71\x9d\x38\x4e\x4c\xaa\xf4\x80\x40\xbe\xed\xfc\x7b\xef\xf9\xcd\xee\xcf\x41\x10\x04\x41\xf8\x3f\xa9\x6b\x28\x64\x78\x1a\x84\xd7\xcc\xf6\x74\x32\xa9\x0f\xe2\x42\x1a\x79\x05\x05\x18\x8e\xe5\x0f\xef\x20\x56\x58\x3c\xc4\x68\x72\x92\xa4\xcf\xc6\x49\x3a\x4e\xd2\x49\x06\x36\xc7\xb2\xca\x9b\x41\x61\x73\xc9\x10\x7f\x25\x34\xff\x85\x51\x3d\x40\xa1\x61\x30\xfc\x19\x1c\x69\x34\xd5\x9c\x34\x4e\xaa\xaf\x49\xb0\xd2\xc9\x02\x18\x1c\x85\xa7\x41\x8d\x6a\x75\x9e\xa3\x92\x5c\x97\x6c\x4e\x57\x11\x2e\x2d\x54\x8d\x2e\xd9\x69\x73\x15\xae\x83\xf7\xd1\xa6\x5a\x2a\x85\xde\xf0\xac\x4e\x3d\xa2\xc1\x8d\x36\xd9\xd1\xa3\x81\x68\xa6\xc1\x1d\x57\x4f\xde\x5a\x74\x4c\xef\x98\x2d\xcd\x9c\x5c\x2e\xb5\xba\x30\x79\xd9\xdf\xed\x15\x62\xde\xea\x35\x68\x75\x0c\x6f\xa5\xd3\xf2\x4b\x0e\x1d\x75\x89\xd1\xc9\x2b\x78\x59\xcb\xf4\x51\x16\xab\x46\x73\x85\x46\x49\x1e\x0a\x49\x28
\xa2\xc0\x1b\xfd\xcd\x43\x8d\x75\xe8\x80\xd0\x3b\x05\x6f\x1d\x7a\x3b\x1c\xc5\x3a\x1b\x8d\x16\xe1\xd6\xac\x26\xa5\x9a\x35\x5f\xcf\xea\x01\xfd\x41\x2b\x87\x84\x4b\x8e\x2f\x6b\x2c\x93\x6d\x4c\x14\x46\xdb\x85\xd2\xea\x96\x8b\x4e\x92\xf4\xc5\x38\x79\x3e\x4e\xd2\x6e\x9e\x69\xc8\xac\xa9\x0f\xc5\x2e\x5d\x31\x5a\x74\x0b\x5b\x8e\x0b\xe7\x1b\x5b\x0e\x45\x13\xd8\x53\x93\x81\x05\x93\xd1\x45\x55\x34\x5f\x74\x82\x74\xe3\x77\x7e\xda\x36\xc4\xf6\x94\x96\x65\x45\xa3\xec\x1e\x7f\x04\x2d\x77\x6e\x37\xa8\x0e\xf7\x40\xb4\x0e\x2d\x38\xd6\x1d\x0b\x6c\x74\x6d\xfb\x75\x07\xd2\x43\x68\xb7\x6f\x70\xc0\xab\xdb\x9d\xfa\x12\x77\x99\x76\x7c\xfc\xa0\x69\x88\x9e\xad\xe7\xc7\xb8\xb8\xc7\x6f\x54\xaf\x5c\x47\x9c\x5b\x99\xfb\x47\xba\x65\xff\x35\x01\x65\x7a\xfc\xcc\x5c\x13\x9f\x43\x49\xeb\xe5\x7a\x9f\x0d\xc5\xc1\xc5\x10\x51\x70\x00\xec\x28\x0a\xc4\x66\x41\xc4\x28\xbe\x81\x92\xe6\xc9\x22\x5e\x8d\xee\xa7\x72\xf2\xb7\x50\x49\x7f\x4f\x45\xa1\x31\xa0\xaa\x95\xad\x2f\xaf\x27\xfc\xa2\xe6\x46\x7c\x0d\x4b\xe9\x73\x7e\x63\x32\x8b\xda\x30\x7d\x72\xc8\xa8\x30\x9f\x56\xcf\x26\x9d\xb5\x30\x4f\x45\x74\x80\x53\x24\x9a\xfc\x73\x28\xa7\x22\xfa\x33\x36\x88\xc4\x59\xc3\xe6\xd2\x2f\x97\xfa\xfb\x54\xa1\x83\xf8\x4e\x9b\x0c\xef\x28\x36\xc0\xbd\xae\xef\xea\xfb\x04\xdf\xfc\x7b\xfa\xa6\x47\xea\x5b\x3f\xa5\x83\xfb\xc1\xaf\x00\x00\x00\xff\xff\xa5\xf7\x8d\x6d\x9b\x09\x00\x00") + +func storageJsonBytes() ([]byte, error) { + return bindataRead( + _storageJson, + "storage.json", + ) +} + +func storageJson() (*asset, error) { + bytes, err := storageJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "storage.json", size: 2459, mode: os.FileMode(420), modTime: time.Unix(1563946123, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. 
+func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "cosmosdb.json": cosmosdbJson, + "rediscache.json": rediscacheJson, + "storage.json": storageJson, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... 
and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. +func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "cosmosdb.json": &bintree{cosmosdbJson, map[string]*bintree{}}, + "rediscache.json": &bintree{rediscacheJson, map[string]*bintree{}}, + "storage.json": &bintree{storageJson, map[string]*bintree{}}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != 
nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +}