Improve deployment process
  - Add tests ensuring various edge cases are covered.
  - Fix bugs in the deployment process that the tests uncovered.
matthchr committed Jul 1, 2021
1 parent 64508fd commit b7987fb
Showing 23 changed files with 12,885 additions and 75 deletions.
49 changes: 24 additions & 25 deletions hack/generated/controllers/crd_vmss_test.go
@@ -20,7 +20,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)

func makeVNETForVMSS(tc testcommon.KubePerTestContext, owner genruntime.KnownResourceReference) *network.VirtualNetwork {
func newVNETForVMSS(tc testcommon.KubePerTestContext, owner genruntime.KnownResourceReference) *network.VirtualNetwork {
return &network.VirtualNetwork{
ObjectMeta: tc.MakeObjectMetaWithName(tc.Namer.GenerateName("vn")),
Spec: network.VirtualNetworks_Spec{
@@ -33,7 +33,7 @@ func makeVNETForVMSS(tc testcommon.KubePerTestContext, owner genruntime.KnownRes
}
}

func makeSubnetForVMSS(tc testcommon.KubePerTestContext, owner genruntime.KnownResourceReference) *network.VirtualNetworksSubnet {
func newSubnetForVMSS(tc testcommon.KubePerTestContext, owner genruntime.KnownResourceReference) *network.VirtualNetworksSubnet {
return &network.VirtualNetworksSubnet{
ObjectMeta: tc.MakeObjectMeta("subnet"),
Spec: network.VirtualNetworksSubnets_Spec{
@@ -43,7 +43,7 @@ func makeSubnetForVMSS(tc testcommon.KubePerTestContext, owner genruntime.KnownR
}
}

func makePublicIPAddressForVMSS(tc testcommon.KubePerTestContext, owner genruntime.KnownResourceReference) *network.PublicIPAddresses {
func newPublicIPAddressForVMSS(tc testcommon.KubePerTestContext, owner genruntime.KnownResourceReference) *network.PublicIPAddresses {
publicIPAddressSku := network.PublicIPAddressSkuNameStandard
return &network.PublicIPAddresses{
ObjectMeta: tc.MakeObjectMetaWithName(tc.Namer.GenerateName("publicip")),
@@ -58,7 +58,7 @@ func makePublicIPAddressForVMSS(tc testcommon.KubePerTestContext, owner genrunti
}
}

func makeLoadBalancerForVMSS(tc testcommon.KubePerTestContext, rg *resources.ResourceGroup, publicIPAddress *network.PublicIPAddresses) *network.LoadBalancer {
func newLoadBalancerForVMSS(tc testcommon.KubePerTestContext, rg *resources.ResourceGroup, publicIPAddress *network.PublicIPAddresses) *network.LoadBalancer {
loadBalancerSku := network.LoadBalancerSkuNameStandard
lbName := tc.Namer.GenerateName("loadbalancer")
lbFrontendName := "LoadBalancerFrontend"
@@ -101,34 +101,19 @@ func makeLoadBalancerForVMSS(tc testcommon.KubePerTestContext, rg *resources.Res
}
}

func Test_VMSS_CRUD(t *testing.T) {
t.Parallel()

tc := globalTestContext.ForTest(t)
rg := tc.CreateNewTestResourceGroupAndWait()
func newVMSS(
tc testcommon.KubePerTestContext,
rg *resources.ResourceGroup,
loadBalancer *network.LoadBalancer,
subnet *network.VirtualNetworksSubnet) *compute.VirtualMachineScaleSet {

sshPublicKey, err := tc.GenerateSSHKey(2048)
tc.Expect(err).ToNot(HaveOccurred())

vnet := makeVNETForVMSS(tc, testcommon.AsOwner(rg.ObjectMeta))
subnet := makeSubnetForVMSS(tc, testcommon.AsOwner(vnet.ObjectMeta))
publicIPAddress := makePublicIPAddressForVMSS(tc, testcommon.AsOwner(rg.ObjectMeta))

// Need to create this first because we use the Load Balancer InboundNATPool ARM ID
// in the spec of the VMSS.
tc.CreateResourceAndWait(publicIPAddress)

loadBalancer := makeLoadBalancerForVMSS(tc, rg, publicIPAddress)

// TODO: This has to happen before subnet right now because our controller doesn't deal with children being created before their parents well
tc.CreateResourceAndWait(vnet)
tc.CreateResourcesAndWait(subnet, loadBalancer)

// VMSS
upgradePolicyMode := compute.UpgradePolicyModeAutomatic
adminUsername := "adminUser"

vmss := &compute.VirtualMachineScaleSet{
return &compute.VirtualMachineScaleSet{
ObjectMeta: tc.MakeObjectMetaWithName(tc.Namer.GenerateName("vmss")),
Spec: compute.VirtualMachineScaleSets_Spec{
Location: tc.AzureRegion,
@@ -195,6 +180,20 @@ func Test_VMSS_CRUD(t *testing.T) {
},
},
}
}

func Test_VMSS_CRUD(t *testing.T) {
t.Parallel()

tc := globalTestContext.ForTest(t)
rg := tc.CreateNewTestResourceGroupAndWait()

vnet := newVNETForVMSS(tc, testcommon.AsOwner(rg.ObjectMeta))
subnet := newSubnetForVMSS(tc, testcommon.AsOwner(vnet.ObjectMeta))
publicIPAddress := newPublicIPAddressForVMSS(tc, testcommon.AsOwner(rg.ObjectMeta))
loadBalancer := newLoadBalancerForVMSS(tc, rg, publicIPAddress)
tc.CreateResourcesAndWait(vnet, subnet, loadBalancer, publicIPAddress)
vmss := newVMSS(tc, rg, loadBalancer, subnet)

tc.CreateResourceAndWait(vmss)
tc.Expect(vmss.Status.Id).ToNot(BeNil())
186 changes: 186 additions & 0 deletions hack/generated/controllers/deployment_reconciler_negative_test.go
@@ -0,0 +1,186 @@
/*
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
*/

package controllers_test

import (
    "testing"

    . "github.com/onsi/gomega"
    "sigs.k8s.io/controller-runtime/pkg/client"

    compute "github.com/Azure/azure-service-operator/hack/generated/_apis/microsoft.compute/v1alpha1api20201201"
    resources "github.com/Azure/azure-service-operator/hack/generated/_apis/microsoft.resources/v1alpha1api20200601"
    storage "github.com/Azure/azure-service-operator/hack/generated/_apis/microsoft.storage/v1alpha1api20210401"
    "github.com/Azure/azure-service-operator/hack/generated/pkg/armclient"
    "github.com/Azure/azure-service-operator/hack/generated/pkg/reconcilers"
    "github.com/Azure/azure-service-operator/hack/generated/pkg/testcommon"
    "github.com/Azure/go-autorest/autorest/to"
)

func newStorageAccountWithInvalidKeyExpiration(tc testcommon.KubePerTestContext, rg *resources.ResourceGroup) *storage.StorageAccount {
    // Custom namer because storage accounts have strict names
    namer := tc.Namer.WithSeparator("")

    // Create a storage account with an invalid key expiration period
    accessTier := storage.StorageAccountPropertiesCreateParametersAccessTierHot
    return &storage.StorageAccount{
        ObjectMeta: tc.MakeObjectMetaWithName(namer.GenerateName("stor")),
        Spec: storage.StorageAccounts_Spec{
            Location: tc.AzureRegion,
            Owner:    testcommon.AsOwner(rg.ObjectMeta),
            Kind:     storage.StorageAccountsSpecKindBlobStorage,
            Sku: storage.StorageAccounts_Spec_Sku{
                Name: storage.StorageAccountsSpecSkuNameStandardLRS,
            },
            AccessTier: &accessTier,
            KeyPolicy: &storage.KeyPolicy{
                KeyExpirationPeriodInDays: -260,
            },
        },
    }
}

func newVMSSWithInvalidPublisher(tc testcommon.KubePerTestContext, rg *resources.ResourceGroup) *compute.VirtualMachineScaleSet {
    upgradePolicyMode := compute.UpgradePolicyModeAutomatic
    adminUsername := "adminUser"
    return &compute.VirtualMachineScaleSet{
        ObjectMeta: tc.MakeObjectMetaWithName(tc.Namer.GenerateName("vmss")),
        Spec: compute.VirtualMachineScaleSets_Spec{
            Location: tc.AzureRegion,
            Owner:    testcommon.AsOwner(rg.ObjectMeta),
            Sku: &compute.Sku{
                Name:     to.StringPtr("STANDARD_D1_v2"),
                Capacity: to.IntPtr(1),
            },
            PlatformFaultDomainCount: to.IntPtr(3),
            SinglePlacementGroup:     to.BoolPtr(false),
            UpgradePolicy: &compute.UpgradePolicy{
                Mode: &upgradePolicyMode,
            },
            VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{
                StorageProfile: &compute.VirtualMachineScaleSetStorageProfile{
                    ImageReference: &compute.ImageReference{
                        Publisher: to.StringPtr("this publisher"),
                        Offer:     to.StringPtr("does not"),
                        Sku:       to.StringPtr("exist"),
                        Version:   to.StringPtr("latest"),
                    },
                },
                OsProfile: &compute.VirtualMachineScaleSetOSProfile{
                    ComputerNamePrefix: to.StringPtr("computer"),
                    AdminUsername:      &adminUsername,
                },
                NetworkProfile: &compute.VirtualMachineScaleSetNetworkProfile{
                    NetworkInterfaceConfigurations: []compute.VirtualMachineScaleSetNetworkConfiguration{
                        {
                            Name: "mynicconfig",
                        },
                    },
                },
            },
        },
    }
}

// There are two ways that a deployment can fail. It can be rejected when initially
// submitted to the Azure API, or it can be accepted and then report a failure during
// long running operation polling. This ensures that the second case is handled correctly.
func Test_DeploymentAccepted_LongRunningOperationFails(t *testing.T) {
    t.Parallel()

    tc := globalTestContext.ForTest(t)
    rg := tc.CreateNewTestResourceGroupAndWait()
    acct := newStorageAccountWithInvalidKeyExpiration(tc, rg)
    tc.CreateResourceAndWaitForFailure(acct)

    errAnnotation := acct.Annotations[reconcilers.ResourceErrorAnnotation]
    tc.Expect(errAnnotation).To(ContainSubstring("InvalidValuesForRequestParameters"))
    tc.Expect(errAnnotation).To(ContainSubstring("Values for request parameters are invalid: keyPolicy.keyExpirationPeriodInDays."))
}

// There are two ways that a deployment can fail. It can be rejected when initially
// submitted to the Azure API, or it can be accepted and then report a failure during
// long running operation polling. This ensures that a resource in the second case
// can be updated to resolve the cause of the failure and successfully deployed.
func Test_DeploymentAccepted_LongRunningOperationFails_SucceedsAfterUpdate(t *testing.T) {
    t.Parallel()

    tc := globalTestContext.ForTest(t)
    rg := tc.CreateNewTestResourceGroupAndWait()
    acct := newStorageAccountWithInvalidKeyExpiration(tc, rg)
    tc.CreateResourceAndWaitForFailure(acct)

    // Remove the bad property and ensure we can successfully provision
    patcher := tc.NewResourcePatcher(acct)
    acct.Spec.KeyPolicy = nil
    tc.PatchResourceAndWaitAfter(acct, patcher, armclient.FailedProvisioningState)

    // Ensure that the old failure information was cleared away
    objectKey, err := client.ObjectKeyFromObject(acct)
    tc.Expect(err).ToNot(HaveOccurred())
    updated := &storage.StorageAccount{}
    tc.GetResource(objectKey, updated)

    tc.Expect(updated.Annotations).ToNot(HaveKey(reconcilers.ResourceErrorAnnotation))
}

// There are two ways that a deployment can fail. It can be rejected when initially
// submitted to the Azure API, or it can be accepted and then report a failure during
// long running operation polling. This ensures that the first case is handled correctly.
func Test_DeploymentRejected(t *testing.T) {
    t.Parallel()

    tc := globalTestContext.ForTest(t)
    rg := tc.CreateNewTestResourceGroupAndWait()
    vmss := newVMSSWithInvalidPublisher(tc, rg)
    tc.CreateResourceAndWaitForFailure(vmss)

    errAnnotation := vmss.Annotations[reconcilers.ResourceErrorAnnotation]
    tc.Expect(errAnnotation).To(ContainSubstring("InvalidTemplateDeployment"))
    tc.Expect(errAnnotation).To(ContainSubstring("InvalidParameter"))
    tc.Expect(errAnnotation).To(ContainSubstring("The value of parameter imageReference.publisher is invalid"))
}

// There are two ways that a deployment can fail. It can be rejected when initially
// submitted to the Azure API, or it can be accepted and then report a failure during
// long running operation polling. This ensures that a resource in the first case
// can be updated to resolve the cause of the failure and successfully deployed.
func Test_DeploymentRejected_SucceedsAfterUpdate(t *testing.T) {
    t.Parallel()

    tc := globalTestContext.ForTest(t)
    rg := tc.CreateNewTestResourceGroupAndWait()

    vnet := newVNETForVMSS(tc, testcommon.AsOwner(rg.ObjectMeta))
    subnet := newSubnetForVMSS(tc, testcommon.AsOwner(vnet.ObjectMeta))
    publicIPAddress := newPublicIPAddressForVMSS(tc, testcommon.AsOwner(rg.ObjectMeta))
    loadBalancer := newLoadBalancerForVMSS(tc, rg, publicIPAddress)
    tc.CreateResourcesAndWait(vnet, subnet, loadBalancer, publicIPAddress)
    vmss := newVMSS(tc, rg, loadBalancer, subnet)
    imgRef := vmss.Spec.VirtualMachineProfile.StorageProfile.ImageReference
    originalImgRef := imgRef.DeepCopy()

    // Set the VMSS to have an invalid image
    imgRef.Publisher = to.StringPtr("this publisher")
    imgRef.Offer = to.StringPtr("does not")
    imgRef.Sku = to.StringPtr("exist")
    imgRef.Version = to.StringPtr("latest")

    tc.CreateResourceAndWaitForFailure(vmss)

    // Now fix the image reference and the VMSS should successfully deploy
    patcher := tc.NewResourcePatcher(vmss)
    vmss.Spec.VirtualMachineProfile.StorageProfile.ImageReference = originalImgRef
    tc.PatchResourceAndWaitAfter(vmss, patcher, armclient.FailedProvisioningState)

    // Ensure that the old failure information was cleared away
    objectKey, err := client.ObjectKeyFromObject(vmss)
    tc.Expect(err).ToNot(HaveOccurred())
    updated := &compute.VirtualMachineScaleSet{}
    tc.GetResource(objectKey, updated)

    tc.Expect(updated.Annotations).ToNot(HaveKey(reconcilers.ResourceErrorAnnotation))
}
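
Taken together, the new tests establish a failure-then-fix flow: create a resource that fails, patch away the cause, and confirm the failure annotation is cleared. The sketch below is an illustrative recap only, not code from this commit; it reuses only helpers that already appear above (newStorageAccountWithInvalidKeyExpiration, CreateResourceAndWaitForFailure, NewResourcePatcher, PatchResourceAndWaitAfter), assumes the same package, imports, and test harness, and the function name Test_FailureThenFix_Recap is purely hypothetical.

// Illustrative recap: mirrors Test_DeploymentAccepted_LongRunningOperationFails_SucceedsAfterUpdate above.
func Test_FailureThenFix_Recap(t *testing.T) {
    t.Parallel()

    tc := globalTestContext.ForTest(t)
    rg := tc.CreateNewTestResourceGroupAndWait()

    // 1. Submit a resource with a deliberately invalid property; the reconciler
    //    records the failure in the ResourceErrorAnnotation.
    acct := newStorageAccountWithInvalidKeyExpiration(tc, rg)
    tc.CreateResourceAndWaitForFailure(acct)

    // 2. Remove the invalid property and patch; wait until the resource leaves
    //    the Failed provisioning state.
    patcher := tc.NewResourcePatcher(acct)
    acct.Spec.KeyPolicy = nil
    tc.PatchResourceAndWaitAfter(acct, patcher, armclient.FailedProvisioningState)

    // 3. The error annotation written during the failure should now be cleared.
    objectKey, err := client.ObjectKeyFromObject(acct)
    tc.Expect(err).ToNot(HaveOccurred())
    updated := &storage.StorageAccount{}
    tc.GetResource(objectKey, updated)
    tc.Expect(updated.Annotations).ToNot(HaveKey(reconcilers.ResourceErrorAnnotation))
}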