Skip to content

Commit

Permalink
r/(linux|windows)_virtual_machine_scale_set: conditionally updating the instances
Browse files Browse the repository at this point in the history
  • Loading branch information
tombuildsstuff committed Feb 10, 2020
1 parent 4d9e294 commit a04738d
Show file tree
Hide file tree
Showing 8 changed files with 278 additions and 202 deletions.
85 changes: 45 additions & 40 deletions azurerm/internal/services/compute/client/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,26 +7,27 @@ import (
)

type Client struct {
AvailabilitySetsClient *compute.AvailabilitySetsClient
DedicatedHostsClient *compute.DedicatedHostsClient
DedicatedHostGroupsClient *compute.DedicatedHostGroupsClient
DisksClient *compute.DisksClient
DiskEncryptionSetsClient *compute.DiskEncryptionSetsClient
GalleriesClient *compute.GalleriesClient
GalleryImagesClient *compute.GalleryImagesClient
GalleryImageVersionsClient *compute.GalleryImageVersionsClient
ProximityPlacementGroupsClient *compute.ProximityPlacementGroupsClient
MarketplaceAgreementsClient *marketplaceordering.MarketplaceAgreementsClient
ImagesClient *compute.ImagesClient
SnapshotsClient *compute.SnapshotsClient
UsageClient *compute.UsageClient
VMExtensionImageClient *compute.VirtualMachineExtensionImagesClient
VMExtensionClient *compute.VirtualMachineExtensionsClient
VMScaleSetClient *compute.VirtualMachineScaleSetsClient
VMScaleSetExtensionsClient *compute.VirtualMachineScaleSetExtensionsClient
VMScaleSetVMsClient *compute.VirtualMachineScaleSetVMsClient
VMClient *compute.VirtualMachinesClient
VMImageClient *compute.VirtualMachineImagesClient
AvailabilitySetsClient *compute.AvailabilitySetsClient
DedicatedHostsClient *compute.DedicatedHostsClient
DedicatedHostGroupsClient *compute.DedicatedHostGroupsClient
DisksClient *compute.DisksClient
DiskEncryptionSetsClient *compute.DiskEncryptionSetsClient
GalleriesClient *compute.GalleriesClient
GalleryImagesClient *compute.GalleryImagesClient
GalleryImageVersionsClient *compute.GalleryImageVersionsClient
ProximityPlacementGroupsClient *compute.ProximityPlacementGroupsClient
MarketplaceAgreementsClient *marketplaceordering.MarketplaceAgreementsClient
ImagesClient *compute.ImagesClient
SnapshotsClient *compute.SnapshotsClient
UsageClient *compute.UsageClient
VMExtensionImageClient *compute.VirtualMachineExtensionImagesClient
VMExtensionClient *compute.VirtualMachineExtensionsClient
VMScaleSetClient *compute.VirtualMachineScaleSetsClient
VMScaleSetExtensionsClient *compute.VirtualMachineScaleSetExtensionsClient
VMScaleSetRollingUpgradesClient *compute.VirtualMachineScaleSetRollingUpgradesClient
VMScaleSetVMsClient *compute.VirtualMachineScaleSetVMsClient
VMClient *compute.VirtualMachinesClient
VMImageClient *compute.VirtualMachineImagesClient
}

func NewClient(o *common.ClientOptions) *Client {
Expand Down Expand Up @@ -84,32 +85,36 @@ func NewClient(o *common.ClientOptions) *Client {
vmScaleSetExtensionsClient := compute.NewVirtualMachineScaleSetExtensionsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&vmScaleSetExtensionsClient.Client, o.ResourceManagerAuthorizer)

vmScaleSetRollingUpgradesClient := compute.NewVirtualMachineScaleSetRollingUpgradesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&vmScaleSetRollingUpgradesClient.Client, o.ResourceManagerAuthorizer)

vmScaleSetVMsClient := compute.NewVirtualMachineScaleSetVMsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&vmScaleSetVMsClient.Client, o.ResourceManagerAuthorizer)

vmClient := compute.NewVirtualMachinesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&vmClient.Client, o.ResourceManagerAuthorizer)

return &Client{
AvailabilitySetsClient: &availabilitySetsClient,
DedicatedHostsClient: &dedicatedHostsClient,
DedicatedHostGroupsClient: &dedicatedHostGroupsClient,
DisksClient: &disksClient,
DiskEncryptionSetsClient: &diskEncryptionSetsClient,
GalleriesClient: &galleriesClient,
GalleryImagesClient: &galleryImagesClient,
GalleryImageVersionsClient: &galleryImageVersionsClient,
ImagesClient: &imagesClient,
MarketplaceAgreementsClient: &marketplaceAgreementsClient,
ProximityPlacementGroupsClient: &proximityPlacementGroupsClient,
SnapshotsClient: &snapshotsClient,
UsageClient: &usageClient,
VMExtensionImageClient: &vmExtensionImageClient,
VMExtensionClient: &vmExtensionClient,
VMScaleSetClient: &vmScaleSetClient,
VMScaleSetExtensionsClient: &vmScaleSetExtensionsClient,
VMScaleSetVMsClient: &vmScaleSetVMsClient,
VMClient: &vmClient,
VMImageClient: &vmImageClient,
AvailabilitySetsClient: &availabilitySetsClient,
DedicatedHostsClient: &dedicatedHostsClient,
DedicatedHostGroupsClient: &dedicatedHostGroupsClient,
DisksClient: &disksClient,
DiskEncryptionSetsClient: &diskEncryptionSetsClient,
GalleriesClient: &galleriesClient,
GalleryImagesClient: &galleryImagesClient,
GalleryImageVersionsClient: &galleryImageVersionsClient,
ImagesClient: &imagesClient,
MarketplaceAgreementsClient: &marketplaceAgreementsClient,
ProximityPlacementGroupsClient: &proximityPlacementGroupsClient,
SnapshotsClient: &snapshotsClient,
UsageClient: &usageClient,
VMExtensionImageClient: &vmExtensionImageClient,
VMExtensionClient: &vmExtensionClient,
VMScaleSetClient: &vmScaleSetClient,
VMScaleSetExtensionsClient: &vmScaleSetExtensionsClient,
VMScaleSetRollingUpgradesClient: &vmScaleSetRollingUpgradesClient,
VMScaleSetVMsClient: &vmScaleSetVMsClient,
VMClient: &vmClient,
VMImageClient: &vmImageClient,
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -499,6 +499,14 @@ func resourceArmLinuxVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta i
}
update := compute.VirtualMachineScaleSetUpdate{}

// first try and pull this from existing vm, which covers no changes being made to this block
automaticOSUpgradeIsEnabled := false
if policy := existing.VirtualMachineScaleSetProperties.UpgradePolicy; policy != nil {
if policy.AutomaticOSUpgradePolicy != nil && policy.AutomaticOSUpgradePolicy.EnableAutomaticOSUpgrade != nil {
automaticOSUpgradeIsEnabled = *policy.AutomaticOSUpgradePolicy.EnableAutomaticOSUpgrade
}
}

if d.HasChange("automatic_os_upgrade_policy") || d.HasChange("rolling_upgrade_policy") {
upgradePolicy := compute.UpgradePolicy{
Mode: compute.UpgradeMode(d.Get("upgrade_mode").(string)),
Expand All @@ -507,6 +515,10 @@ func resourceArmLinuxVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta i
if d.HasChange("automatic_os_upgrade_policy") {
automaticRaw := d.Get("automatic_os_upgrade_policy").([]interface{})
upgradePolicy.AutomaticOSUpgradePolicy = ExpandVirtualMachineScaleSetAutomaticUpgradePolicy(automaticRaw)

// however if this block has been changed then we need to pull it
// we can guarantee this always has a value since it'll have been expanded and thus is safe to de-ref
automaticOSUpgradeIsEnabled = *upgradePolicy.AutomaticOSUpgradePolicy.EnableAutomaticOSUpgrade
}

if d.HasChange("rolling_upgrade_policy") {
Expand Down Expand Up @@ -669,84 +681,18 @@ func resourceArmLinuxVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta i

update.VirtualMachineScaleSetUpdateProperties = &updateProps

log.Printf("[DEBUG] Updating Linux Virtual Machine Scale Set %q (Resource Group %q)..", id.Name, id.ResourceGroup)
future, err := client.Update(ctx, id.ResourceGroup, id.Name, update)
if err != nil {
return fmt.Errorf("Error updating Linux Virtual Machine Scale Set %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err)
}

log.Printf("[DEBUG] Waiting for update of Linux Virtual Machine Scale Set %q (Resource Group %q)..", id.Name, id.ResourceGroup)
if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
return fmt.Errorf("Error waiting for update of Linux Virtual Machine Scale Set %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err)
metaData := virtualMachineScaleSetUpdateMetaData{
AutomaticOSUpgradeIsEnabled: automaticOSUpgradeIsEnabled,
CanRollInstancesWhenRequired: meta.(*clients.Client).Features.VirtualMachineScaleSet.RollInstancesWhenRequired,
UpdateInstances: updateInstances,
Client: meta.(*clients.Client).Compute,
Existing: existing,
ID: id,
OSType: compute.Linux,
}
log.Printf("[DEBUG] Updated Linux Virtual Machine Scale Set %q (Resource Group %q).", id.Name, id.ResourceGroup)

// if we update the SKU, we also need to subsequently roll the instances using the `UpdateInstances` API
if updateInstances {
userWantsToRollInstances := meta.(*clients.Client).Features.VirtualMachineScaleSet.RollInstancesWhenRequired
if userWantsToRollInstances {
log.Printf("[DEBUG] Rolling the VM Instances for Linux Virtual Machine Scale Set %q (Resource Group %q)..", id.Name, id.ResourceGroup)
instancesClient := meta.(*clients.Client).Compute.VMScaleSetVMsClient
instances, err := instancesClient.ListComplete(ctx, id.ResourceGroup, id.Name, "", "", "")
if err != nil {
return fmt.Errorf("Error listing VM Instances for Linux Virtual Machine Scale Set %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err)
}

log.Printf("[DEBUG] Determining instances to roll..")
instanceIdsToRoll := make([]string, 0)
for instances.NotDone() {
instance := instances.Value()
props := instance.VirtualMachineScaleSetVMProperties
if props != nil && instance.InstanceID != nil {
latestModel := props.LatestModelApplied
if latestModel != nil || !*latestModel {
instanceIdsToRoll = append(instanceIdsToRoll, *instance.InstanceID)
}
}

if err := instances.NextWithContext(ctx); err != nil {
return fmt.Errorf("Error enumerating instances: %s", err)
}
}

// TODO: there's a performance enhancement to do batches here, but this is fine for a first pass
for _, instanceId := range instanceIdsToRoll {
instanceIds := []string{instanceId}

log.Printf("[DEBUG] Updating Instance %q to the Latest Configuration..", instanceId)
ids := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{
InstanceIds: &instanceIds,
}
future, err := client.UpdateInstances(ctx, id.ResourceGroup, id.Name, ids)
if err != nil {
return fmt.Errorf("Error updating Instance %q (Linux VM Scale Set %q / Resource Group %q) to the Latest Configuration: %+v", instanceId, id.Name, id.ResourceGroup, err)
}

if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
return fmt.Errorf("Error waiting for update of Instance %q (Linux VM Scale Set %q / Resource Group %q) to the Latest Configuration: %+v", instanceId, id.Name, id.ResourceGroup, err)
}
log.Printf("[DEBUG] Updated Instance %q to the Latest Configuration.", instanceId)

// TODO: does this want to be a separate, user-configurable toggle?
log.Printf("[DEBUG] Reimaging Instance %q..", instanceId)
reimageInput := &compute.VirtualMachineScaleSetReimageParameters{
InstanceIds: &instanceIds,
}
reimageFuture, err := client.Reimage(ctx, id.ResourceGroup, id.Name, reimageInput)
if err != nil {
return fmt.Errorf("Error reimaging Instance %q (Linux VM Scale Set %q / Resource Group %q): %+v", instanceId, id.Name, id.ResourceGroup, err)
}

if err = reimageFuture.WaitForCompletionRef(ctx, client.Client); err != nil {
return fmt.Errorf("Error waiting for reimage of Instance %q (Linux VM Scale Set %q / Resource Group %q): %+v", instanceId, id.Name, id.ResourceGroup, err)
}
log.Printf("[DEBUG] Reimaged Instance %q..", instanceId)
}

log.Printf("[DEBUG] Rolled the VM Instances for Linux Virtual Machine Scale Set %q (Resource Group %q).", id.Name, id.ResourceGroup)
} else {
log.Printf("[DEBUG] Terraform wants to roll the VM Instances for Linux Virtual Machine Scale Set %q (Resource Group %q) - but user has opted out - skipping..", id.Name, id.ResourceGroup)
}
if err := metaData.performUpdate(ctx, update); err != nil {
return err
}

return resourceArmLinuxVirtualMachineScaleSetRead(d, meta)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -530,6 +530,13 @@ func resourceArmWindowsVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta
update := compute.VirtualMachineScaleSetUpdate{}

upgradeMode := compute.UpgradeMode(d.Get("upgrade_mode").(string))
// first try and pull this from existing vm, which covers no changes being made to this block
automaticOSUpgradeIsEnabled := false
if policy := existing.VirtualMachineScaleSetProperties.UpgradePolicy; policy != nil {
if policy.AutomaticOSUpgradePolicy != nil && policy.AutomaticOSUpgradePolicy.EnableAutomaticOSUpgrade != nil {
automaticOSUpgradeIsEnabled = *policy.AutomaticOSUpgradePolicy.EnableAutomaticOSUpgrade
}
}
if d.HasChange("automatic_os_upgrade_policy") || d.HasChange("rolling_upgrade_policy") {
upgradePolicy := compute.UpgradePolicy{
Mode: upgradeMode,
Expand All @@ -538,6 +545,10 @@ func resourceArmWindowsVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta
if d.HasChange("automatic_os_upgrade_policy") {
automaticRaw := d.Get("automatic_os_upgrade_policy").([]interface{})
upgradePolicy.AutomaticOSUpgradePolicy = ExpandVirtualMachineScaleSetAutomaticUpgradePolicy(automaticRaw)

// however if this block has been changed then we need to pull it
// we can guarantee this always has a value since it'll have been expanded and thus is safe to de-ref
automaticOSUpgradeIsEnabled = *upgradePolicy.AutomaticOSUpgradePolicy.EnableAutomaticOSUpgrade
}

if d.HasChange("rolling_upgrade_policy") {
Expand Down Expand Up @@ -704,84 +715,18 @@ func resourceArmWindowsVirtualMachineScaleSetUpdate(d *schema.ResourceData, meta

update.VirtualMachineScaleSetUpdateProperties = &updateProps

log.Printf("[DEBUG] Updating Windows Virtual Machine Scale Set %q (Resource Group %q)..", id.Name, id.ResourceGroup)
future, err := client.Update(ctx, id.ResourceGroup, id.Name, update)
if err != nil {
return fmt.Errorf("Error updating Windows Virtual Machine Scale Set %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err)
metaData := virtualMachineScaleSetUpdateMetaData{
AutomaticOSUpgradeIsEnabled: automaticOSUpgradeIsEnabled,
CanRollInstancesWhenRequired: meta.(*clients.Client).Features.VirtualMachineScaleSet.RollInstancesWhenRequired,
UpdateInstances: updateInstances,
Client: meta.(*clients.Client).Compute,
Existing: existing,
ID: id,
OSType: compute.Windows,
}

log.Printf("[DEBUG] Waiting for update of Windows Virtual Machine Scale Set %q (Resource Group %q)..", id.Name, id.ResourceGroup)
if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
return fmt.Errorf("Error waiting for update of Windows Virtual Machine Scale Set %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err)
}
log.Printf("[DEBUG] Updated Windows Virtual Machine Scale Set %q (Resource Group %q).", id.Name, id.ResourceGroup)

// if we update the SKU, we also need to subsequently roll the instances using the `UpdateInstances` API
if updateInstances {
userWantsToRollInstances := meta.(*clients.Client).Features.VirtualMachineScaleSet.RollInstancesWhenRequired
if userWantsToRollInstances {
log.Printf("[DEBUG] Rolling the VM Instances for Windows Virtual Machine Scale Set %q (Resource Group %q)..", id.Name, id.ResourceGroup)
instancesClient := meta.(*clients.Client).Compute.VMScaleSetVMsClient
instances, err := instancesClient.ListComplete(ctx, id.ResourceGroup, id.Name, "", "", "")
if err != nil {
return fmt.Errorf("Error listing VM Instances for Windows Virtual Machine Scale Set %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err)
}

log.Printf("[DEBUG] Determining instances to roll..")
instanceIdsToRoll := make([]string, 0)
for instances.NotDone() {
instance := instances.Value()
props := instance.VirtualMachineScaleSetVMProperties
if props != nil && instance.InstanceID != nil {
latestModel := props.LatestModelApplied
if latestModel != nil || !*latestModel {
instanceIdsToRoll = append(instanceIdsToRoll, *instance.InstanceID)
}
}

if err := instances.NextWithContext(ctx); err != nil {
return fmt.Errorf("Error enumerating instances: %s", err)
}
}

// there's a performance enhancement to do batches here, but this is fine for a first pass
for _, instanceId := range instanceIdsToRoll {
instanceIds := []string{instanceId}

log.Printf("[DEBUG] Updating Instance %q to the Latest Configuration..", instanceId)
ids := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{
InstanceIds: &instanceIds,
}
future, err := client.UpdateInstances(ctx, id.ResourceGroup, id.Name, ids)
if err != nil {
return fmt.Errorf("Error updating Instance %q (Windows VM Scale Set %q / Resource Group %q) to the Latest Configuration: %+v", instanceId, id.Name, id.ResourceGroup, err)
}

if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
return fmt.Errorf("Error waiting for update of Instance %q (Windows VM Scale Set %q / Resource Group %q) to the Latest Configuration: %+v", instanceId, id.Name, id.ResourceGroup, err)
}
log.Printf("[DEBUG] Updated Instance %q to the Latest Configuration.", instanceId)

// TODO: does this want to be a separate, user-configurable toggle?
log.Printf("[DEBUG] Reimaging Instance %q..", instanceId)
reimageInput := &compute.VirtualMachineScaleSetReimageParameters{
InstanceIds: &instanceIds,
}
reimageFuture, err := client.Reimage(ctx, id.ResourceGroup, id.Name, reimageInput)
if err != nil {
return fmt.Errorf("Error reimaging Instance %q (Windows VM Scale Set %q / Resource Group %q): %+v", instanceId, id.Name, id.ResourceGroup, err)
}

if err = reimageFuture.WaitForCompletionRef(ctx, client.Client); err != nil {
return fmt.Errorf("Error waiting for reimage of Instance %q (Windows VM Scale Set %q / Resource Group %q): %+v", instanceId, id.Name, id.ResourceGroup, err)
}
log.Printf("[DEBUG] Reimaged Instance %q..", instanceId)
}

log.Printf("[DEBUG] Rolled the VM Instances for Windows Virtual Machine Scale Set %q (Resource Group %q).", id.Name, id.ResourceGroup)
} else {
log.Printf("[DEBUG] Terraform wants to roll the VM Instances for Windows Virtual Machine Scale Set %q (Resource Group %q) - but user has opted out - skipping..", id.Name, id.ResourceGroup)
}
if err := metaData.performUpdate(ctx, update); err != nil {
return err
}

return resourceArmWindowsVirtualMachineScaleSetRead(d, meta)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -298,9 +298,9 @@ resource "azurerm_linux_virtual_machine_scale_set" "test" {
}
rolling_upgrade_policy {
max_batch_instance_percent = 21
max_unhealthy_instance_percent = 22
max_unhealthy_upgraded_instance_percent = 23
max_batch_instance_percent = 100
max_unhealthy_instance_percent = 100
max_unhealthy_upgraded_instance_percent = 100
pause_time_between_batches = "PT30S"
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -298,9 +298,9 @@ resource "azurerm_windows_virtual_machine_scale_set" "test" {
}
rolling_upgrade_policy {
max_batch_instance_percent = 21
max_unhealthy_instance_percent = 22
max_unhealthy_upgraded_instance_percent = 23
max_batch_instance_percent = 100
max_unhealthy_instance_percent = 100
max_unhealthy_upgraded_instance_percent = 100
pause_time_between_batches = "PT30S"
}
Expand Down
Loading

0 comments on commit a04738d

Please sign in to comment.