From 571a6d333df63b66e8d44c2e87cb0a29ba60a324 Mon Sep 17 00:00:00 2001 From: Heng Lu Date: Tue, 14 Nov 2023 14:02:13 +0800 Subject: [PATCH 1/4] `azurerm_kubernetes_cluster`, `azurerm_kubernetes_cluster_node_pool` - support for the `gpu_instance_profile` property --- .../kubernetes_cluster_node_pool_resource.go | 18 +++++++ ...ernetes_cluster_node_pool_resource_test.go | 50 +++++++++++++++++++ .../kubernetes_cluster_other_resource_test.go | 46 +++++++++++++++++ .../containers/kubernetes_nodepool.go | 27 ++++++++++ .../docs/r/kubernetes_cluster.html.markdown | 2 + ...kubernetes_cluster_node_pool.html.markdown | 2 + 6 files changed, 145 insertions(+) diff --git a/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/internal/services/containers/kubernetes_cluster_node_pool_resource.go index a6fd4def6548..3db1f70c1316 100644 --- a/internal/services/containers/kubernetes_cluster_node_pool_resource.go +++ b/internal/services/containers/kubernetes_cluster_node_pool_resource.go @@ -152,6 +152,19 @@ func resourceKubernetesClusterNodePoolSchema() map[string]*pluginsdk.Schema { ForceNew: true, }, + "gpu_instance_profile": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + string(agentpools.GPUInstanceProfileMIGOneg), + string(agentpools.GPUInstanceProfileMIGTwog), + string(agentpools.GPUInstanceProfileMIGThreeg), + string(agentpools.GPUInstanceProfileMIGFourg), + string(agentpools.GPUInstanceProfileMIGSeveng), + }, false), + }, + "kubelet_disk_type": { Type: pluginsdk.TypeString, Optional: true, @@ -468,6 +481,10 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int Count: utils.Int64(int64(count)), } + if gpuInstanceProfile := d.Get("gpu_instance_profile").(string); gpuInstanceProfile != "" { + profile.GpuInstanceProfile = utils.ToPtr(agentpools.GPUInstanceProfile(gpuInstanceProfile)) + } + if osSku := d.Get("os_sku").(string); osSku != "" 
{ profile.OsSKU = utils.ToPtr(agentpools.OSSKU(osSku)) } @@ -839,6 +856,7 @@ func resourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta inter d.Set("enable_host_encryption", props.EnableEncryptionAtHost) d.Set("custom_ca_trust_enabled", props.EnableCustomCATrust) d.Set("fips_enabled", props.EnableFIPS) + d.Set("gpu_instance_profile", props.GpuInstanceProfile) d.Set("ultra_ssd_enabled", props.EnableUltraSSD) if v := props.KubeletDiskType; v != nil { diff --git a/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go b/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go index 9ed7acb0efe5..6d088cc41dd0 100644 --- a/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go +++ b/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go @@ -1034,6 +1034,21 @@ func TestAccKubernetesClusterNodePool_snapshotId(t *testing.T) { }) } +func TestAccKubernetesClusterNodePool_gpuInstanceProfile(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") + r := KubernetesClusterNodePoolResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.gpuInstanceProfile(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func (t KubernetesClusterNodePoolResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { id, err := agentpools.ParseAgentPoolID(state.ID) if err != nil { @@ -2706,3 +2721,38 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" { } `, data.Locations.Primary, data.RandomInteger, data.RandomString) } + +func (KubernetesClusterNodePoolResource) gpuInstanceProfile(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%[2]d" + location = "%[1]s" +} + +resource 
"azurerm_kubernetes_cluster" "test" { + name = "acctestaks%[2]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%[2]d" + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_D2s_v3" + } + identity { + type = "SystemAssigned" + } +} + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_ND96asr_v4" + gpu_instance_profile = "MIG1g" +} + `, data.Locations.Primary, data.RandomInteger) +} diff --git a/internal/services/containers/kubernetes_cluster_other_resource_test.go b/internal/services/containers/kubernetes_cluster_other_resource_test.go index 3f146fea8e25..c0d21274212a 100644 --- a/internal/services/containers/kubernetes_cluster_other_resource_test.go +++ b/internal/services/containers/kubernetes_cluster_other_resource_test.go @@ -1043,6 +1043,21 @@ func TestAccKubernetesCluster_snapshotId(t *testing.T) { }) } +func TestAccKubernetesCluster_gpuInstanceProfile(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + r := KubernetesClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.gpuInstanceProfile(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + func (KubernetesClusterResource) basicAvailabilitySetConfig(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { @@ -3103,3 +3118,34 @@ resource "azurerm_kubernetes_cluster" "test" { } `, data.Locations.Primary, data.RandomInteger, data.RandomString) } + +func (KubernetesClusterResource) gpuInstanceProfile(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%[2]d" + location = "%[1]s" +} + +resource 
"azurerm_kubernetes_cluster" "test" { + name = "acctestaks%[2]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%[2]d" + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_ND96asr_v4" + gpu_instance_profile = "MIG1g" + } + + identity { + type = "SystemAssigned" + } +} + `, data.Locations.Primary, data.RandomInteger) +} diff --git a/internal/services/containers/kubernetes_nodepool.go b/internal/services/containers/kubernetes_nodepool.go index c25f20d29062..5be4cd9988f3 100644 --- a/internal/services/containers/kubernetes_nodepool.go +++ b/internal/services/containers/kubernetes_nodepool.go @@ -109,6 +109,19 @@ func SchemaDefaultNodePool() *pluginsdk.Schema { ForceNew: true, }, + "gpu_instance_profile": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + string(managedclusters.GPUInstanceProfileMIGOneg), + string(managedclusters.GPUInstanceProfileMIGTwog), + string(managedclusters.GPUInstanceProfileMIGThreeg), + string(managedclusters.GPUInstanceProfileMIGFourg), + string(managedclusters.GPUInstanceProfileMIGSeveng), + }, false), + }, + "kubelet_disk_type": { Type: pluginsdk.TypeString, Optional: true, @@ -916,6 +929,10 @@ func ConvertDefaultNodePoolToAgentPool(input *[]managedclusters.ManagedClusterAg } } + if defaultCluster.GpuInstanceProfile != nil { + agentpool.Properties.GpuInstanceProfile = utils.ToPtr(agentpools.GPUInstanceProfile(*defaultCluster.GpuInstanceProfile)) + } + return agentpool } @@ -1046,6 +1063,10 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]managedclusters.Manage profile.CapacityReservationGroupID = utils.String(capacityReservationGroupId) } + if gpuInstanceProfile := raw["gpu_instance_profile"].(string); gpuInstanceProfile != "" { + profile.GpuInstanceProfile = utils.ToPtr(managedclusters.GPUInstanceProfile(gpuInstanceProfile)) + } + count := 
raw["node_count"].(int) maxCount := raw["max_count"].(int) minCount := raw["min_count"].(int) @@ -1325,6 +1346,11 @@ func FlattenDefaultNodePool(input *[]managedclusters.ManagedClusterAgentPoolProf enableHostEncryption = *agentPool.EnableEncryptionAtHost } + gpuInstanceProfile := "" + if agentPool.GpuInstanceProfile != nil { + gpuInstanceProfile = string(*agentPool.GpuInstanceProfile) + } + maxCount := 0 if agentPool.MaxCount != nil { maxCount = int(*agentPool.MaxCount) @@ -1471,6 +1497,7 @@ func FlattenDefaultNodePool(input *[]managedclusters.ManagedClusterAgentPoolProf "enable_host_encryption": enableHostEncryption, "custom_ca_trust_enabled": customCaTrustEnabled, "fips_enabled": enableFIPS, + "gpu_instance_profile": gpuInstanceProfile, "host_group_id": hostGroupID, "kubelet_disk_type": kubeletDiskType, "max_count": maxCount, diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 5979de995def..bb2053da7655 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -398,6 +398,8 @@ A `default_node_pool` block supports the following: * `enable_node_public_ip` - (Optional) Should nodes in this Node Pool have a Public IP Address? `temporary_name_for_rotation` must be specified when changing this property. +* `gpu_instance_profile` - (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created. + * `host_group_id` - (Optional) Specifies the ID of the Host Group within which this AKS Cluster should be created. Changing this forces a new resource to be created. * `kubelet_config` - (Optional) A `kubelet_config` block as defined below. `temporary_name_for_rotation` must be specified when changing this block. 
diff --git a/website/docs/r/kubernetes_cluster_node_pool.html.markdown b/website/docs/r/kubernetes_cluster_node_pool.html.markdown index f309f5b684c0..dfb8d7b37b28 100644 --- a/website/docs/r/kubernetes_cluster_node_pool.html.markdown +++ b/website/docs/r/kubernetes_cluster_node_pool.html.markdown @@ -98,6 +98,8 @@ The following arguments are supported: ~> **Note:** FIPS support is in Public Preview - more information and details on how to opt into the Preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview). +* `gpu_instance_profile` - (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created. + * `kubelet_disk_type` - (Optional) The type of disk used by kubelet. Possible values are `OS` and `Temporary`. * `max_pods` - (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. 
From 972904713c3b602997a0573822b0d95d8c60ce51 Mon Sep 17 00:00:00 2001 From: Heng Lu Date: Tue, 14 Nov 2023 14:13:49 +0800 Subject: [PATCH 2/4] update --- .../containers/kubernetes_cluster_node_pool_resource.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/internal/services/containers/kubernetes_cluster_node_pool_resource.go index 3db1f70c1316..6ccfd5dfa77c 100644 --- a/internal/services/containers/kubernetes_cluster_node_pool_resource.go +++ b/internal/services/containers/kubernetes_cluster_node_pool_resource.go @@ -856,13 +856,16 @@ func resourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta inter d.Set("enable_host_encryption", props.EnableEncryptionAtHost) d.Set("custom_ca_trust_enabled", props.EnableCustomCATrust) d.Set("fips_enabled", props.EnableFIPS) - d.Set("gpu_instance_profile", props.GpuInstanceProfile) d.Set("ultra_ssd_enabled", props.EnableUltraSSD) if v := props.KubeletDiskType; v != nil { d.Set("kubelet_disk_type", string(*v)) } + if v := props.GpuInstanceProfile; v != nil { + d.Set("gpu_instance_profile", string(*v)) + } + if props.CreationData != nil { d.Set("snapshot_id", props.CreationData.SourceResourceId) } From 690635d6497fb5f998902658f432447e07346076 Mon Sep 17 00:00:00 2001 From: Heng Lu Date: Fri, 17 Nov 2023 09:54:27 +0800 Subject: [PATCH 3/4] rename the field --- .../kubernetes_cluster_node_pool_resource.go | 6 +++--- .../kubernetes_cluster_node_pool_resource_test.go | 8 ++++---- .../kubernetes_cluster_other_resource_test.go | 14 +++++++------- .../services/containers/kubernetes_nodepool.go | 6 +++--- website/docs/r/kubernetes_cluster.html.markdown | 2 +- .../r/kubernetes_cluster_node_pool.html.markdown | 2 +- 6 files changed, 19 insertions(+), 19 deletions(-) diff --git a/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/internal/services/containers/kubernetes_cluster_node_pool_resource.go index 
6ccfd5dfa77c..4e8c8ad1bce6 100644 --- a/internal/services/containers/kubernetes_cluster_node_pool_resource.go +++ b/internal/services/containers/kubernetes_cluster_node_pool_resource.go @@ -152,7 +152,7 @@ func resourceKubernetesClusterNodePoolSchema() map[string]*pluginsdk.Schema { ForceNew: true, }, - "gpu_instance_profile": { + "gpu_instance": { Type: pluginsdk.TypeString, Optional: true, ForceNew: true, @@ -481,7 +481,7 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int Count: utils.Int64(int64(count)), } - if gpuInstanceProfile := d.Get("gpu_instance_profile").(string); gpuInstanceProfile != "" { + if gpuInstanceProfile := d.Get("gpu_instance").(string); gpuInstanceProfile != "" { profile.GpuInstanceProfile = utils.ToPtr(agentpools.GPUInstanceProfile(gpuInstanceProfile)) } @@ -863,7 +863,7 @@ func resourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta inter } if v := props.GpuInstanceProfile; v != nil { - d.Set("gpu_instance_profile", string(*v)) + d.Set("gpu_instance", string(*v)) } if props.CreationData != nil { diff --git a/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go b/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go index 6d088cc41dd0..5a881b839e16 100644 --- a/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go +++ b/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go @@ -1034,13 +1034,13 @@ func TestAccKubernetesClusterNodePool_snapshotId(t *testing.T) { }) } -func TestAccKubernetesClusterNodePool_gpuInstanceProfile(t *testing.T) { +func TestAccKubernetesClusterNodePool_gpuInstance(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") r := KubernetesClusterNodePoolResource{} data.ResourceTest(t, r, []acceptance.TestStep{ { - Config: r.gpuInstanceProfile(data), + Config: r.gpuInstance(data), Check: acceptance.ComposeTestCheckFunc( 
check.That(data.ResourceName).ExistsInAzure(r), ), @@ -2722,7 +2722,7 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" { `, data.Locations.Primary, data.RandomInteger, data.RandomString) } -func (KubernetesClusterNodePoolResource) gpuInstanceProfile(data acceptance.TestData) string { +func (KubernetesClusterNodePoolResource) gpuInstance(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { features {} @@ -2752,7 +2752,7 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" { name = "internal" kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id vm_size = "Standard_ND96asr_v4" - gpu_instance_profile = "MIG1g" + gpu_instance = "MIG1g" } `, data.Locations.Primary, data.RandomInteger) } diff --git a/internal/services/containers/kubernetes_cluster_other_resource_test.go b/internal/services/containers/kubernetes_cluster_other_resource_test.go index c0d21274212a..5185d9191422 100644 --- a/internal/services/containers/kubernetes_cluster_other_resource_test.go +++ b/internal/services/containers/kubernetes_cluster_other_resource_test.go @@ -1043,13 +1043,13 @@ func TestAccKubernetesCluster_snapshotId(t *testing.T) { }) } -func TestAccKubernetesCluster_gpuInstanceProfile(t *testing.T) { +func TestAccKubernetesCluster_gpuInstance(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") r := KubernetesClusterResource{} data.ResourceTest(t, r, []acceptance.TestStep{ { - Config: r.gpuInstanceProfile(data), + Config: r.gpuInstance(data), Check: acceptance.ComposeTestCheckFunc( check.That(data.ResourceName).ExistsInAzure(r), ), @@ -3119,7 +3119,7 @@ resource "azurerm_kubernetes_cluster" "test" { `, data.Locations.Primary, data.RandomInteger, data.RandomString) } -func (KubernetesClusterResource) gpuInstanceProfile(data acceptance.TestData) string { +func (KubernetesClusterResource) gpuInstance(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { features {} @@ -3137,10 +3137,10 @@ 
resource "azurerm_kubernetes_cluster" "test" { dns_prefix = "acctestaks%[2]d" default_node_pool { - name = "default" - node_count = 1 - vm_size = "Standard_ND96asr_v4" - gpu_instance_profile = "MIG1g" + name = "default" + node_count = 1 + vm_size = "Standard_ND96asr_v4" + gpu_instance = "MIG1g" } identity { diff --git a/internal/services/containers/kubernetes_nodepool.go b/internal/services/containers/kubernetes_nodepool.go index 5be4cd9988f3..3dbeada0a889 100644 --- a/internal/services/containers/kubernetes_nodepool.go +++ b/internal/services/containers/kubernetes_nodepool.go @@ -109,7 +109,7 @@ func SchemaDefaultNodePool() *pluginsdk.Schema { ForceNew: true, }, - "gpu_instance_profile": { + "gpu_instance": { Type: pluginsdk.TypeString, Optional: true, ForceNew: true, @@ -1063,7 +1063,7 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]managedclusters.Manage profile.CapacityReservationGroupID = utils.String(capacityReservationGroupId) } - if gpuInstanceProfile := raw["gpu_instance_profile"].(string); gpuInstanceProfile != "" { + if gpuInstanceProfile := raw["gpu_instance"].(string); gpuInstanceProfile != "" { profile.GpuInstanceProfile = utils.ToPtr(managedclusters.GPUInstanceProfile(gpuInstanceProfile)) } @@ -1497,7 +1497,7 @@ func FlattenDefaultNodePool(input *[]managedclusters.ManagedClusterAgentPoolProf "enable_host_encryption": enableHostEncryption, "custom_ca_trust_enabled": customCaTrustEnabled, "fips_enabled": enableFIPS, - "gpu_instance_profile": gpuInstanceProfile, + "gpu_instance": gpuInstanceProfile, "host_group_id": hostGroupID, "kubelet_disk_type": kubeletDiskType, "max_count": maxCount, diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index bb2053da7655..e97a5c8f0cc1 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -398,7 +398,7 @@ A `default_node_pool` block supports the following: * `enable_node_public_ip` - 
(Optional) Should nodes in this Node Pool have a Public IP Address? `temporary_name_for_rotation` must be specified when changing this property. -* `gpu_instance_profile` - (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created. +* `gpu_instance` - (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created. * `host_group_id` - (Optional) Specifies the ID of the Host Group within which this AKS Cluster should be created. Changing this forces a new resource to be created. diff --git a/website/docs/r/kubernetes_cluster_node_pool.html.markdown b/website/docs/r/kubernetes_cluster_node_pool.html.markdown index dfb8d7b37b28..8089d0dac87d 100644 --- a/website/docs/r/kubernetes_cluster_node_pool.html.markdown +++ b/website/docs/r/kubernetes_cluster_node_pool.html.markdown @@ -98,7 +98,7 @@ The following arguments are supported: ~> **Note:** FIPS support is in Public Preview - more information and details on how to opt into the Preview can be found in [this article](https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview). -* `gpu_instance_profile` - (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created. +* `gpu_instance` - (Optional) Specifies the GPU MIG instance profile for supported GPU VM SKU. The allowed values are `MIG1g`, `MIG2g`, `MIG3g`, `MIG4g` and `MIG7g`. Changing this forces a new resource to be created. * `kubelet_disk_type` - (Optional) The type of disk used by kubelet. Possible values are `OS` and `Temporary`. 
From 685ea70ed9f33a53269704125898f9da5890876e Mon Sep 17 00:00:00 2001 From: Heng Lu Date: Mon, 20 Nov 2023 12:57:24 +0800 Subject: [PATCH 4/4] change vmsize --- .../containers/kubernetes_cluster_node_pool_resource_test.go | 2 +- .../containers/kubernetes_cluster_other_resource_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go b/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go index 5a881b839e16..9d7e23a88227 100644 --- a/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go +++ b/internal/services/containers/kubernetes_cluster_node_pool_resource_test.go @@ -2751,7 +2751,7 @@ resource "azurerm_kubernetes_cluster" "test" { resource "azurerm_kubernetes_cluster_node_pool" "test" { name = "internal" kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id - vm_size = "Standard_ND96asr_v4" + vm_size = "Standard_NC24ads_A100_v4" gpu_instance = "MIG1g" } `, data.Locations.Primary, data.RandomInteger) diff --git a/internal/services/containers/kubernetes_cluster_other_resource_test.go b/internal/services/containers/kubernetes_cluster_other_resource_test.go index 5185d9191422..a977aee2d1a3 100644 --- a/internal/services/containers/kubernetes_cluster_other_resource_test.go +++ b/internal/services/containers/kubernetes_cluster_other_resource_test.go @@ -3139,7 +3139,7 @@ resource "azurerm_kubernetes_cluster" "test" { default_node_pool { name = "default" node_count = 1 - vm_size = "Standard_ND96asr_v4" + vm_size = "Standard_NC24ads_A100_v4" gpu_instance = "MIG1g" }