diff --git a/azurerm/data_source_kubernetes_cluster.go b/azurerm/data_source_kubernetes_cluster.go index dd584a5b55bb..1660549874fc 100644 --- a/azurerm/data_source_kubernetes_cluster.go +++ b/azurerm/data_source_kubernetes_cluster.go @@ -274,6 +274,19 @@ func dataSourceArmKubernetesCluster() *schema.Resource { }, }, + "windows_profile": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "admin_username": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "network_profile": { Type: schema.TypeList, Computed: true, @@ -420,6 +433,11 @@ func dataSourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error setting `linux_profile`: %+v", err) } + windowsProfile := flattenKubernetesClusterDataSourceWindowsProfile(props.WindowsProfile) + if err := d.Set("windows_profile", windowsProfile); err != nil { + return fmt.Errorf("Error setting `windows_profile`: %+v", err) + } + networkProfile := flattenKubernetesClusterDataSourceNetworkProfile(props.NetworkProfile) if err := d.Set("network_profile", networkProfile); err != nil { return fmt.Errorf("Error setting `network_profile`: %+v", err) @@ -669,6 +687,19 @@ func flattenKubernetesClusterDataSourceLinuxProfile(input *containerservice.Linu return []interface{}{values} } +func flattenKubernetesClusterDataSourceWindowsProfile(input *containerservice.ManagedClusterWindowsProfile) []interface{} { + if input == nil { + return []interface{}{} + } + values := make(map[string]interface{}) + + if username := input.AdminUsername; username != nil { + values["admin_username"] = *username + } + + return []interface{}{values} +} + func flattenKubernetesClusterDataSourceNetworkProfile(profile *containerservice.NetworkProfileType) []interface{} { values := make(map[string]interface{}) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index d126077beb28..0c4f7151740a 100644 --- 
a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -336,6 +336,26 @@ func resourceArmKubernetesCluster() *schema.Resource { }, }, + "windows_profile": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "admin_username": { + Type: schema.TypeString, + Required: true, + }, + "admin_password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + ValidateFunc: validate.NoEmptyStrings, + }, + }, + }, + }, + "network_profile": { Type: schema.TypeList, Optional: true, @@ -598,25 +618,13 @@ func resourceArmKubernetesClusterCreateUpdate(d *schema.ResourceData, meta inter if err != nil { return err } - + windowsProfile := expandKubernetesClusterWindowsProfile(d) servicePrincipalProfile := expandAzureRmKubernetesClusterServicePrincipal(d) networkProfile := expandKubernetesClusterNetworkProfile(d) addonProfiles := expandKubernetesClusterAddonProfiles(d) tags := d.Get("tags").(map[string]interface{}) - // we can't do this in the CustomizeDiff since the interpolations aren't evaluated at that point - if networkProfile != nil { - // ensure there's a Subnet ID attached - if networkProfile.NetworkPlugin == containerservice.Azure { - for _, profile := range agentProfiles { - if profile.VnetSubnetID == nil { - return fmt.Errorf("A `vnet_subnet_id` must be specified when the `network_plugin` is set to `azure`.") - } - } - } - } - rbacRaw := d.Get("role_based_access_control").([]interface{}) rbacEnabled, azureADProfile := expandKubernetesClusterRoleBasedAccessControl(rbacRaw, tenantId) @@ -635,6 +643,7 @@ func resourceArmKubernetesClusterCreateUpdate(d *schema.ResourceData, meta inter EnableRBAC: utils.Bool(rbacEnabled), KubernetesVersion: utils.String(kubernetesVersion), LinuxProfile: linuxProfile, + WindowsProfile: windowsProfile, NetworkProfile: networkProfile, ServicePrincipalProfile: servicePrincipalProfile, }, @@ -723,6 +732,11 @@ func 
resourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error setting `linux_profile`: %+v", err) } + windowsProfile := flattenKubernetesClusterWindowsProfile(props.WindowsProfile, d) + if err := d.Set("windows_profile", windowsProfile); err != nil { + return fmt.Errorf("Error setting `windows_profile`: %+v", err) + } + networkProfile := flattenKubernetesClusterNetworkProfile(props.NetworkProfile) if err := d.Set("network_profile", networkProfile); err != nil { return fmt.Errorf("Error setting `network_profile`: %+v", err) @@ -1109,6 +1123,26 @@ func expandKubernetesClusterLinuxProfile(d *schema.ResourceData) *containerservi return &profile } +func expandKubernetesClusterWindowsProfile(d *schema.ResourceData) *containerservice.ManagedClusterWindowsProfile { + profiles := d.Get("windows_profile").([]interface{}) + + if len(profiles) == 0 { + return nil + } + + config := profiles[0].(map[string]interface{}) + + adminUsername := config["admin_username"].(string) + adminPassword := config["admin_password"].(string) + + profile := containerservice.ManagedClusterWindowsProfile{ + AdminUsername: &adminUsername, + AdminPassword: &adminPassword, + } + + return &profile +} + func flattenKubernetesClusterLinuxProfile(profile *containerservice.LinuxProfile) []interface{} { if profile == nil { return []interface{}{} @@ -1138,6 +1172,25 @@ func flattenKubernetesClusterLinuxProfile(profile *containerservice.LinuxProfile return []interface{}{values} } +func flattenKubernetesClusterWindowsProfile(profile *containerservice.ManagedClusterWindowsProfile, d *schema.ResourceData) []interface{} { + if profile == nil { + return []interface{}{} + } + + values := make(map[string]interface{}) + + if username := profile.AdminUsername; username != nil { + values["admin_username"] = *username + } + + // admin password isn't returned, so let's look it up + if v, ok := d.GetOk("windows_profile.0.admin_password"); ok { + values["admin_password"] = v.(string) + 
} + + return []interface{}{values} +} + func expandKubernetesClusterNetworkProfile(d *schema.ResourceData) *containerservice.NetworkProfileType { configs := d.Get("network_profile").([]interface{}) if len(configs) == 0 { diff --git a/azurerm/resource_arm_kubernetes_cluster_test.go b/azurerm/resource_arm_kubernetes_cluster_test.go index fd5e5ae5cc6b..18d078817fd5 100644 --- a/azurerm/resource_arm_kubernetes_cluster_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_test.go @@ -200,6 +200,44 @@ func TestAccAzureRMKubernetesCluster_linuxProfile(t *testing.T) { }) } +func TestAccAzureRMKubernetesCluster_windowsProfile(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := tf.AccRandTimeInt() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + config := testAccAzureRMKubernetesCluster_windowsProfile(ri, clientId, clientSecret, testLocation()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_certificate"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.cluster_ca_certificate"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.host"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.username"), + resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"), + resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"), + resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.1.max_pods"), + resource.TestCheckResourceAttrSet(resourceName, "linux_profile.0.admin_username"), + 
resource.TestCheckResourceAttrSet(resourceName, "windows_profile.0.admin_username"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"windows_profile.0.admin_password"}, + }, + }, + }) +} + func TestAccAzureRMKubernetesCluster_addAgent(t *testing.T) { resourceName := "azurerm_kubernetes_cluster.test" ri := tf.AccRandTimeInt() @@ -883,6 +921,68 @@ resource "azurerm_kubernetes_cluster" "test" { `, rInt, location, rInt, rInt, rInt, clientId, clientSecret) } +func testAccAzureRMKubernetesCluster_windowsProfile(rInt int, clientId string, clientSecret string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + windows_profile { + admin_username = "azureuser" + admin_password = "pass_123-worD" + } + + agent_pool_profile { + name = "linux" + type = "VirtualMachineScaleSets" + count = "1" + vm_size = "Standard_DS2_v2" + max_pods = 30 + os_type = "Linux" + os_disk_size_gb = "30" + } + + agent_pool_profile { + name = "win" + type = "VirtualMachineScaleSets" + count = "1" + vm_size = "Standard_DS3_v2" + max_pods = 30 + os_type = "Windows" + os_disk_size_gb = "30" + } + + service_principal { + client_id = "%s" + 
client_secret = "%s" + } + + network_profile { + network_plugin = "azure" + network_policy = "azure" + dns_service_ip = "10.10.0.10" + docker_bridge_cidr = "172.18.0.1/16" + service_cidr = "10.10.0.0/16" + } +} +`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) +} + func testAccAzureRMKubernetesCluster_addAgent(rInt int, clientId string, clientSecret string, location string) string { return fmt.Sprintf(` resource "azurerm_resource_group" "test" { diff --git a/website/docs/d/kubernetes_cluster.html.markdown b/website/docs/d/kubernetes_cluster.html.markdown index 36fde5e7be29..551757c8ed99 100644 --- a/website/docs/d/kubernetes_cluster.html.markdown +++ b/website/docs/d/kubernetes_cluster.html.markdown @@ -58,6 +58,8 @@ The following attributes are exported: * `linux_profile` - A `linux_profile` block as documented below. +* `windows_profile` - A `windows_profile` block as documented below. + * `network_profile` - A `network_profile` block as documented below. * `node_resource_group` - Auto-generated Resource Group containing AKS Cluster resources. @@ -165,6 +167,12 @@ A `linux_profile` block exports the following: --- +A `windows_profile` block exports the following: + +* `admin_username` - The username associated with the administrator account of the Windows VMs. + +--- + A `network_profile` block exports the following: * `docker_bridge_cidr` - IP address (in CIDR notation) used as the Docker bridge IP address on nodes. diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 7e2120fafb87..ad5db33c7789 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -89,6 +89,8 @@ The following arguments are supported: * `linux_profile` - (Optional) A `linux_profile` block. +* `windows_profile` - (Optional) A `windows_profile` block. + * `network_profile` - (Optional) A `network_profile` block. 
-> **NOTE:** If `network_profile` is not defined, `kubenet` profile will be used by default. @@ -170,6 +172,14 @@ A `linux_profile` block supports the following: --- +A `windows_profile` block supports the following: + +* `admin_username` - (Required) The Admin Username for Windows VMs. + +* `admin_password` - (Optional) The Admin Password for Windows VMs. + +--- + A `network_profile` block supports the following: * `network_plugin` - (Required) Network plugin to use for networking. Currently supported values are `azure` and `kubenet`. Changing this forces a new resource to be created.