Add support for node pool autoscaling #157

Merged: 4 commits, Jul 26, 2017
106 changes: 106 additions & 0 deletions google/resource_container_node_pool.go
@@ -14,6 +14,7 @@ func resourceContainerNodePool() *schema.Resource {
return &schema.Resource{
Create: resourceContainerNodePoolCreate,
Read: resourceContainerNodePoolRead,
Update: resourceContainerNodePoolUpdate,
Delete: resourceContainerNodePoolDelete,
Exists: resourceContainerNodePoolExists,

@@ -55,6 +56,41 @@ func resourceContainerNodePool() *schema.Resource {
Required: true,
ForceNew: true,
},

"autoscaling": &schema.Schema{
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"min_node_count": &schema.Schema{
Type: schema.TypeInt,
Required: true,
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(int)

if value < 1 {
errors = append(errors, fmt.Errorf("%q must be >=1", k))
}
return
},
},

"max_node_count": &schema.Schema{
Type: schema.TypeInt,
Required: true,
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(int)

if value < 1 {
errors = append(errors, fmt.Errorf("%q must be >=1", k))
}
return
},
},
},
},
},
},
}
}
@@ -85,6 +121,15 @@ func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) error {
InitialNodeCount: int64(nodeCount),
}

if v, ok := d.GetOk("autoscaling"); ok {
autoscaling := v.([]interface{})[0].(map[string]interface{})
nodePool.Autoscaling = &container.NodePoolAutoscaling{
Enabled: true,
MinNodeCount: int64(autoscaling["min_node_count"].(int)),
MaxNodeCount: int64(autoscaling["max_node_count"].(int)),
}
}

req := &container.CreateNodePoolRequest{
NodePool: nodePool,
}
@@ -130,9 +175,70 @@ func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) error {
d.Set("name", nodePool.Name)
d.Set("initial_node_count", nodePool.InitialNodeCount)

autoscaling := []map[string]interface{}{}
if nodePool.Autoscaling != nil && nodePool.Autoscaling.Enabled {
autoscaling = []map[string]interface{}{
map[string]interface{}{
"min_node_count": nodePool.Autoscaling.MinNodeCount,
"max_node_count": nodePool.Autoscaling.MaxNodeCount,
},
}
}
d.Set("autoscaling", autoscaling)

return nil
}

func resourceContainerNodePoolUpdate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)

project, err := getProject(d, config)
if err != nil {
return err
}

zone := d.Get("zone").(string)
name := d.Get("name").(string)
cluster := d.Get("cluster").(string)

if d.HasChange("autoscaling") {
update := &container.ClusterUpdate{
DesiredNodePoolId: name,
}
if v, ok := d.GetOk("autoscaling"); ok {
autoscaling := v.([]interface{})[0].(map[string]interface{})
update.DesiredNodePoolAutoscaling = &container.NodePoolAutoscaling{
Enabled: true,
MinNodeCount: int64(autoscaling["min_node_count"].(int)),
MaxNodeCount: int64(autoscaling["max_node_count"].(int)),
}
} else {
update.DesiredNodePoolAutoscaling = &container.NodePoolAutoscaling{
Enabled: false,
}
}

req := &container.UpdateClusterRequest{
Update: update,
}
op, err := config.clientContainer.Projects.Zones.Clusters.Update(
project, zone, cluster, req).Do()
if err != nil {
return err
}

// Wait until it's updated
waitErr := containerOperationWait(config, op, project, zone, "updating GKE node pool", 10, 2)
if waitErr != nil {
return waitErr
}

log.Printf("[INFO] Updated autoscaling in Node Pool %s", d.Id())
}

return resourceContainerNodePoolRead(d, meta)
}

func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)

114 changes: 109 additions & 5 deletions google/resource_container_node_pool_test.go
@@ -11,13 +11,47 @@ import (
)

func TestAccContainerNodePool_basic(t *testing.T) {
cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
np := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerNodePoolDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccContainerNodePool_basic,
Config: testAccContainerNodePool_basic(cluster, np),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerNodePoolMatches("google_container_node_pool.np"),
),
},
},
})
}

func TestAccContainerNodePool_autoscaling(t *testing.T) {
cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
np := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerNodePoolDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccContainerNodePool_autoscaling(cluster, np),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerNodePoolMatches("google_container_node_pool.np"),
),
},
resource.TestStep{
Config: testAccContainerNodePool_updateAutoscaling(cluster, np),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerNodePoolMatches("google_container_node_pool.np"),
),
},
resource.TestStep{
Config: testAccContainerNodePool_basic(cluster, np),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerNodePoolMatches("google_container_node_pool.np"),
),
@@ -77,13 +111,32 @@ func testAccCheckContainerNodePoolMatches(n string) resource.TestCheckFunc {
return fmt.Errorf("Mismatched initialNodeCount. TF State: %s. GCP State: %d",
attributes["initial_node_count"], found.InitialNodeCount)
}

tfAS := attributes["autoscaling.#"] == "1"
if gcpAS := found.Autoscaling != nil && found.Autoscaling.Enabled == true; tfAS != gcpAS {
return fmt.Errorf("Mismatched autoscaling status. TF State: %t. GCP State: %t", tfAS, gcpAS)
}
if tfAS {
if tf := attributes["autoscaling.0.min_node_count"]; strconv.FormatInt(found.Autoscaling.MinNodeCount, 10) != tf {
return fmt.Errorf("Mismatched Autoscaling.MinNodeCount. TF State: %s. GCP State: %d",
tf, found.Autoscaling.MinNodeCount)
}

if tf := attributes["autoscaling.0.max_node_count"]; strconv.FormatInt(found.Autoscaling.MaxNodeCount, 10) != tf {
return fmt.Errorf("Mismatched Autoscaling.MaxNodeCount. TF State: %s. GCP State: %d",
tf, found.Autoscaling.MaxNodeCount)
}

}

return nil
}
}

var testAccContainerNodePool_basic = fmt.Sprintf(`
func testAccContainerNodePool_basic(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "tf-cluster-nodepool-test-%s"
name = "%s"
zone = "us-central1-a"
initial_node_count = 3

@@ -94,8 +147,59 @@ resource "google_container_cluster" "cluster" {
}

resource "google_container_node_pool" "np" {
name = "tf-nodepool-test-%s"
name = "%s"
zone = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 2
}`, acctest.RandString(10), acctest.RandString(10))
}`, cluster, np)
}

func testAccContainerNodePool_autoscaling(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
zone = "us-central1-a"
initial_node_count = 3

master_auth {
username = "mr.yoda"
password = "adoy.rm"
}
}

resource "google_container_node_pool" "np" {
name = "%s"
zone = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 2
autoscaling {
min_node_count = 1
max_node_count = 3
}
}`, cluster, np)
}

func testAccContainerNodePool_updateAutoscaling(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
zone = "us-central1-a"
initial_node_count = 3

master_auth {
username = "mr.yoda"
password = "adoy.rm"
}
}

resource "google_container_node_pool" "np" {
name = "%s"
zone = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 2
autoscaling {
min_node_count = 1
max_node_count = 5
}
}`, cluster, np)
}
10 changes: 10 additions & 0 deletions website/docs/r/container_node_pool.html.markdown
@@ -67,3 +67,13 @@ resource "google_container_cluster" "primary" {

* `name_prefix` - (Optional) Creates a unique name for the node pool beginning
with the specified prefix. Conflicts with `name`.

* `autoscaling` - (Optional) Configuration required by the cluster autoscaler to adjust
  the size of the node pool to the current cluster usage. Structure is documented below.

The `autoscaling` block supports:

* `min_node_count` - (Required) Minimum number of nodes in the node pool. Must be >= 1 and
  <= `max_node_count`.

* `max_node_count` - (Required) Maximum number of nodes in the node pool. Must be >= `min_node_count`.
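
As a minimal sketch of the new block, modeled on the acceptance test configuration in this PR (the pool name and node counts are illustrative, and a `google_container_cluster` resource named `cluster` is assumed to exist as in the tests above):

```hcl
resource "google_container_node_pool" "np" {
  name               = "example-pool"
  zone               = "us-central1-a"
  cluster            = "${google_container_cluster.cluster.name}"
  initial_node_count = 2

  # Let the cluster autoscaler resize this pool between 1 and 3 nodes.
  autoscaling {
    min_node_count = 1
    max_node_count = 3
  }
}
```

Removing the `autoscaling` block on a later apply disables autoscaling for the pool, which is the path handled by the `else` branch in `resourceContainerNodePoolUpdate` above.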