From 7dd15469a5aa97a973f0a5590a7c2ac84cb9373c Mon Sep 17 00:00:00 2001 From: stack72 Date: Fri, 30 Oct 2015 23:55:00 +0000 Subject: [PATCH 01/30] Adding the ability to specify a snapshot window and retention limit for Redis ElastiCache clusters --- .../aws/resource_aws_elasticache_cluster.go | 37 ++++++++++++ .../resource_aws_elasticache_cluster_test.go | 58 +++++++++++++++++++ .../aws/r/elasticache_cluster.html.markdown | 9 +++ 3 files changed, 104 insertions(+) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index 3460fb292ff8..dde2cd5e38cb 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -138,6 +138,24 @@ func resourceAwsElasticacheCluster() *schema.Resource { }, }, + "snapshot_window": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "snapshot_retention_limit": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + value := v.(int) + if value > 35 { + es = append(es, fmt.Errorf( + "snapshot retention limit cannot be more than 35 days")) + } + return + }, + }, + "tags": tagsSchema(), // apply_immediately is used to determine when the update modifications @@ -187,6 +205,14 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{ req.CacheParameterGroupName = aws.String(v.(string)) } + if v, ok := d.GetOk("snapshot_retention_limit"); ok { + req.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("snapshot_window"); ok { + req.SnapshotWindow = aws.String(v.(string)) + } + if v, ok := d.GetOk("maintenance_window"); ok { req.PreferredMaintenanceWindow = aws.String(v.(string)) } @@ -261,6 +287,8 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) d.Set("security_group_ids", c.SecurityGroups) d.Set("parameter_group_name", c.CacheParameterGroup) d.Set("maintenance_window", c.PreferredMaintenanceWindow) + d.Set("snapshot_window", c.SnapshotWindow) + d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit) if c.NotificationConfiguration != nil { if *c.NotificationConfiguration.TopicStatus == "active" { d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn) @@ -344,6 +372,15 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{ requestUpdate = true } + if d.HasChange("snapshot_window") { + req.EngineVersion = aws.String(d.Get("snapshot_window").(string)) + requestUpdate = true + } + + if d.HasChange("snapshot_retention_limit") { + req.NumCacheNodes = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) + } + if d.HasChange("num_cache_nodes") { req.NumCacheNodes = aws.Int64(int64(d.Get("num_cache_nodes").(int))) requestUpdate = true diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go index b9306002886a..d084224fb1f2 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go @@ -33,6 +33,28 @@ func TestAccAWSElasticacheCluster_basic(t *testing.T) { }) } +func TestAccAWSElasticacheCluster_snapshots(t *testing.T) { + var ec elasticache.CacheCluster + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSElasticacheClusterDestroy, + Steps: 
[]resource.TestStep{ + resource.TestStep{ + Config: testAccAWSElasticacheClusterConfig_snapshots, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), + testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), + resource.TestCheckResourceAttr( + "aws_elasticache_cluster.bar", "snapshot_window", "05:00-09:00"), + resource.TestCheckResourceAttr( + "aws_elasticache_cluster.bar", "snapshot_retention_limit", "3"), + ), + }, + }, + }) +} + func TestAccAWSElasticacheCluster_vpc(t *testing.T) { var csg elasticache.CacheSubnetGroup var ec elasticache.CacheCluster @@ -149,6 +171,42 @@ resource "aws_elasticache_cluster" "bar" { port = 11211 parameter_group_name = "default.memcached1.4" security_group_names = ["${aws_elasticache_security_group.bar.name}"] + snapshot_window = "05:00-09:00" + snapshot_retention_limit = 3 +} +`, genRandInt(), genRandInt(), genRandInt()) + +var testAccAWSElasticacheClusterConfig_snapshots = fmt.Sprintf(` +provider "aws" { + region = "us-east-1" +} +resource "aws_security_group" "bar" { + name = "tf-test-security-group-%03d" + description = "tf-test-security-group-descr" + ingress { + from_port = -1 + to_port = -1 + protocol = "icmp" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_elasticache_security_group" "bar" { + name = "tf-test-security-group-%03d" + description = "tf-test-security-group-descr" + security_group_names = ["${aws_security_group.bar.name}"] +} + +resource "aws_elasticache_cluster" "bar" { + cluster_id = "tf-test-%03d" + engine = "redis" + node_type = "cache.m1.small" + num_cache_nodes = 1 + port = 6379 + parameter_group_name = "default.redis2.8" + security_group_names = ["${aws_elasticache_security_group.bar.name}"] + snapshot_window = "05:00-09:00" + snapshot_retention_limit = 3 } `, genRandInt(), genRandInt(), genRandInt()) diff --git a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown index ef1d69ed4a33..4a4cb4d76ed6 100644 --- a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown @@ -73,6 +73,15 @@ names to associate with this cache cluster Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3. Example: `arn:aws:s3:::my_bucket/snapshot1.rdb` +* `snapshot_window` - (Optional) The daily time range (in UTC) during which ElastiCache will +begin taking a daily snapshot of your cache cluster. Can only be used for the Redis engine. Example: 05:00-09:00 + +* `snapshow_retention_limit` - (Optional) The number of days for which ElastiCache will +retain automatic cache cluster snapshots before deleting them. For example, if you set +SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days +before being deleted. If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off. +Can only be used for the Redis engine. + * `notification_topic_arn` – (Optional) An Amazon Resource Name (ARN) of an SNS topic to send ElastiCache notifications to. 
Example: `arn:aws:sns:us-east-1:012345678999:my_sns_topic` From 4f05df6cad9773c2bda92bae8def4017ec3d1041 Mon Sep 17 00:00:00 2001 From: stack72 Date: Mon, 2 Nov 2015 20:57:04 +0000 Subject: [PATCH 02/30] When I was setting the update parameters for the Snapshotting, I didn't update the copy/pasted params --- builtin/providers/aws/resource_aws_elasticache_cluster.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index dde2cd5e38cb..a33321c3b464 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -373,12 +373,11 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{ } if d.HasChange("snapshot_window") { - req.EngineVersion = aws.String(d.Get("snapshot_window").(string)) - requestUpdate = true + req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string)) } if d.HasChange("snapshot_retention_limit") { - req.NumCacheNodes = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) + req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) } if d.HasChange("num_cache_nodes") { From 707bfd739aa2bdd9748896e3cbc5b6e02d1e1077 Mon Sep 17 00:00:00 2001 From: stack72 Date: Tue, 3 Nov 2015 12:35:06 +0000 Subject: [PATCH 03/30] Added an extra test for the Elasticache Cluster to show that updates work. Also added some debugging to show that the API returns the Elasticache retention period info --- .../aws/resource_aws_elasticache_cluster.go | 2 + .../resource_aws_elasticache_cluster_test.go | 67 +++++++++++++++++++ 2 files changed, 69 insertions(+) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index a33321c3b464..a2c312d353be 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -287,7 +287,9 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) d.Set("security_group_ids", c.SecurityGroups) d.Set("parameter_group_name", c.CacheParameterGroup) d.Set("maintenance_window", c.PreferredMaintenanceWindow) + log.Printf("[INFO] Found %s as the Snapshow Window", *c.SnapshotWindow) d.Set("snapshot_window", c.SnapshotWindow) + log.Printf("[INFO] Found %d as the Snapshow Retention Limit", *c.SnapshotRetentionLimit) d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit) if c.NotificationConfiguration != nil { if *c.NotificationConfiguration.TopicStatus == "active" { diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go index d084224fb1f2..666be2c8ab76 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go @@ -54,6 +54,39 @@ func TestAccAWSElasticacheCluster_snapshots(t *testing.T) { }, }) } +func TestAccAWSElasticacheCluster_snapshotsWithUpdates(t *testing.T) { + var ec elasticache.CacheCluster + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSElasticacheClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSElasticacheClusterConfig_snapshots, + Check: resource.ComposeTestCheckFunc( + 
testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), + testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), + resource.TestCheckResourceAttr( + "aws_elasticache_cluster.bar", "snapshot_window", "05:00-09:00"), + resource.TestCheckResourceAttr( + "aws_elasticache_cluster.bar", "snapshot_retention_limit", "3"), + ), + }, + + resource.TestStep{ + Config: testAccAWSElasticacheClusterConfig_snapshotsUpdated, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), + testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), + resource.TestCheckResourceAttr( + "aws_elasticache_cluster.bar", "snapshot_window", "07:00-09:00"), + resource.TestCheckResourceAttr( + "aws_elasticache_cluster.bar", "snapshot_retention_limit", "7"), + ), + }, + }, + }) +} func TestAccAWSElasticacheCluster_vpc(t *testing.T) { var csg elasticache.CacheSubnetGroup @@ -210,6 +243,40 @@ resource "aws_elasticache_cluster" "bar" { } `, genRandInt(), genRandInt(), genRandInt()) +var testAccAWSElasticacheClusterConfig_snapshotsUpdated = fmt.Sprintf(` +provider "aws" { + region = "us-east-1" +} +resource "aws_security_group" "bar" { + name = "tf-test-security-group-%03d" + description = "tf-test-security-group-descr" + ingress { + from_port = -1 + to_port = -1 + protocol = "icmp" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_elasticache_security_group" "bar" { + name = "tf-test-security-group-%03d" + description = "tf-test-security-group-descr" + security_group_names = ["${aws_security_group.bar.name}"] +} + +resource "aws_elasticache_cluster" "bar" { + cluster_id = "tf-test-%03d" + engine = "redis" + node_type = "cache.m1.small" + num_cache_nodes = 1 + port = 6379 + parameter_group_name = "default.redis2.8" + security_group_names = ["${aws_elasticache_security_group.bar.name}"] + snapshot_window = "07:00-09:00" + snapshot_retention_limit = 7 +} +`, genRandInt(), genRandInt(), genRandInt()) + var testAccAWSElasticacheClusterInVPCConfig = fmt.Sprintf(` resource "aws_vpc" "foo" { cidr_block = "192.168.0.0/16" From ca2ea80af36ddd1907c0daa8bcd74e4985301145 Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 5 Nov 2015 12:23:07 +0000 Subject: [PATCH 04/30] Making the changes to the snapshotting for Elasticache Redis as per @catsby's findings --- .../aws/resource_aws_elasticache_cluster.go | 37 +++++++++++-------- .../resource_aws_elasticache_cluster_test.go | 34 ++++++++++------- 2 files changed, 43 insertions(+), 28 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index a2c312d353be..69777a86632a 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -205,12 +205,14 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{ req.CacheParameterGroupName = aws.String(v.(string)) } - if v, ok := d.GetOk("snapshot_retention_limit"); ok { - req.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) - } + if !strings.Contains(d.Get("node_type").(string), "cache.t2") { + if v, ok := d.GetOk("snapshot_retention_limit"); ok { + req.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) + } - if v, ok := d.GetOk("snapshot_window"); ok { - req.SnapshotWindow = aws.String(v.(string)) + if v, ok := d.GetOk("snapshot_window"); ok { + req.SnapshotWindow = aws.String(v.(string)) + } } if v, ok := 
d.GetOk("maintenance_window"); ok { @@ -287,10 +289,12 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) d.Set("security_group_ids", c.SecurityGroups) d.Set("parameter_group_name", c.CacheParameterGroup) d.Set("maintenance_window", c.PreferredMaintenanceWindow) - log.Printf("[INFO] Found %s as the Snapshow Window", *c.SnapshotWindow) - d.Set("snapshot_window", c.SnapshotWindow) - log.Printf("[INFO] Found %d as the Snapshow Retention Limit", *c.SnapshotRetentionLimit) - d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit) + if c.SnapshotWindow != nil { + d.Set("snapshot_window", c.SnapshotWindow) + } + if c.SnapshotRetentionLimit != nil { + d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit) + } if c.NotificationConfiguration != nil { if *c.NotificationConfiguration.TopicStatus == "active" { d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn) @@ -373,13 +377,16 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{ req.EngineVersion = aws.String(d.Get("engine_version").(string)) requestUpdate = true } + if !strings.Contains(d.Get("node_type").(string), "cache.t2") { + if d.HasChange("snapshot_window") { + req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string)) + requestUpdate = true + } - if d.HasChange("snapshot_window") { - req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string)) - } - - if d.HasChange("snapshot_retention_limit") { - req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) + if d.HasChange("snapshot_retention_limit") { + req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) + requestUpdate = true + } } if d.HasChange("num_cache_nodes") { diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go index 666be2c8ab76..78e28763dc50 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go @@ -35,13 +35,17 @@ func TestAccAWSElasticacheCluster_basic(t *testing.T) { func TestAccAWSElasticacheCluster_snapshots(t *testing.T) { var ec elasticache.CacheCluster + + ri := genRandInt() + config := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSElasticacheClusterDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSElasticacheClusterConfig_snapshots, + Config: config, Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), @@ -56,13 +60,18 @@ func TestAccAWSElasticacheCluster_snapshots(t *testing.T) { } func TestAccAWSElasticacheCluster_snapshotsWithUpdates(t *testing.T) { var ec elasticache.CacheCluster + + ri := genRandInt() + preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri) + postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshotsUpdated, ri) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSElasticacheClusterDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSElasticacheClusterConfig_snapshots, + Config: preConfig, Check: resource.ComposeTestCheckFunc( 
testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), @@ -74,7 +83,7 @@ func TestAccAWSElasticacheCluster_snapshotsWithUpdates(t *testing.T) { }, resource.TestStep{ - Config: testAccAWSElasticacheClusterConfig_snapshotsUpdated, + Config: postConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), @@ -204,17 +213,15 @@ resource "aws_elasticache_cluster" "bar" { port = 11211 parameter_group_name = "default.memcached1.4" security_group_names = ["${aws_elasticache_security_group.bar.name}"] - snapshot_window = "05:00-09:00" - snapshot_retention_limit = 3 } `, genRandInt(), genRandInt(), genRandInt()) -var testAccAWSElasticacheClusterConfig_snapshots = fmt.Sprintf(` +var testAccAWSElasticacheClusterConfig_snapshots = ` provider "aws" { region = "us-east-1" } resource "aws_security_group" "bar" { - name = "tf-test-security-group-%03d" + name = "tf-test-security-group" description = "tf-test-security-group-descr" ingress { from_port = -1 @@ -225,7 +232,7 @@ resource "aws_security_group" "bar" { } resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group-%03d" + name = "tf-test-security-group" description = "tf-test-security-group-descr" security_group_names = ["${aws_security_group.bar.name}"] } @@ -241,14 +248,14 @@ resource "aws_elasticache_cluster" "bar" { snapshot_window = "05:00-09:00" snapshot_retention_limit = 3 } -`, genRandInt(), genRandInt(), genRandInt()) +` -var testAccAWSElasticacheClusterConfig_snapshotsUpdated = fmt.Sprintf(` +var testAccAWSElasticacheClusterConfig_snapshotsUpdated = ` provider "aws" { region = "us-east-1" } resource "aws_security_group" "bar" { - name = "tf-test-security-group-%03d" + name = "tf-test-security-group" description = "tf-test-security-group-descr" ingress { from_port = -1 @@ -259,7 +266,7 @@ resource "aws_security_group" "bar" { } resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group-%03d" + name = "tf-test-security-group" description = "tf-test-security-group-descr" security_group_names = ["${aws_security_group.bar.name}"] } @@ -274,8 +281,9 @@ resource "aws_elasticache_cluster" "bar" { security_group_names = ["${aws_elasticache_security_group.bar.name}"] snapshot_window = "07:00-09:00" snapshot_retention_limit = 7 + apply_immediately = true } -`, genRandInt(), genRandInt(), genRandInt()) +` var testAccAWSElasticacheClusterInVPCConfig = fmt.Sprintf(` resource "aws_vpc" "foo" { From 350f91ec063b0057ea7fd37ef87227ca6225ad3a Mon Sep 17 00:00:00 2001 From: stack72 Date: Fri, 6 Nov 2015 11:16:51 +0000 Subject: [PATCH 05/30] Removing the instance_type check in the ElastiCache cluster creation. We now allow the error to bubble up to the userr when the wrong instance type is used. 
The limitation for t2 instance types now allowing snapshotting is also now documented --- .../aws/resource_aws_elasticache_cluster.go | 37 ++++++++----------- .../resource_aws_elasticache_cluster_test.go | 35 +++--------------- .../aws/r/elasticache_cluster.html.markdown | 1 + 3 files changed, 21 insertions(+), 52 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index 69777a86632a..6f178b71e319 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -205,14 +205,12 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{ req.CacheParameterGroupName = aws.String(v.(string)) } - if !strings.Contains(d.Get("node_type").(string), "cache.t2") { - if v, ok := d.GetOk("snapshot_retention_limit"); ok { - req.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) - } + if v, ok := d.GetOk("snapshot_retention_limit"); ok { + req.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) + } - if v, ok := d.GetOk("snapshot_window"); ok { - req.SnapshotWindow = aws.String(v.(string)) - } + if v, ok := d.GetOk("snapshot_window"); ok { + req.SnapshotWindow = aws.String(v.(string)) } if v, ok := d.GetOk("maintenance_window"); ok { @@ -289,12 +287,8 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) d.Set("security_group_ids", c.SecurityGroups) d.Set("parameter_group_name", c.CacheParameterGroup) d.Set("maintenance_window", c.PreferredMaintenanceWindow) - if c.SnapshotWindow != nil { - d.Set("snapshot_window", c.SnapshotWindow) - } - if c.SnapshotRetentionLimit != nil { - d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit) - } + d.Set("snapshot_window", c.SnapshotWindow) + d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit) if c.NotificationConfiguration != nil { if *c.NotificationConfiguration.TopicStatus == "active" { d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn) @@ -377,16 +371,15 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{ req.EngineVersion = aws.String(d.Get("engine_version").(string)) requestUpdate = true } - if !strings.Contains(d.Get("node_type").(string), "cache.t2") { - if d.HasChange("snapshot_window") { - req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string)) - requestUpdate = true - } - if d.HasChange("snapshot_retention_limit") { - req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) - requestUpdate = true - } + if d.HasChange("snapshot_window") { + req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string)) + requestUpdate = true + } + + if d.HasChange("snapshot_retention_limit") { + req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) + requestUpdate = true } if d.HasChange("num_cache_nodes") { diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go index 78e28763dc50..0620ef47bae2 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go @@ -33,37 +33,12 @@ func TestAccAWSElasticacheCluster_basic(t *testing.T) { }) } -func TestAccAWSElasticacheCluster_snapshots(t *testing.T) { - var ec elasticache.CacheCluster - - ri := genRandInt() - config := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri) - - resource.Test(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheClusterDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), - testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), - resource.TestCheckResourceAttr( - "aws_elasticache_cluster.bar", "snapshot_window", "05:00-09:00"), - resource.TestCheckResourceAttr( - "aws_elasticache_cluster.bar", "snapshot_retention_limit", "3"), - ), - }, - }, - }) -} func TestAccAWSElasticacheCluster_snapshotsWithUpdates(t *testing.T) { var ec elasticache.CacheCluster ri := genRandInt() - preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri) - postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshotsUpdated, ri) + preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri, ri, ri) + postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshotsUpdated, ri, ri, ri) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -221,7 +196,7 @@ provider "aws" { region = "us-east-1" } resource "aws_security_group" "bar" { - name = "tf-test-security-group" + name = "tf-test-security-group-%03d" description = "tf-test-security-group-descr" ingress { from_port = -1 @@ -232,7 +207,7 @@ resource "aws_security_group" "bar" { } resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group" + name = "tf-test-security-group-%03d" description = "tf-test-security-group-descr" security_group_names = ["${aws_security_group.bar.name}"] } @@ -240,7 +215,7 @@ resource "aws_elasticache_security_group" "bar" { resource "aws_elasticache_cluster" "bar" { cluster_id = "tf-test-%03d" engine = "redis" - node_type = "cache.m1.small" + node_type = "cache.t2.small" num_cache_nodes = 1 port = 6379 parameter_group_name = "default.redis2.8" diff --git a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown index 4a4cb4d76ed6..e39d6172a714 100644 --- a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown @@ -88,6 +88,7 @@ SNS topic to send ElastiCache notifications to. Example: * `tags` - (Optional) A mapping of tags to assign to the resource. +~> **NOTE:** Snapshotting functionality is not compatible with t2 instance types. ## Attributes Reference From 0e9397fc741fa69651a9dfdf2f2ef1881fd7d769 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Fri, 6 Nov 2015 21:52:30 +0000 Subject: [PATCH 06/30] provider/openstack: Security Group Rules Fix This commit fixes how security group rules are read by Terraform and enables them to be correctly removed when the rule resource is modified. 
--- .../resource_openstack_compute_secgroup_v2.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go b/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go index 3cc0cbf0cc64..202c4d979499 100644 --- a/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go +++ b/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go @@ -131,10 +131,10 @@ func resourceComputeSecGroupV2Read(d *schema.ResourceData, meta interface{}) err d.Set("description", sg.Description) rtm := rulesToMap(sg.Rules) for _, v := range rtm { - if v["group"] == d.Get("name") { - v["self"] = "1" + if v["from_group_id"] == d.Get("name") { + v["self"] = true } else { - v["self"] = "0" + v["self"] = false } } log.Printf("[DEBUG] rulesToMap(sg.Rules): %+v", rtm) @@ -283,12 +283,12 @@ func rulesToMap(sgrs []secgroups.Rule) []map[string]interface{} { sgrMap := make([]map[string]interface{}, len(sgrs)) for i, sgr := range sgrs { sgrMap[i] = map[string]interface{}{ - "id": sgr.ID, - "from_port": sgr.FromPort, - "to_port": sgr.ToPort, - "ip_protocol": sgr.IPProtocol, - "cidr": sgr.IPRange.CIDR, - "group": sgr.Group.Name, + "id": sgr.ID, + "from_port": sgr.FromPort, + "to_port": sgr.ToPort, + "ip_protocol": sgr.IPProtocol, + "cidr": sgr.IPRange.CIDR, + "from_group_id": sgr.Group.Name, } } return sgrMap From 1ab69ab6383b65b4775b955fa4e2db6f9b92f2ed Mon Sep 17 00:00:00 2001 From: Will Stern Date: Fri, 6 Nov 2015 15:55:58 -0600 Subject: [PATCH 07/30] typo in cluster_identifier --- .../docs/providers/aws/r/rds_cluster_instance.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown index 0d64a3f8769a..c124713b3829 100644 --- a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown @@ -27,7 +27,7 @@ For more information on Amazon Aurora, see [Aurora on Amazon RDS][2] in the Amaz resource "aws_rds_cluster_instance" "cluster_instances" { count = 2 identifier = "aurora-cluster-demo" - cluster_identifer = "${aws_rds_cluster.default.id}" + cluster_identifier = "${aws_rds_cluster.default.id}" instance_class = "db.r3.large" } From 327bd4f9c05afa1634358b2348c3ef9d615629c2 Mon Sep 17 00:00:00 2001 From: Rob Zienert Date: Fri, 6 Nov 2015 15:27:47 -0600 Subject: [PATCH 08/30] Adding S3 support for Lambda provider --- .../aws/resource_aws_lambda_function.go | 64 ++++++++++++++----- .../aws/r/lambda_function.html.markdown | 5 +- 2 files changed, 52 insertions(+), 17 deletions(-) diff --git a/builtin/providers/aws/resource_aws_lambda_function.go b/builtin/providers/aws/resource_aws_lambda_function.go index 4ce8981744b1..324016455e75 100644 --- a/builtin/providers/aws/resource_aws_lambda_function.go +++ b/builtin/providers/aws/resource_aws_lambda_function.go @@ -13,6 +13,8 @@ import ( "github.com/aws/aws-sdk-go/service/lambda" "github.com/mitchellh/go-homedir" + "errors" + "github.com/hashicorp/terraform/helper/schema" ) @@ -25,13 +27,28 @@ func resourceAwsLambdaFunction() *schema.Resource { Schema: map[string]*schema.Schema{ "filename": &schema.Schema{ - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"s3_bucket", "s3_key", "s3_object_version"}, + }, + "s3_bucket": &schema.Schema{ + Type: 
schema.TypeString, + Optional: true, + ConflictsWith: []string{"filename"}, + }, + "s3_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"filename"}, + }, + "s3_object_version": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"filename"}, }, "description": &schema.Schema{ Type: schema.TypeString, Optional: true, - ForceNew: true, // TODO make this editable }, "function_name": &schema.Schema{ Type: schema.TypeString, @@ -93,22 +110,36 @@ func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Creating Lambda Function %s with role %s", functionName, iamRole) - filename, err := homedir.Expand(d.Get("filename").(string)) - if err != nil { - return err - } - zipfile, err := ioutil.ReadFile(filename) - if err != nil { - return err + var functionCode *lambda.FunctionCode + if v, ok := d.GetOk("filename"); ok { + filename, err := homedir.Expand(v.(string)) + if err != nil { + return err + } + zipfile, err := ioutil.ReadFile(filename) + if err != nil { + return err + } + d.Set("source_code_hash", sha256.Sum256(zipfile)) + functionCode = &lambda.FunctionCode{ + ZipFile: zipfile, + } + } else { + s3Bucket, bucketOk := d.GetOk("s3_bucket") + s3Key, keyOk := d.GetOk("s3_key") + s3ObjectVersion, versionOk := d.GetOk("s3_object_version") + if !bucketOk || !keyOk || !versionOk { + return errors.New("s3_bucket, s3_key and s3_object_version must all be set while using S3 code source") + } + functionCode = &lambda.FunctionCode{ + S3Bucket: aws.String(s3Bucket.(string)), + S3Key: aws.String(s3Key.(string)), + S3ObjectVersion: aws.String(s3ObjectVersion.(string)), + } } - d.Set("source_code_hash", sha256.Sum256(zipfile)) - - log.Printf("[DEBUG] ") params := &lambda.CreateFunctionInput{ - Code: &lambda.FunctionCode{ - ZipFile: zipfile, - }, + Code: functionCode, Description: aws.String(d.Get("description").(string)), FunctionName: aws.String(functionName), Handler: aws.String(d.Get("handler").(string)), @@ -118,6 +149,7 @@ func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) e Timeout: aws.Int64(int64(d.Get("timeout").(int))), } + var err error for i := 0; i < 5; i++ { _, err = conn.CreateFunction(params) if awsErr, ok := err.(awserr.Error); ok { diff --git a/website/source/docs/providers/aws/r/lambda_function.html.markdown b/website/source/docs/providers/aws/r/lambda_function.html.markdown index 4c931fbada71..f9c1ea4a3f68 100644 --- a/website/source/docs/providers/aws/r/lambda_function.html.markdown +++ b/website/source/docs/providers/aws/r/lambda_function.html.markdown @@ -44,7 +44,10 @@ resource "aws_lambda_function" "test_lambda" { ## Argument Reference -* `filename` - (Required) A [zip file][2] containing your lambda function source code. +* `filename` - (Optional) A [zip file][2] containing your lambda function source code. If defined, The `s3_*` options cannot be used. +* `s3_bucket` - (Optional) The S3 bucket location containing your lambda function source code. Conflicts with `filename`. +* `s3_key` - (Optional) The S3 key containing your lambda function source code. Conflicts with `filename`. +* `s3_object_version` - (Optional) The object version of your lambda function source code. Conflicts with `filename`. * `function_name` - (Required) A unique name for your Lambda Function. * `handler` - (Required) The function [entrypoint][3] in your code. * `role` - (Required) IAM role attached to the Lambda Function. 
This governs both who / what can invoke your Lambda Function, as well as what resources our Lambda Function has access to. See [Lambda Permission Model][4] for more details. From 996f88acd8cab692cd26ea32c2c63dcb26dcb21a Mon Sep 17 00:00:00 2001 From: Clint Date: Fri, 6 Nov 2015 16:17:22 -0600 Subject: [PATCH 09/30] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5215c84e3c10..cdb5a393bb7b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ IMPROVEMENTS: * provider/aws: Add notification topic ARN for ElastiCache clusters [GH-3674] * provider/aws: Add `kinesis_endpoint` for configuring Kinesis [GH-3255] * provider/aws: Add a computed ARN for S3 Buckets [GH-3685] + * provider/aws: Add S3 support for Lambda Function resource [GH-3794] * provider/aws: Add configuration to enable copying RDS tags to final snapshot [GH-3529] * provider/aws: RDS Cluster additions (`backup_retention_period`, `preferred_backup_window`, `preferred_maintenance_window`) [GH-3757] * provider/openstack: Use IPv4 as the defeault IP version for subnets [GH-3091] From dbd2a43f464b80ab3c3893b7280e87d7105ce92f Mon Sep 17 00:00:00 2001 From: clint shryock Date: Fri, 6 Nov 2015 16:55:04 -0600 Subject: [PATCH 10/30] config updates for ElastiCache test --- .../providers/aws/resource_aws_elasticache_cluster_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go index 0620ef47bae2..a17c5d9b1e4f 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go @@ -215,7 +215,7 @@ resource "aws_elasticache_security_group" "bar" { resource "aws_elasticache_cluster" "bar" { cluster_id = "tf-test-%03d" engine = "redis" - node_type = "cache.t2.small" + node_type = "cache.m1.small" num_cache_nodes = 1 port = 6379 parameter_group_name = "default.redis2.8" @@ -230,7 +230,7 @@ provider "aws" { region = "us-east-1" } resource "aws_security_group" "bar" { - name = "tf-test-security-group" + name = "tf-test-security-group-%03d" description = "tf-test-security-group-descr" ingress { from_port = -1 @@ -241,7 +241,7 @@ resource "aws_security_group" "bar" { } resource "aws_elasticache_security_group" "bar" { - name = "tf-test-security-group" + name = "tf-test-security-group-%03d" description = "tf-test-security-group-descr" security_group_names = ["${aws_security_group.bar.name}"] } From c40f689201336b363d722e56d61003c1aacc4587 Mon Sep 17 00:00:00 2001 From: Clint Date: Fri, 6 Nov 2015 16:56:54 -0600 Subject: [PATCH 11/30] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cdb5a393bb7b..1399eca4fbc0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ IMPROVEMENTS: * provider/aws: Add `kinesis_endpoint` for configuring Kinesis [GH-3255] * provider/aws: Add a computed ARN for S3 Buckets [GH-3685] * provider/aws: Add S3 support for Lambda Function resource [GH-3794] + * provider/aws: Add snapshot window and retention limits for ElastiCache (Redis) [GH-3707] * provider/aws: Add configuration to enable copying RDS tags to final snapshot [GH-3529] * provider/aws: RDS Cluster additions (`backup_retention_period`, `preferred_backup_window`, `preferred_maintenance_window`) [GH-3757] * provider/openstack: Use IPv4 as the defeault IP version for subnets [GH-3091] From 
a727b0c415981afb9ff29d351b603aef120d7340 Mon Sep 17 00:00:00 2001 From: Joe Gross Date: Fri, 6 Nov 2015 19:02:05 -0800 Subject: [PATCH 12/30] add closing quote --- website/source/intro/getting-started/variables.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/intro/getting-started/variables.html.md b/website/source/intro/getting-started/variables.html.md index 41e828a7244b..24154ca25d23 100644 --- a/website/source/intro/getting-started/variables.html.md +++ b/website/source/intro/getting-started/variables.html.md @@ -186,7 +186,7 @@ And access them via `lookup()`: ``` output "ami" { - value = "${lookup(var.amis, var.region)} + value = "${lookup(var.amis, var.region)}" } ``` From 98a441314b7aa631ef6753d14f32b07486bf6dc5 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Sat, 7 Nov 2015 04:01:50 +0000 Subject: [PATCH 13/30] provider/openstack: Make Security Groups Computed This commit makes security groups in the openstack_compute_instance_v2 resource computed. This fixes issues where a security group is omitted which causes the instance to have the "default" group applied. When re-applying without this patch, an error will occur. --- .../openstack/resource_openstack_compute_instance_v2.go | 1 + 1 file changed, 1 insertion(+) diff --git a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go index 4cf68de038ab..d21e1afedbec 100644 --- a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go +++ b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go @@ -95,6 +95,7 @@ func resourceComputeInstanceV2() *schema.Resource { Type: schema.TypeSet, Optional: true, ForceNew: false, + Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, From ad1c28990c37324575a1249a5f021ae45255a327 Mon Sep 17 00:00:00 2001 From: Eddie Forson Date: Sat, 7 Nov 2015 13:35:21 +0000 Subject: [PATCH 14/30] providers/google: add pubsub auth endpoint #3803 --- builtin/providers/google/service_scope.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/builtin/providers/google/service_scope.go b/builtin/providers/google/service_scope.go index d4c518125167..5770dbeaa139 100644 --- a/builtin/providers/google/service_scope.go +++ b/builtin/providers/google/service_scope.go @@ -11,6 +11,7 @@ func canonicalizeServiceScope(scope string) string { "datastore": "https://www.googleapis.com/auth/datastore", "logging-write": "https://www.googleapis.com/auth/logging.write", "monitoring": "https://www.googleapis.com/auth/monitoring", + "pubsub": "https://www.googleapis.com/auth/pubsub", "sql": "https://www.googleapis.com/auth/sqlservice", "sql-admin": "https://www.googleapis.com/auth/sqlservice.admin", "storage-full": "https://www.googleapis.com/auth/devstorage.full_control", @@ -22,9 +23,9 @@ func canonicalizeServiceScope(scope string) string { "userinfo-email": "https://www.googleapis.com/auth/userinfo.email", } - if matchedUrl, ok := scopeMap[scope]; ok { - return matchedUrl - } else { - return scope + if matchedURL, ok := scopeMap[scope]; ok { + return matchedURL } + + return scope } From dc96d7e61bd1c12897060cca1a94eec077887321 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Sat, 7 Nov 2015 12:32:22 -0700 Subject: [PATCH 15/30] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1399eca4fbc0..a472b61bd8f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ 
-60,6 +60,8 @@ BUG FIXES: * provider/openstack: Fix boot from volume [GH-3206] * provider/openstack: Fix crashing when image is no longer accessible [GH-2189] * provider/openstack: Better handling of network resource state changes [GH-3712] + * provider/openstack: Fix issue preventing security group rules from being removed [GH-3796] + * provider/openstack: Fix crashing when no security group is specified [GH-3801] ## 0.6.6 (October 23, 2015) From 02f512d4bd665c2983a22af8d51d368c509049c1 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 7 Nov 2015 16:53:42 -0800 Subject: [PATCH 16/30] config: new HCL API --- config/loader.go | 2 +- config/loader_hcl.go | 475 ++++++++++++++++++++----------------------- 2 files changed, 227 insertions(+), 250 deletions(-) diff --git a/config/loader.go b/config/loader.go index 5711ce8ef83d..c9a1295fe1ce 100644 --- a/config/loader.go +++ b/config/loader.go @@ -25,7 +25,7 @@ func LoadJSON(raw json.RawMessage) (*Config, error) { // Start building the result hclConfig := &hclConfigurable{ - Object: obj, + Root: obj, } return hclConfig.Config() diff --git a/config/loader_hcl.go b/config/loader_hcl.go index f451a31d154d..07b0eef84a0e 100644 --- a/config/loader_hcl.go +++ b/config/loader_hcl.go @@ -5,15 +5,15 @@ import ( "io/ioutil" "github.com/hashicorp/hcl" - hclobj "github.com/hashicorp/hcl/hcl" + "github.com/hashicorp/hcl/hcl/ast" "github.com/mitchellh/mapstructure" ) // hclConfigurable is an implementation of configurable that knows // how to turn HCL configuration into a *Config object. type hclConfigurable struct { - File string - Object *hclobj.Object + File string + Root *ast.File } func (t *hclConfigurable) Config() (*Config, error) { @@ -36,7 +36,13 @@ func (t *hclConfigurable) Config() (*Config, error) { Variable map[string]*hclVariable } - if err := hcl.DecodeObject(&rawConfig, t.Object); err != nil { + // Top-level item should be the object list + list, ok := t.Root.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("error parsing: file doesn't contain a root object") + } + + if err := hcl.DecodeObject(&rawConfig, list); err != nil { return nil, err } @@ -73,7 +79,7 @@ func (t *hclConfigurable) Config() (*Config, error) { } // Get Atlas configuration - if atlas := t.Object.Get("atlas", false); atlas != nil { + if atlas := list.Filter("atlas"); len(atlas.Items) > 0 { var err error config.Atlas, err = loadAtlasHcl(atlas) if err != nil { @@ -82,7 +88,7 @@ func (t *hclConfigurable) Config() (*Config, error) { } // Build the modules - if modules := t.Object.Get("module", false); modules != nil { + if modules := list.Filter("module"); len(modules.Items) > 0 { var err error config.Modules, err = loadModulesHcl(modules) if err != nil { @@ -91,7 +97,7 @@ func (t *hclConfigurable) Config() (*Config, error) { } // Build the provider configs - if providers := t.Object.Get("provider", false); providers != nil { + if providers := list.Filter("provider"); len(providers.Items) > 0 { var err error config.ProviderConfigs, err = loadProvidersHcl(providers) if err != nil { @@ -100,7 +106,7 @@ func (t *hclConfigurable) Config() (*Config, error) { } // Build the resources - if resources := t.Object.Get("resource", false); resources != nil { + if resources := list.Filter("resource"); len(resources.Items) > 0 { var err error config.Resources, err = loadResourcesHcl(resources) if err != nil { @@ -109,7 +115,7 @@ func (t *hclConfigurable) Config() (*Config, error) { } // Build the outputs - if outputs := t.Object.Get("output", false); outputs != nil { + if 
outputs := list.Filter("output"); len(outputs.Items) > 0 { var err error config.Outputs, err = loadOutputsHcl(outputs) if err != nil { @@ -118,8 +124,13 @@ func (t *hclConfigurable) Config() (*Config, error) { } // Check for invalid keys - for _, elem := range t.Object.Elem(true) { - k := elem.Key + for _, item := range list.Items { + if len(item.Keys) == 0 { + // Not sure how this would happen, but let's avoid a panic + continue + } + + k := item.Keys[0].Token.Value().(string) if _, ok := validKeys[k]; ok { continue } @@ -133,8 +144,6 @@ func (t *hclConfigurable) Config() (*Config, error) { // loadFileHcl is a fileLoaderFunc that knows how to read HCL // files and turn them into hclConfigurables. func loadFileHcl(root string) (configurable, []string, error) { - var obj *hclobj.Object = nil - // Read the HCL file and prepare for parsing d, err := ioutil.ReadFile(root) if err != nil { @@ -143,7 +152,7 @@ func loadFileHcl(root string) (configurable, []string, error) { } // Parse it - obj, err = hcl.Parse(string(d)) + hclRoot, err := hcl.Parse(string(d)) if err != nil { return nil, nil, fmt.Errorf( "Error parsing %s: %s", root, err) @@ -151,8 +160,8 @@ func loadFileHcl(root string) (configurable, []string, error) { // Start building the result result := &hclConfigurable{ - File: root, - Object: obj, + File: root, + Root: hclRoot, } // Dive in, find the imports. This is disabled for now since @@ -200,9 +209,16 @@ func loadFileHcl(root string) (configurable, []string, error) { // Given a handle to a HCL object, this transforms it into the Atlas // configuration. -func loadAtlasHcl(obj *hclobj.Object) (*AtlasConfig, error) { +func loadAtlasHcl(list *ast.ObjectList) (*AtlasConfig, error) { + if len(list.Items) > 1 { + return nil, fmt.Errorf("only one 'atlas' block allowed") + } + + // Get our one item + item := list.Items[0] + var config AtlasConfig - if err := hcl.DecodeObject(&config, obj); err != nil { + if err := hcl.DecodeObject(&config, item.Val); err != nil { return nil, fmt.Errorf( "Error reading atlas config: %s", err) @@ -217,18 +233,10 @@ func loadAtlasHcl(obj *hclobj.Object) (*AtlasConfig, error) { // The resulting modules may not be unique, but each module // represents exactly one module definition in the HCL configuration. // We leave it up to another pass to merge them together. -func loadModulesHcl(os *hclobj.Object) ([]*Module, error) { - var allNames []*hclobj.Object - - // See loadResourcesHcl for why this exists. Don't touch this. - for _, o1 := range os.Elem(false) { - // Iterate the inner to get the list of types - for _, o2 := range o1.Elem(true) { - // Iterate all of this type to get _all_ the types - for _, o3 := range o2.Elem(false) { - allNames = append(allNames, o3) - } - } +func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) { + list = list.Children() + if len(list.Items) == 0 { + return nil, nil } // Where all the results will go @@ -236,11 +244,18 @@ func loadModulesHcl(os *hclobj.Object) ([]*Module, error) { // Now go over all the types and their children in order to get // all of the actual resources. 
- for _, obj := range allNames { - k := obj.Key + for _, item := range list.Items { + k := item.Keys[0].Token.Value().(string) + + var listVal *ast.ObjectList + if ot, ok := item.Val.(*ast.ObjectType); ok { + listVal = ot.List + } else { + return nil, fmt.Errorf("module '%s': should be an object", k) + } var config map[string]interface{} - if err := hcl.DecodeObject(&config, obj); err != nil { + if err := hcl.DecodeObject(&config, item.Val); err != nil { return nil, fmt.Errorf( "Error reading config for %s: %s", k, @@ -260,8 +275,8 @@ func loadModulesHcl(os *hclobj.Object) ([]*Module, error) { // If we have a count, then figure it out var source string - if o := obj.Get("source", false); o != nil { - err = hcl.DecodeObject(&source, o) + if o := listVal.Filter("source"); len(o.Items) > 0 { + err = hcl.DecodeObject(&source, o.Items[0].Val) if err != nil { return nil, fmt.Errorf( "Error parsing source for %s: %s", @@ -282,27 +297,19 @@ func loadModulesHcl(os *hclobj.Object) ([]*Module, error) { // LoadOutputsHcl recurses into the given HCL object and turns // it into a mapping of outputs. -func loadOutputsHcl(os *hclobj.Object) ([]*Output, error) { - objects := make(map[string]*hclobj.Object) - - // Iterate over all the "output" blocks and get the keys along with - // their raw configuration objects. We'll parse those later. - for _, o1 := range os.Elem(false) { - for _, o2 := range o1.Elem(true) { - objects[o2.Key] = o2 - } - } - - if len(objects) == 0 { +func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) { + list = list.Children() + if len(list.Items) == 0 { return nil, nil } // Go through each object and turn it into an actual result. - result := make([]*Output, 0, len(objects)) - for n, o := range objects { - var config map[string]interface{} + result := make([]*Output, 0, len(list.Items)) + for _, item := range list.Items { + n := item.Keys[0].Token.Value().(string) - if err := hcl.DecodeObject(&config, o); err != nil { + var config map[string]interface{} + if err := hcl.DecodeObject(&config, item.Val); err != nil { return nil, err } @@ -325,27 +332,26 @@ func loadOutputsHcl(os *hclobj.Object) ([]*Output, error) { // LoadProvidersHcl recurses into the given HCL object and turns // it into a mapping of provider configs. -func loadProvidersHcl(os *hclobj.Object) ([]*ProviderConfig, error) { - var objects []*hclobj.Object - - // Iterate over all the "provider" blocks and get the keys along with - // their raw configuration objects. We'll parse those later. - for _, o1 := range os.Elem(false) { - for _, o2 := range o1.Elem(true) { - objects = append(objects, o2) - } - } - - if len(objects) == 0 { +func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) { + list = list.Children() + if len(list.Items) == 0 { return nil, nil } // Go through each object and turn it into an actual result. 
- result := make([]*ProviderConfig, 0, len(objects)) - for _, o := range objects { - var config map[string]interface{} + result := make([]*ProviderConfig, 0, len(list.Items)) + for _, item := range list.Items { + n := item.Keys[0].Token.Value().(string) + + var listVal *ast.ObjectList + if ot, ok := item.Val.(*ast.ObjectType); ok { + listVal = ot.List + } else { + return nil, fmt.Errorf("module '%s': should be an object", n) + } - if err := hcl.DecodeObject(&config, o); err != nil { + var config map[string]interface{} + if err := hcl.DecodeObject(&config, item.Val); err != nil { return nil, err } @@ -355,24 +361,24 @@ func loadProvidersHcl(os *hclobj.Object) ([]*ProviderConfig, error) { if err != nil { return nil, fmt.Errorf( "Error reading config for provider config %s: %s", - o.Key, + n, err) } // If we have an alias field, then add those in var alias string - if a := o.Get("alias", false); a != nil { - err := hcl.DecodeObject(&alias, a) + if a := listVal.Filter("alias"); len(a.Items) > 0 { + err := hcl.DecodeObject(&alias, a.Items[0].Val) if err != nil { return nil, fmt.Errorf( "Error reading alias for provider[%s]: %s", - o.Key, + n, err) } } result = append(result, &ProviderConfig{ - Name: o.Key, + Name: n, Alias: alias, RawConfig: rawConfig, }) @@ -387,27 +393,10 @@ func loadProvidersHcl(os *hclobj.Object) ([]*ProviderConfig, error) { // The resulting resources may not be unique, but each resource // represents exactly one resource definition in the HCL configuration. // We leave it up to another pass to merge them together. -func loadResourcesHcl(os *hclobj.Object) ([]*Resource, error) { - var allTypes []*hclobj.Object - - // HCL object iteration is really nasty. Below is likely to make - // no sense to anyone approaching this code. Luckily, it is very heavily - // tested. If working on a bug fix or feature, we recommend writing a - // test first then doing whatever you want to the code below. If you - // break it, the tests will catch it. Likewise, if you change this, - // MAKE SURE you write a test for your change, because its fairly impossible - // to reason about this mess. - // - // Functionally, what the code does below is get the libucl.Objects - // for all the TYPES, such as "aws_security_group". - for _, o1 := range os.Elem(false) { - // Iterate the inner to get the list of types - for _, o2 := range o1.Elem(true) { - // Iterate all of this type to get _all_ the types - for _, o3 := range o2.Elem(false) { - allTypes = append(allTypes, o3) - } - } +func loadResourcesHcl(list *ast.ObjectList) ([]*Resource, error) { + list = list.Children() + if len(list.Items) == 0 { + return nil, nil } // Where all the results will go @@ -415,191 +404,179 @@ func loadResourcesHcl(os *hclobj.Object) ([]*Resource, error) { // Now go over all the types and their children in order to get // all of the actual resources. 
- for _, t := range allTypes { - for _, obj := range t.Elem(true) { - k := obj.Key + for _, item := range list.Items { + if len(item.Keys) != 2 { + // TODO: bad error message + return nil, fmt.Errorf("resource needs exactly 2 names") + } + + t := item.Keys[0].Token.Value().(string) + k := item.Keys[1].Token.Value().(string) + + var listVal *ast.ObjectList + if ot, ok := item.Val.(*ast.ObjectType); ok { + listVal = ot.List + } else { + return nil, fmt.Errorf("resources %s[%s]: should be an object", t, k) + } + + var config map[string]interface{} + if err := hcl.DecodeObject(&config, item.Val); err != nil { + return nil, fmt.Errorf( + "Error reading config for %s[%s]: %s", + t, + k, + err) + } + + // Remove the fields we handle specially + delete(config, "connection") + delete(config, "count") + delete(config, "depends_on") + delete(config, "provisioner") + delete(config, "provider") + delete(config, "lifecycle") + + rawConfig, err := NewRawConfig(config) + if err != nil { + return nil, fmt.Errorf( + "Error reading config for %s[%s]: %s", + t, + k, + err) + } - var config map[string]interface{} - if err := hcl.DecodeObject(&config, obj); err != nil { + // If we have a count, then figure it out + var count string = "1" + if o := listVal.Filter("count"); len(o.Items) > 0 { + err = hcl.DecodeObject(&count, o.Items[0].Val) + if err != nil { return nil, fmt.Errorf( - "Error reading config for %s[%s]: %s", - t.Key, + "Error parsing count for %s[%s]: %s", + t, k, err) } + } + countConfig, err := NewRawConfig(map[string]interface{}{ + "count": count, + }) + if err != nil { + return nil, err + } + countConfig.Key = "count" - // Remove the fields we handle specially - delete(config, "connection") - delete(config, "count") - delete(config, "depends_on") - delete(config, "provisioner") - delete(config, "provider") - delete(config, "lifecycle") - - rawConfig, err := NewRawConfig(config) + // If we have depends fields, then add those in + var dependsOn []string + if o := listVal.Filter("depends_on"); len(o.Items) > 0 { + err := hcl.DecodeObject(&dependsOn, o.Items[0].Val) if err != nil { return nil, fmt.Errorf( - "Error reading config for %s[%s]: %s", - t.Key, + "Error reading depends_on for %s[%s]: %s", + t, k, err) } + } - // If we have a count, then figure it out - var count string = "1" - if o := obj.Get("count", false); o != nil { - err = hcl.DecodeObject(&count, o) - if err != nil { - return nil, fmt.Errorf( - "Error parsing count for %s[%s]: %s", - t.Key, - k, - err) - } - } - countConfig, err := NewRawConfig(map[string]interface{}{ - "count": count, - }) + // If we have connection info, then parse those out + var connInfo map[string]interface{} + if o := listVal.Filter("connection"); len(o.Items) > 0 { + err := hcl.DecodeObject(&connInfo, o.Items[0].Val) if err != nil { - return nil, err - } - countConfig.Key = "count" - - // If we have depends fields, then add those in - var dependsOn []string - if o := obj.Get("depends_on", false); o != nil { - err := hcl.DecodeObject(&dependsOn, o) - if err != nil { - return nil, fmt.Errorf( - "Error reading depends_on for %s[%s]: %s", - t.Key, - k, - err) - } + return nil, fmt.Errorf( + "Error reading connection info for %s[%s]: %s", + t, + k, + err) } + } - // If we have connection info, then parse those out - var connInfo map[string]interface{} - if o := obj.Get("connection", false); o != nil { - err := hcl.DecodeObject(&connInfo, o) - if err != nil { - return nil, fmt.Errorf( - "Error reading connection info for %s[%s]: %s", - t.Key, - k, - err) - } + // If 
we have provisioners, then parse those out + var provisioners []*Provisioner + if os := listVal.Filter("provisioner"); len(os.Items) > 0 { + var err error + provisioners, err = loadProvisionersHcl(os, connInfo) + if err != nil { + return nil, fmt.Errorf( + "Error reading provisioners for %s[%s]: %s", + t, + k, + err) } + } - // If we have provisioners, then parse those out - var provisioners []*Provisioner - if os := obj.Get("provisioner", false); os != nil { - var err error - provisioners, err = loadProvisionersHcl(os, connInfo) - if err != nil { - return nil, fmt.Errorf( - "Error reading provisioners for %s[%s]: %s", - t.Key, - k, - err) - } + // If we have a provider, then parse it out + var provider string + if o := listVal.Filter("provider"); len(o.Items) > 0 { + err := hcl.DecodeObject(&provider, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error reading provider for %s[%s]: %s", + t, + k, + err) } + } - // If we have a provider, then parse it out - var provider string - if o := obj.Get("provider", false); o != nil { - err := hcl.DecodeObject(&provider, o) - if err != nil { - return nil, fmt.Errorf( - "Error reading provider for %s[%s]: %s", - t.Key, - k, - err) - } + // Check if the resource should be re-created before + // destroying the existing instance + var lifecycle ResourceLifecycle + if o := listVal.Filter("lifecycle"); len(o.Items) > 0 { + var raw map[string]interface{} + if err = hcl.DecodeObject(&raw, o.Items[0].Val); err != nil { + return nil, fmt.Errorf( + "Error parsing lifecycle for %s[%s]: %s", + t, + k, + err) } - // Check if the resource should be re-created before - // destroying the existing instance - var lifecycle ResourceLifecycle - if o := obj.Get("lifecycle", false); o != nil { - var raw map[string]interface{} - if err = hcl.DecodeObject(&raw, o); err != nil { - return nil, fmt.Errorf( - "Error parsing lifecycle for %s[%s]: %s", - t.Key, - k, - err) - } - - if err := mapstructure.WeakDecode(raw, &lifecycle); err != nil { - return nil, fmt.Errorf( - "Error parsing lifecycle for %s[%s]: %s", - t.Key, - k, - err) - } + if err := mapstructure.WeakDecode(raw, &lifecycle); err != nil { + return nil, fmt.Errorf( + "Error parsing lifecycle for %s[%s]: %s", + t, + k, + err) } - - result = append(result, &Resource{ - Name: k, - Type: t.Key, - RawCount: countConfig, - RawConfig: rawConfig, - Provisioners: provisioners, - Provider: provider, - DependsOn: dependsOn, - Lifecycle: lifecycle, - }) } + + result = append(result, &Resource{ + Name: k, + Type: t, + RawCount: countConfig, + RawConfig: rawConfig, + Provisioners: provisioners, + Provider: provider, + DependsOn: dependsOn, + Lifecycle: lifecycle, + }) } return result, nil } -func loadProvisionersHcl(os *hclobj.Object, connInfo map[string]interface{}) ([]*Provisioner, error) { - pos := make([]*hclobj.Object, 0, int(os.Len())) - - // Accumulate all the actual provisioner configuration objects. We - // have to iterate twice here: - // - // 1. The first iteration is of the list of `provisioner` blocks. - // 2. The second iteration is of the dictionary within the - // provisioner which will have only one element which is the - // type of provisioner to use along with tis config. - // - // In JSON it looks kind of like this: - // - // [ - // { - // "shell": { - // ... 
- // } - // } - // ] - // - for _, o1 := range os.Elem(false) { - for _, o2 := range o1.Elem(true) { - - switch o1.Type { - case hclobj.ValueTypeList: - for _, o3 := range o2.Elem(true) { - pos = append(pos, o3) - } - case hclobj.ValueTypeObject: - pos = append(pos, o2) - } - } - } - - // Short-circuit if there are no items - if len(pos) == 0 { +func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) ([]*Provisioner, error) { + println(fmt.Sprintf("%#v", list.Items[0])) + list = list.Children() + if len(list.Items) == 0 { return nil, nil } - result := make([]*Provisioner, 0, len(pos)) - for _, po := range pos { + // Go through each object and turn it into an actual result. + result := make([]*Provisioner, 0, len(list.Items)) + for _, item := range list.Items { + n := item.Keys[0].Token.Value().(string) + + var listVal *ast.ObjectList + if ot, ok := item.Val.(*ast.ObjectType); ok { + listVal = ot.List + } else { + return nil, fmt.Errorf("provisioner '%s': should be an object", n) + } + var config map[string]interface{} - if err := hcl.DecodeObject(&config, po); err != nil { + if err := hcl.DecodeObject(&config, item.Val); err != nil { return nil, err } @@ -614,8 +591,8 @@ func loadProvisionersHcl(os *hclobj.Object, connInfo map[string]interface{}) ([] // Check if we have a provisioner-level connection // block that overrides the resource-level var subConnInfo map[string]interface{} - if o := po.Get("connection", false); o != nil { - err := hcl.DecodeObject(&subConnInfo, o) + if o := listVal.Filter("connection"); len(o.Items) > 0 { + err := hcl.DecodeObject(&subConnInfo, o.Items[0].Val) if err != nil { return nil, err } @@ -640,7 +617,7 @@ func loadProvisionersHcl(os *hclobj.Object, connInfo map[string]interface{}) ([] } result = append(result, &Provisioner{ - Type: po.Key, + Type: n, RawConfig: rawConfig, ConnInfo: connRaw, }) From 13c5fdb1542424210dbb955e341b9f92b50cbed3 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 7 Nov 2015 16:55:07 -0800 Subject: [PATCH 17/30] config: remove debug line --- config/loader_hcl.go | 1 - 1 file changed, 1 deletion(-) diff --git a/config/loader_hcl.go b/config/loader_hcl.go index 07b0eef84a0e..c62ca37314ae 100644 --- a/config/loader_hcl.go +++ b/config/loader_hcl.go @@ -557,7 +557,6 @@ func loadResourcesHcl(list *ast.ObjectList) ([]*Resource, error) { } func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) ([]*Provisioner, error) { - println(fmt.Sprintf("%#v", list.Items[0])) list = list.Children() if len(list.Items) == 0 { return nil, nil From 6ecec7fe833e11e09c16b994eb24746fa2e20b77 Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Sun, 8 Nov 2015 19:34:56 +1300 Subject: [PATCH 18/30] Add coalesce func --- config/interpolate_funcs.go | 25 +++++++++++++++++ config/interpolate_funcs_test.go | 27 +++++++++++++++++++ .../docs/configuration/interpolation.html.md | 3 +++ 3 files changed, 55 insertions(+) diff --git a/config/interpolate_funcs.go b/config/interpolate_funcs.go index e98ade2f0c8d..5538763c0ced 100644 --- a/config/interpolate_funcs.go +++ b/config/interpolate_funcs.go @@ -25,6 +25,7 @@ func init() { "cidrhost": interpolationFuncCidrHost(), "cidrnetmask": interpolationFuncCidrNetmask(), "cidrsubnet": interpolationFuncCidrSubnet(), + "coalesce": interpolationFuncCoalesce(), "compact": interpolationFuncCompact(), "concat": interpolationFuncConcat(), "element": interpolationFuncElement(), @@ -145,6 +146,30 @@ func interpolationFuncCidrSubnet() ast.Function { } } +// 
interpolationFuncCoalesce implements the "coalesce" function that +// returns the first non null / empty string from the provided input +func interpolationFuncCoalesce() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Variadic: true, + VariadicType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + if len(args) < 2 { + return nil, fmt.Errorf("must provide at least two arguments") + } + for _, arg := range args { + argument := arg.(string) + + if argument != "" { + return argument, nil + } + } + return "", nil + }, + } +} + // interpolationFuncConcat implements the "concat" function that // concatenates multiple strings. This isn't actually necessary anymore // since our language supports string concat natively, but for backwards diff --git a/config/interpolate_funcs_test.go b/config/interpolate_funcs_test.go index bbfdd484adad..3aeb50db1756 100644 --- a/config/interpolate_funcs_test.go +++ b/config/interpolate_funcs_test.go @@ -147,6 +147,33 @@ func TestInterpolateFuncCidrSubnet(t *testing.T) { }) } +func TestInterpolateFuncCoalesce(t *testing.T) { + testFunction(t, testFunctionConfig{ + Cases: []testFunctionCase{ + { + `${coalesce("first", "second", "third")}`, + "first", + false, + }, + { + `${coalesce("", "second", "third")}`, + "second", + false, + }, + { + `${coalesce("", "", "")}`, + "", + false, + }, + { + `${coalesce("foo")}`, + nil, + true, + }, + }, + }) +} + func TestInterpolateFuncDeprecatedConcat(t *testing.T) { testFunction(t, testFunctionConfig{ Cases: []testFunctionCase{ diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md index 049c71825199..21efbd83e668 100644 --- a/website/source/docs/configuration/interpolation.html.md +++ b/website/source/docs/configuration/interpolation.html.md @@ -95,6 +95,9 @@ The supported built-in functions are: CIDR notation (like ``10.0.0.0/8``) and extends its prefix to include an additional subnet number. For example, ``cidrsubnet("10.0.0.0/8", 8, 2)`` returns ``10.2.0.0/16``. + + * `coalesce(string1, string2, ...)` - Returns the first non-empty value from + the given arguments. At least two arguments must be provided. * `compact(list)` - Removes empty string elements from a list. This can be useful in some cases, for example when passing joined lists as module From 7e676a672f44e5e81ece3b2d81816aaf711c4147 Mon Sep 17 00:00:00 2001 From: Kirill Shirinkin Date: Sun, 8 Nov 2015 20:43:34 +0100 Subject: [PATCH 19/30] provider/openstack: extend documentation of Neutron::FloatingIP --- .../openstack/r/networking_floatingip_v2.html.markdown | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown index 9389eafeb286..fb1c57cbc4dd 100644 --- a/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown @@ -35,6 +35,9 @@ The following arguments are supported: * `pool` - (Required) The name of the pool from which to obtain the floating IP. Changing this creates a new floating IP. +* `port_id` - ID of an existing port with at least one IP address to associate with +this floating IP. 
+ ## Attributes Reference The following attributes are exported: @@ -42,3 +45,4 @@ The following attributes are exported: * `region` - See Argument Reference above. * `pool` - See Argument Reference above. * `address` - The actual floating IP address itself. +* `port_id` - ID of associated port. From 87e384dc97829f3278311833ac1e7933fcd18cec Mon Sep 17 00:00:00 2001 From: Jef Statham Date: Mon, 9 Nov 2015 09:56:38 -0500 Subject: [PATCH 20/30] Fix spelling mistake in aws saml example Example resource name is `aws_iam_saml_provider` instead of `aws_saml_provider`. --- .../source/docs/providers/aws/r/iam_saml_provider.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown b/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown index adba6d350d23..a27d45b64f12 100644 --- a/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown +++ b/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown @@ -13,7 +13,7 @@ Provides an IAM SAML provider. ## Example Usage ``` -resource "aws_saml_provider" "default" { +resource "aws_iam_saml_provider" "default" { name = "myprovider" saml_metadata_document = "${file("saml-metadata.xml")}" } From 13e862673f31ce2c27b7a704946b23bb9c317a5c Mon Sep 17 00:00:00 2001 From: Jef Statham Date: Mon, 9 Nov 2015 10:52:09 -0500 Subject: [PATCH 21/30] Fix spelling mistake in aws iam saml title --- .../source/docs/providers/aws/r/iam_saml_provider.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown b/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown index a27d45b64f12..49fe6ec73e83 100644 --- a/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown +++ b/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown @@ -1,6 +1,6 @@ --- layout: "aws" -page_title: "AWS: aws_saml_provider" +page_title: "AWS: aws_iam_saml_provider" sidebar_current: "docs-aws-resource-iam-saml-provider" description: |- Provides an IAM SAML provider. From a80f6fd9795e731c72f4be3c596ccd28ebdcc088 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Mon, 9 Nov 2015 10:18:53 -0600 Subject: [PATCH 22/30] Always deploy from stable website branch --- scripts/website_push.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/website_push.sh b/scripts/website_push.sh index fa58fd694aed..53ed59777c0d 100755 --- a/scripts/website_push.sh +++ b/scripts/website_push.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Switch to the stable-website branch +git checkout stable-website + # Set the tmpdir if [ -z "$TMPDIR" ]; then TMPDIR="/tmp" From f4c03ec2a6b8636a3ca74d4dc75470ffaa019693 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Mon, 9 Nov 2015 11:38:51 -0500 Subject: [PATCH 23/30] Reflect new comment format in stringer.go As of November 8th 2015, (4b07c5ce8a), the word "Code" is prepended to the comments in Go source files generated by the stringer utility. 
--- command/counthookaction_string.go | 2 +- config/lang/ast/type_string.go | 2 +- helper/schema/getsource_string.go | 2 +- helper/schema/valuetype_string.go | 2 +- terraform/graphnodeconfigtype_string.go | 2 +- terraform/instancetype_string.go | 2 +- terraform/walkoperation_string.go | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/command/counthookaction_string.go b/command/counthookaction_string.go index 8b90dc50bcfd..c0c40d0de606 100644 --- a/command/counthookaction_string.go +++ b/command/counthookaction_string.go @@ -1,4 +1,4 @@ -// generated by stringer -type=countHookAction hook_count_action.go; DO NOT EDIT +// Code generated by "stringer -type=countHookAction hook_count_action.go"; DO NOT EDIT package command diff --git a/config/lang/ast/type_string.go b/config/lang/ast/type_string.go index d9b5a2df4c59..5410e011e1c5 100644 --- a/config/lang/ast/type_string.go +++ b/config/lang/ast/type_string.go @@ -1,4 +1,4 @@ -// generated by stringer -type=Type; DO NOT EDIT +// Code generated by "stringer -type=Type"; DO NOT EDIT package ast diff --git a/helper/schema/getsource_string.go b/helper/schema/getsource_string.go index 039bb561a084..790dbff919eb 100644 --- a/helper/schema/getsource_string.go +++ b/helper/schema/getsource_string.go @@ -1,4 +1,4 @@ -// generated by stringer -type=getSource resource_data_get_source.go; DO NOT EDIT +// Code generated by "stringer -type=getSource resource_data_get_source.go"; DO NOT EDIT package schema diff --git a/helper/schema/valuetype_string.go b/helper/schema/valuetype_string.go index 42442a46b5ef..08f008450feb 100644 --- a/helper/schema/valuetype_string.go +++ b/helper/schema/valuetype_string.go @@ -1,4 +1,4 @@ -// generated by stringer -type=ValueType valuetype.go; DO NOT EDIT +// Code generated by "stringer -type=ValueType valuetype.go"; DO NOT EDIT package schema diff --git a/terraform/graphnodeconfigtype_string.go b/terraform/graphnodeconfigtype_string.go index d8c1724f47a2..9ea0acbebe92 100644 --- a/terraform/graphnodeconfigtype_string.go +++ b/terraform/graphnodeconfigtype_string.go @@ -1,4 +1,4 @@ -// generated by stringer -type=GraphNodeConfigType graph_config_node_type.go; DO NOT EDIT +// Code generated by "stringer -type=GraphNodeConfigType graph_config_node_type.go"; DO NOT EDIT package terraform diff --git a/terraform/instancetype_string.go b/terraform/instancetype_string.go index 3114bc1571f5..f65414b347e9 100644 --- a/terraform/instancetype_string.go +++ b/terraform/instancetype_string.go @@ -1,4 +1,4 @@ -// generated by stringer -type=InstanceType instancetype.go; DO NOT EDIT +// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT package terraform diff --git a/terraform/walkoperation_string.go b/terraform/walkoperation_string.go index 1ce3661c49dd..0811fc87441e 100644 --- a/terraform/walkoperation_string.go +++ b/terraform/walkoperation_string.go @@ -1,4 +1,4 @@ -// generated by stringer -type=walkOperation graph_walk_operation.go; DO NOT EDIT +// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT package terraform From 796fdeef53d4653d4af983bc2ce879edf1dac4c0 Mon Sep 17 00:00:00 2001 From: "Michael H. Oshita" Date: Tue, 10 Nov 2015 01:42:59 +0900 Subject: [PATCH 24/30] Update rds_cluster.html.markdown I needed `db_subnet_group_name` in the rds_cluster resource as well when creating on a non-default VPC. 
https://github.com/hashicorp/terraform/pull/2935#issuecomment-133481106 --- website/source/docs/providers/aws/r/rds_cluster.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/source/docs/providers/aws/r/rds_cluster.html.markdown b/website/source/docs/providers/aws/r/rds_cluster.html.markdown index fb1f0dac852c..c60e6ef294e8 100644 --- a/website/source/docs/providers/aws/r/rds_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster.html.markdown @@ -63,6 +63,7 @@ Default: A 30-minute window selected at random from an 8-hour block of time per * `apply_immediately` - (Optional) Specifies whether any cluster modifications are applied immediately, or during the next maintenance window. Default is `false`. See [Amazon RDS Documentation for more information.](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html) +* `db_subnet_group_name` - (Optional) A DB subnet group to associate with this DB instance. ## Attributes Reference From 29636dc51d26110608ba3895803a5b60de938f52 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Mon, 9 Nov 2015 17:31:40 +0000 Subject: [PATCH 25/30] provider/openstack: Revert Security Group Rule Fix This commit reverts the patch from #3796. It has been discovered that multiple rules are being reported out of order when the configuration is applied multiple times. I feel this is a larger issue than the bug this patch originally fixed, so until I can resolve it, I am reverting the patch. --- .../resource_openstack_compute_secgroup_v2.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go b/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go index 202c4d979499..3cc0cbf0cc64 100644 --- a/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go +++ b/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go @@ -131,10 +131,10 @@ func resourceComputeSecGroupV2Read(d *schema.ResourceData, meta interface{}) err d.Set("description", sg.Description) rtm := rulesToMap(sg.Rules) for _, v := range rtm { - if v["from_group_id"] == d.Get("name") { - v["self"] = true + if v["group"] == d.Get("name") { + v["self"] = "1" } else { - v["self"] = false + v["self"] = "0" } } log.Printf("[DEBUG] rulesToMap(sg.Rules): %+v", rtm) @@ -283,12 +283,12 @@ func rulesToMap(sgrs []secgroups.Rule) []map[string]interface{} { sgrMap := make([]map[string]interface{}, len(sgrs)) for i, sgr := range sgrs { sgrMap[i] = map[string]interface{}{ - "id": sgr.ID, - "from_port": sgr.FromPort, - "to_port": sgr.ToPort, - "ip_protocol": sgr.IPProtocol, - "cidr": sgr.IPRange.CIDR, - "from_group_id": sgr.Group.Name, + "id": sgr.ID, + "from_port": sgr.FromPort, + "to_port": sgr.ToPort, + "ip_protocol": sgr.IPProtocol, + "cidr": sgr.IPRange.CIDR, + "group": sgr.Group.Name, } } return sgrMap From 3f06a7c56780fcf539d9ebd8d2313b4c37c22cbf Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Mon, 9 Nov 2015 10:58:06 -0700 Subject: [PATCH 26/30] Update CHANGELOG.md --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f010a10422c7..554c1666ab8d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -61,7 +61,6 @@ BUG FIXES: * provider/openstack: Fix boot from volume [GH-3206] * provider/openstack: Fix crashing when image is no longer accessible [GH-2189] * provider/openstack: Better handling of network resource state changes [GH-3712] - * provider/openstack: Fix issue 
preventing security group rules from being removed [GH-3796] * provider/openstack: Fix crashing when no security group is specified [GH-3801] ## 0.6.6 (October 23, 2015) From 2a0334125c0db514bc1fb5aa1cb5df9845342f5e Mon Sep 17 00:00:00 2001 From: James Nugent Date: Mon, 9 Nov 2015 15:15:25 -0500 Subject: [PATCH 27/30] Add test attempting to reproduce #2598 This attempts to reproduce the issue described in #2598 whereby outputs added after an apply are not reflected in the output. As per the issue the outputs are described using the JSON syntax. --- terraform/context_apply_test.go | 49 +++++++++++++++++++ terraform/terraform_test.go | 16 ++++++ .../apply-output-add-after/main.tf | 6 +++ .../apply-output-add-after/outputs.tf.json | 10 ++++ .../apply-output-add-before/main.tf | 6 +++ .../apply-output-add-before/outputs.tf.json | 7 +++ 6 files changed, 94 insertions(+) create mode 100644 terraform/test-fixtures/apply-output-add-after/main.tf create mode 100644 terraform/test-fixtures/apply-output-add-after/outputs.tf.json create mode 100644 terraform/test-fixtures/apply-output-add-before/main.tf create mode 100644 terraform/test-fixtures/apply-output-add-before/outputs.tf.json diff --git a/terraform/context_apply_test.go b/terraform/context_apply_test.go index 1fd069db08ae..4060dd3e37cd 100644 --- a/terraform/context_apply_test.go +++ b/terraform/context_apply_test.go @@ -2851,6 +2851,55 @@ func TestContext2Apply_outputInvalid(t *testing.T) { } } +func TestContext2Apply_outputAdd(t *testing.T) { + m1 := testModule(t, "apply-output-add-before") + p1 := testProvider("aws") + p1.ApplyFn = testApplyFn + p1.DiffFn = testDiffFn + ctx1 := testContext2(t, &ContextOpts{ + Module: m1, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p1), + }, + }) + + if _, err := ctx1.Plan(); err != nil { + t.Fatalf("err: %s", err) + } + + state1, err := ctx1.Apply() + if err != nil { + t.Fatalf("err: %s", err) + } + + m2 := testModule(t, "apply-output-add-after") + p2 := testProvider("aws") + p2.ApplyFn = testApplyFn + p2.DiffFn = testDiffFn + ctx2 := testContext2(t, &ContextOpts{ + Module: m2, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p2), + }, + State: state1, + }) + + if _, err := ctx2.Plan(); err != nil { + t.Fatalf("err: %s", err) + } + + state2, err := ctx2.Apply() + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(state2.String()) + expected := strings.TrimSpace(testTerraformApplyOutputAddStr) + if actual != expected { + t.Fatalf("bad: \n%s", actual) + } +} + func TestContext2Apply_outputList(t *testing.T) { m := testModule(t, "apply-output-list") p := testProvider("aws") diff --git a/terraform/terraform_test.go b/terraform/terraform_test.go index d17726acb457..3b1653f431a4 100644 --- a/terraform/terraform_test.go +++ b/terraform/terraform_test.go @@ -575,6 +575,22 @@ Outputs: foo_num = 2 ` +const testTerraformApplyOutputAddStr = ` +aws_instance.test.0: + ID = foo + foo = foo0 + type = aws_instance +aws_instance.test.1: + ID = foo + foo = foo1 + type = aws_instance + +Outputs: + +firstOutput = foo0 +secondOutput = foo1 +` + const testTerraformApplyOutputListStr = ` aws_instance.bar.0: ID = foo diff --git a/terraform/test-fixtures/apply-output-add-after/main.tf b/terraform/test-fixtures/apply-output-add-after/main.tf new file mode 100644 index 000000000000..1c10eaafc571 --- /dev/null +++ b/terraform/test-fixtures/apply-output-add-after/main.tf @@ -0,0 +1,6 @@ +provider "aws" {} + +resource "aws_instance" "test" { + 
foo = "${format("foo%d", count.index)}" + count = 2 +} diff --git a/terraform/test-fixtures/apply-output-add-after/outputs.tf.json b/terraform/test-fixtures/apply-output-add-after/outputs.tf.json new file mode 100644 index 000000000000..32e96b0ee07c --- /dev/null +++ b/terraform/test-fixtures/apply-output-add-after/outputs.tf.json @@ -0,0 +1,10 @@ +{ + "output": { + "firstOutput": { + "value": "${aws_instance.test.0.foo}" + }, + "secondOutput": { + "value": "${aws_instance.test.1.foo}" + } + } +} diff --git a/terraform/test-fixtures/apply-output-add-before/main.tf b/terraform/test-fixtures/apply-output-add-before/main.tf new file mode 100644 index 000000000000..1c10eaafc571 --- /dev/null +++ b/terraform/test-fixtures/apply-output-add-before/main.tf @@ -0,0 +1,6 @@ +provider "aws" {} + +resource "aws_instance" "test" { + foo = "${format("foo%d", count.index)}" + count = 2 +} diff --git a/terraform/test-fixtures/apply-output-add-before/outputs.tf.json b/terraform/test-fixtures/apply-output-add-before/outputs.tf.json new file mode 100644 index 000000000000..238668ef3d17 --- /dev/null +++ b/terraform/test-fixtures/apply-output-add-before/outputs.tf.json @@ -0,0 +1,7 @@ +{ + "output": { + "firstOutput": { + "value": "${aws_instance.test.0.foo}" + } + } +} From 64d982ac9ed2fe17a50967952bfce5e47211139d Mon Sep 17 00:00:00 2001 From: mcinteer Date: Tue, 10 Nov 2015 11:27:45 +1300 Subject: [PATCH 28/30] Change the docs as the token can be explicitly set This tripped me up today when I was trying to connect using MFA. I had a look at the source and found the token property, tested it out and low and behold it worked! Hopefully this saves someone else going through the same pain --- website/source/docs/providers/aws/index.html.markdown | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/website/source/docs/providers/aws/index.html.markdown b/website/source/docs/providers/aws/index.html.markdown index 05efd5700f9a..7199111c2b17 100644 --- a/website/source/docs/providers/aws/index.html.markdown +++ b/website/source/docs/providers/aws/index.html.markdown @@ -59,5 +59,4 @@ The following arguments are supported in the `provider` block: * `kinesis_endpoint` - (Optional) Use this to override the default endpoint URL constructed from the `region`. It's typically used to connect to kinesalite. -In addition to the above parameters, the `AWS_SESSION_TOKEN` environmental -variable can be set to set an MFA token. +* `token` - (Optional) Use this to set an MFA token. It can also be sourced from the `AWS_SECURITY_TOKEN` environment variable. From a49b162dd1782d35f69d895ac4fba7549b0304d3 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Tue, 10 Nov 2015 13:31:15 -0500 Subject: [PATCH 29/30] Prompt for input variables before context validate Also adds a regression test using Mock UI. Fixes #3767. 
--- command/plan.go | 8 +++++--- command/plan_test.go | 25 +++++++++++++++++++++++++ 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/command/plan.go b/command/plan.go index cd1aeaec6f2a..8c5fda5cc9ad 100644 --- a/command/plan.go +++ b/command/plan.go @@ -68,14 +68,16 @@ func (c *PlanCommand) Run(args []string) int { c.Ui.Error(err.Error()) return 1 } - if !validateContext(ctx, c.Ui) { - return 1 - } + if err := ctx.Input(c.InputMode()); err != nil { c.Ui.Error(fmt.Sprintf("Error configuring: %s", err)) return 1 } + if !validateContext(ctx, c.Ui) { + return 1 + } + if refresh { c.Ui.Output("Refreshing Terraform state prior to plan...\n") state, err := ctx.Refresh() diff --git a/command/plan_test.go b/command/plan_test.go index d49200a5fe6d..d0d14bc5670f 100644 --- a/command/plan_test.go +++ b/command/plan_test.go @@ -1,6 +1,7 @@ package command import ( + "bytes" "io/ioutil" "os" "path/filepath" @@ -330,6 +331,30 @@ func TestPlan_vars(t *testing.T) { } } +func TestPlan_varsUnset(t *testing.T) { + // Disable test mode so input would be asked + test = false + defer func() { test = true }() + + defaultInputReader = bytes.NewBufferString("bar\n") + + p := testProvider() + ui := new(cli.MockUi) + c := &PlanCommand{ + Meta: Meta{ + ContextOpts: testCtxConfig(p), + Ui: ui, + }, + } + + args := []string{ + testFixturePath("plan-vars"), + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } +} + func TestPlan_varFile(t *testing.T) { varFilePath := testTempFile(t) if err := ioutil.WriteFile(varFilePath, []byte(planVarFile), 0644); err != nil { From bd35d2157ab500493d4f1eb464925c71217b031d Mon Sep 17 00:00:00 2001 From: James Nugent Date: Tue, 10 Nov 2015 14:11:48 -0500 Subject: [PATCH 30/30] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 554c1666ab8d..8c0be5b88d00 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ BUG FIXES: * `terraform remote config`: update `--help` output [GH-3632] * core: modules on Git branches now update properly [GH-1568] + * core: Fix issue preventing input prompts for unset variables during plan [GH-3843] * provider/google: Timeout when deleting large instance_group_manager [GH-3591] * provider/aws: Fix issue with order of Termincation Policies in AutoScaling Groups. This will introduce plans on upgrade to this version, in order to correct the ordering [GH-2890]
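As a usage illustration only (not part of any patch above), the `coalesce` interpolation introduced in PATCH 18/30 can be used to fall back from an optional override to a default value. The variable and resource names below are hypothetical; the behaviour assumed — the first non-empty string is returned, and at least two arguments are required — follows the function's implementation and test cases in this series.

```
# Illustrative sketch: variable and resource names are hypothetical.
variable "ami_override" {
    default = ""
}

variable "ami_default" {
    default = "ami-12345678"
}

resource "aws_instance" "example" {
    # coalesce returns the first non-empty string, so ami_override wins when
    # it is set; calling coalesce with fewer than two arguments is an error.
    ami           = "${coalesce(var.ami_override, var.ami_default)}"
    instance_type = "t2.micro"
}
```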