diff --git a/.changelog/24982.txt b/.changelog/24982.txt new file mode 100644 index 00000000000..f4f23362caa --- /dev/null +++ b/.changelog/24982.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_redshift_cluster: Add `default_iam_role_arn`, `maintenance_track_name`, and `manual_snapshot_retention_period` arguments. +``` + +```release-note:enhancement +data-source/aws_redshift_cluster: Add `arn`, `cluster_nodes`, `default_iam_role_arn`, `maintenance_track_name`, `manual_snapshot_retention_period`, `log_destination_type`, and `log_exports` attributes. +``` \ No newline at end of file diff --git a/internal/service/redshift/cluster.go b/internal/service/redshift/cluster.go index 02126410d23..225046adcd4 100644 --- a/internal/service/redshift/cluster.go +++ b/internal/service/redshift/cluster.go @@ -153,6 +153,11 @@ func ResourceCluster() *schema.Resource { validation.StringMatch(regexp.MustCompile(`(?i)^[a-z_]`), "first character must be a letter or underscore"), ), }, + "default_iam_role_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, "dns_name": { Type: schema.TypeString, Computed: true, @@ -235,6 +240,17 @@ func ResourceCluster() *schema.Resource { }, }, }, + "maintenance_track_name": { + Type: schema.TypeString, + Optional: true, + Default: "current", + }, + "manual_snapshot_retention_period": { + Type: schema.TypeInt, + Optional: true, + Default: -1, + ValidateFunc: validation.IntBetween(-1, 3653), + }, "master_password": { Type: schema.TypeString, Optional: true, @@ -425,6 +441,11 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { input.ClusterSubnetGroupName = aws.String(v.(string)) } + if v, ok := d.GetOk("default_iam_role_arn"); ok { + backupInput.DefaultIamRoleArn = aws.String(v.(string)) + input.DefaultIamRoleArn = aws.String(v.(string)) + } + if v, ok := d.GetOk("elastic_ip"); ok { backupInput.ElasticIp = aws.String(v.(string)) input.ElasticIp = aws.String(v.(string)) @@ -445,6 +466,16 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { input.KmsKeyId = aws.String(v.(string)) } + if v, ok := d.GetOk("maintenance_track_name"); ok { + backupInput.MaintenanceTrackName = aws.String(v.(string)) + input.MaintenanceTrackName = aws.String(v.(string)) + } + + if v, ok := d.GetOk("manual_snapshot_retention_period"); ok { + backupInput.ManualSnapshotRetentionPeriod = aws.Int64(int64(v.(int))) + input.ManualSnapshotRetentionPeriod = aws.Int64(int64(v.(int))) + } + if v, ok := d.GetOk("number_of_nodes"); ok { backupInput.NumberOfNodes = aws.Int64(int64(v.(int))) // NumberOfNodes set below for CreateCluster. 
@@ -566,7 +597,7 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error { d.Set("allow_version_upgrade", rsc.AllowVersionUpgrade) arn := arn.ARN{ Partition: meta.(*conns.AWSClient).Partition, - Service: "redshift", + Service: redshift.ServiceName, Region: meta.(*conns.AWSClient).Region, AccountID: meta.(*conns.AWSClient).AccountID, Resource: fmt.Sprintf("cluster:%s", d.Id()), @@ -579,7 +610,7 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error { d.Set("availability_zone", rsc.AvailabilityZone) azr, err := clusterAvailabilityZoneRelocationStatus(rsc) if err != nil { - return fmt.Errorf("error reading Redshift Cluster (%s): %w", d.Id(), err) + return err } d.Set("availability_zone_relocation_enabled", azr) d.Set("cluster_identifier", rsc.ClusterIdentifier) @@ -597,12 +628,15 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error { } d.Set("cluster_version", rsc.ClusterVersion) d.Set("database_name", rsc.DBName) + d.Set("default_iam_role_arn", rsc.DefaultIamRoleArn) d.Set("encrypted", rsc.Encrypted) d.Set("enhanced_vpc_routing", rsc.EnhancedVpcRouting) d.Set("kms_key_id", rsc.KmsKeyId) if err := d.Set("logging", flattenLogging(loggingStatus)); err != nil { return fmt.Errorf("error setting logging: %w", err) } + d.Set("maintenance_track_name", rsc.MaintenanceTrackName) + d.Set("manual_snapshot_retention_period", rsc.ManualSnapshotRetentionPeriod) d.Set("master_username", rsc.MasterUsername) d.Set("node_type", rsc.NodeType) d.Set("number_of_nodes", rsc.NumberOfNodes) @@ -690,6 +724,14 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { input.ClusterSecurityGroups = flex.ExpandStringSet(d.Get("cluster_security_groups").(*schema.Set)) } + if d.HasChange("maintenance_track_name") { + input.MaintenanceTrackName = aws.String(d.Get("maintenance_track_name").(string)) + } + + if d.HasChange("manual_snapshot_retention_period") { + input.ManualSnapshotRetentionPeriod = aws.Int64(int64(d.Get("manual_snapshot_retention_period").(int))) + } + // If the cluster type, node type, or number of nodes changed, then the AWS API expects all three // items to be sent over. 
if d.HasChanges("cluster_type", "node_type", "number_of_nodes") { @@ -751,7 +793,7 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { } } - if d.HasChange("iam_roles") { + if d.HasChanges("iam_roles", "default_iam_role_arn") { o, n := d.GetChange("iam_roles") if o == nil { o = new(schema.Set) @@ -769,6 +811,7 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { AddIamRoles: flex.ExpandStringSet(add), ClusterIdentifier: aws.String(d.Id()), RemoveIamRoles: flex.ExpandStringSet(del), + DefaultIamRoleArn: aws.String(d.Get("default_iam_role_arn").(string)), } log.Printf("[DEBUG] Modifying Redshift Cluster IAM Roles: %s", input) diff --git a/internal/service/redshift/cluster_data_source.go b/internal/service/redshift/cluster_data_source.go index 2d0a741e7ba..57a9c9c0a97 100644 --- a/internal/service/redshift/cluster_data_source.go +++ b/internal/service/redshift/cluster_data_source.go @@ -2,12 +2,13 @@ package redshift import ( "fmt" - "log" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/redshift" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" ) @@ -24,6 +25,10 @@ func DataSourceCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, "automated_snapshot_retention_period": { Type: schema.TypeInt, Computed: true, @@ -44,6 +49,26 @@ func DataSourceCluster() *schema.Resource { Type: schema.TypeString, Required: true, }, + "cluster_nodes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "node_role": { + Type: schema.TypeString, + Computed: true, + }, + "private_ip_address": { + Type: schema.TypeString, + Computed: true, + }, + "public_ip_address": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, "cluster_parameter_group_name": { Type: schema.TypeString, Computed: true, @@ -77,6 +102,10 @@ func DataSourceCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "default_iam_role_arn": { + Type: schema.TypeString, + Computed: true, + }, "elastic_ip": { Type: schema.TypeString, Computed: true, @@ -110,6 +139,14 @@ func DataSourceCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "maintenance_track_name": { + Type: schema.TypeString, + Computed: true, + }, + "manual_snapshot_retention_period": { + Type: schema.TypeInt, + Computed: true, + }, "node_type": { Type: schema.TypeString, Computed: true, @@ -134,6 +171,15 @@ func DataSourceCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "log_destination_type": { + Type: schema.TypeString, + Computed: true, + }, + "log_exports": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "tags": tftags.TagsSchema(), "vpc_id": { Type: schema.TypeString, @@ -152,26 +198,23 @@ func dataSourceClusterRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).RedshiftConn ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig - cluster := d.Get("cluster_identifier").(string) - - log.Printf("[INFO] Reading Redshift Cluster Information: %s", cluster) - - resp, err := conn.DescribeClusters(&redshift.DescribeClustersInput{ - ClusterIdentifier: aws.String(cluster), - }) 
+ clusterID := d.Get("cluster_identifier").(string) + rsc, err := FindClusterByID(conn, clusterID) if err != nil { - return fmt.Errorf("Error describing Redshift Cluster: %s, error: %w", cluster, err) + return fmt.Errorf("reading Redshift Cluster (%s): %w", clusterID, err) } - if resp.Clusters == nil || len(resp.Clusters) == 0 || resp.Clusters[0] == nil { - return fmt.Errorf("Error describing Redshift Cluster: %s, cluster information not found", cluster) - } - - rsc := resp.Clusters[0] - - d.SetId(cluster) + d.SetId(clusterID) d.Set("allow_version_upgrade", rsc.AllowVersionUpgrade) + arn := arn.ARN{ + Partition: meta.(*conns.AWSClient).Partition, + Service: redshift.ServiceName, + Region: meta.(*conns.AWSClient).Region, + AccountID: meta.(*conns.AWSClient).AccountID, + Resource: fmt.Sprintf("cluster:%s", d.Id()), + }.String() + d.Set("arn", arn) d.Set("automated_snapshot_retention_period", rsc.AutomatedSnapshotRetentionPeriod) if rsc.AquaConfiguration != nil { d.Set("aqua_configuration_status", rsc.AquaConfiguration.AquaConfigurationStatus) @@ -179,10 +222,13 @@ func dataSourceClusterRead(d *schema.ResourceData, meta interface{}) error { d.Set("availability_zone", rsc.AvailabilityZone) azr, err := clusterAvailabilityZoneRelocationStatus(rsc) if err != nil { - return fmt.Errorf("error reading Redshift Cluster (%s): %w", d.Id(), err) + return err } d.Set("availability_zone_relocation_enabled", azr) d.Set("cluster_identifier", rsc.ClusterIdentifier) + if err := d.Set("cluster_nodes", flattenClusterNodes(rsc.ClusterNodes)); err != nil { + return fmt.Errorf("setting cluster_nodes: %w", err) + } if len(rsc.ClusterParameterGroups) > 0 { d.Set("cluster_parameter_group_name", rsc.ClusterParameterGroups[0].ParameterGroupName) @@ -193,18 +239,16 @@ func dataSourceClusterRead(d *schema.ResourceData, meta interface{}) error { var csg []string for _, g := range rsc.ClusterSecurityGroups { - csg = append(csg, *g.ClusterSecurityGroupName) - } - if err := d.Set("cluster_security_groups", csg); err != nil { - return fmt.Errorf("Error saving Cluster Security Group Names to state for Redshift Cluster (%s): %w", cluster, err) + csg = append(csg, aws.StringValue(g.ClusterSecurityGroupName)) } + d.Set("cluster_security_groups", csg) d.Set("cluster_subnet_group_name", rsc.ClusterSubnetGroupName) if len(rsc.ClusterNodes) > 1 { - d.Set("cluster_type", "multi-node") + d.Set("cluster_type", clusterTypeMultiNode) } else { - d.Set("cluster_type", "single-node") + d.Set("cluster_type", clusterTypeSingleNode) } d.Set("cluster_version", rsc.ClusterVersion) @@ -224,11 +268,9 @@ func dataSourceClusterRead(d *schema.ResourceData, meta interface{}) error { var iamRoles []string for _, i := range rsc.IamRoles { - iamRoles = append(iamRoles, *i.IamRoleArn) - } - if err := d.Set("iam_roles", iamRoles); err != nil { - return fmt.Errorf("Error saving IAM Roles to state for Redshift Cluster (%s): %w", cluster, err) + iamRoles = append(iamRoles, aws.StringValue(i.IamRoleArn)) } + d.Set("iam_roles", iamRoles) d.Set("kms_key_id", rsc.KmsKeyId) d.Set("master_username", rsc.MasterUsername) @@ -237,34 +279,36 @@ func dataSourceClusterRead(d *schema.ResourceData, meta interface{}) error { d.Set("port", rsc.Endpoint.Port) d.Set("preferred_maintenance_window", rsc.PreferredMaintenanceWindow) d.Set("publicly_accessible", rsc.PubliclyAccessible) + d.Set("default_iam_role_arn", rsc.DefaultIamRoleArn) + d.Set("maintenance_track_name", rsc.MaintenanceTrackName) + d.Set("manual_snapshot_retention_period", rsc.ManualSnapshotRetentionPeriod) if err 
:= d.Set("tags", KeyValueTags(rsc.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %w", err) + return fmt.Errorf("setting tags: %w", err) } d.Set("vpc_id", rsc.VpcId) var vpcg []string for _, g := range rsc.VpcSecurityGroups { - vpcg = append(vpcg, *g.VpcSecurityGroupId) - } - if err := d.Set("vpc_security_group_ids", vpcg); err != nil { - return fmt.Errorf("Error saving VPC Security Group IDs to state for Redshift Cluster (%s): %w", cluster, err) + vpcg = append(vpcg, aws.StringValue(g.VpcSecurityGroupId)) } + d.Set("vpc_security_group_ids", vpcg) - log.Printf("[INFO] Reading Redshift Cluster Logging Status: %s", cluster) - loggingStatus, loggingErr := conn.DescribeLoggingStatus(&redshift.DescribeLoggingStatusInput{ - ClusterIdentifier: aws.String(cluster), + loggingStatus, err := conn.DescribeLoggingStatus(&redshift.DescribeLoggingStatusInput{ + ClusterIdentifier: aws.String(clusterID), }) - if loggingErr != nil { - return loggingErr + if err != nil { + return fmt.Errorf("reading Redshift Cluster (%s) logging status: %w", d.Id(), err) } if loggingStatus != nil && aws.BoolValue(loggingStatus.LoggingEnabled) { d.Set("enable_logging", loggingStatus.LoggingEnabled) d.Set("bucket_name", loggingStatus.BucketName) d.Set("s3_key_prefix", loggingStatus.S3KeyPrefix) + d.Set("log_exports", flex.FlattenStringSet(loggingStatus.LogExports)) + d.Set("log_destination_type", loggingStatus.LogDestinationType) } return nil diff --git a/internal/service/redshift/cluster_data_source_test.go b/internal/service/redshift/cluster_data_source_test.go index 43bebf72db6..6d512ecc52b 100644 --- a/internal/service/redshift/cluster_data_source_test.go +++ b/internal/service/redshift/cluster_data_source_test.go @@ -23,6 +23,8 @@ func TestAccRedshiftClusterDataSource_basic(t *testing.T) { { Config: testAccClusterDataSourceConfig_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "cluster_nodes.#", "1"), + resource.TestCheckResourceAttrSet(dataSourceName, "cluster_nodes.0.public_ip_address"), resource.TestCheckResourceAttrSet(dataSourceName, "allow_version_upgrade"), resource.TestCheckResourceAttrSet(dataSourceName, "automated_snapshot_retention_period"), resource.TestCheckResourceAttrSet(dataSourceName, "availability_zone"), @@ -40,6 +42,9 @@ func TestAccRedshiftClusterDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrSet(dataSourceName, "number_of_nodes"), resource.TestCheckResourceAttrSet(dataSourceName, "port"), resource.TestCheckResourceAttrSet(dataSourceName, "preferred_maintenance_window"), + resource.TestCheckResourceAttrSet(dataSourceName, "manual_snapshot_retention_period"), + resource.TestCheckResourceAttrSet(dataSourceName, "maintenance_track_name"), + resource.TestCheckResourceAttrSet(dataSourceName, "arn"), resource.TestCheckResourceAttrSet(dataSourceName, "publicly_accessible"), resource.TestCheckResourceAttrPair(dataSourceName, "availability_zone_relocation_enabled", resourceName, "availability_zone_relocation_enabled"), ), }, @@ -133,38 +138,10 @@ data "aws_redshift_cluster" "test" { } func testAccClusterDataSourceConfig_vpc(rName string) string { - return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` -resource "aws_vpc" "test" { - cidr_block = "10.1.0.0/16" - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test1" { - cidr_block = "10.1.1.0/24" - availability_zone = data.aws_availability_zones.available.names[0] - vpc_id = aws_vpc.test.id - - 
tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test2" { - cidr_block = "10.1.2.0/24" - availability_zone = data.aws_availability_zones.available.names[1] - vpc_id = aws_vpc.test.id - - tags = { - Name = %[1]q - } -} - + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 2), fmt.Sprintf(` resource "aws_redshift_subnet_group" "test" { name = %[1]q - subnet_ids = [aws_subnet.test1.id, aws_subnet.test2.id] + subnet_ids = aws_subnet.test[*].id } resource "aws_security_group" "test" { diff --git a/internal/service/redshift/cluster_test.go b/internal/service/redshift/cluster_test.go index 123975dd8aa..86de13d7c8f 100644 --- a/internal/service/redshift/cluster_test.go +++ b/internal/service/redshift/cluster_test.go @@ -41,6 +41,9 @@ func TestAccRedshiftCluster_basic(t *testing.T) { resource.TestMatchResourceAttr(resourceName, "dns_name", regexp.MustCompile(fmt.Sprintf("^%s.*\\.redshift\\..*", rName))), resource.TestCheckResourceAttr(resourceName, "availability_zone_relocation_enabled", "false"), resource.TestCheckResourceAttr(resourceName, "aqua_configuration_status", "auto"), + resource.TestCheckResourceAttr(resourceName, "maintenance_track_name", "current"), + resource.TestCheckResourceAttr(resourceName, "manual_snapshot_retention_period", "-1"), + resource.TestCheckResourceAttr(resourceName, "iam_roles.#", "0"), resource.TestCheckResourceAttr(resourceName, "tags.#", "0"), ), }, diff --git a/internal/service/redshift/find.go b/internal/service/redshift/find.go index c6eb321ec7d..3e496447824 100644 --- a/internal/service/redshift/find.go +++ b/internal/service/redshift/find.go @@ -10,12 +10,22 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func FindClusterByID(conn *redshift.Redshift, id string) (*redshift.Cluster, error) { - input := &redshift.DescribeClustersInput{ - ClusterIdentifier: aws.String(id), - } +func findClusters(conn *redshift.Redshift, input *redshift.DescribeClustersInput) ([]*redshift.Cluster, error) { + var output []*redshift.Cluster + + err := conn.DescribeClustersPages(input, func(page *redshift.DescribeClustersOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.Clusters { + if v != nil { + output = append(output, v) + } + } - output, err := conn.DescribeClusters(input) + return !lastPage + }) if tfawserr.ErrCodeEquals(err, redshift.ErrCodeClusterNotFoundFault) { return nil, &resource.NotFoundError{ @@ -28,15 +38,46 @@ func FindClusterByID(conn *redshift.Redshift, id string) (*redshift.Cluster, err return nil, err } - if output == nil || len(output.Clusters) == 0 || output.Clusters[0] == nil { + return output, nil +} + +func findCluster(conn *redshift.Redshift, input *redshift.DescribeClustersInput) (*redshift.Cluster, error) { + output, err := findClusters(conn, input) + + if err != nil { + return nil, err + } + + if len(output) == 0 || output[0] == nil { return nil, tfresource.NewEmptyResultError(input) } - if count := len(output.Clusters); count > 1 { + if count := len(output); count > 1 { return nil, tfresource.NewTooManyResultsError(count, input) } - return output.Clusters[0], nil + return output[0], nil +} + +func FindClusterByID(conn *redshift.Redshift, id string) (*redshift.Cluster, error) { + input := &redshift.DescribeClustersInput{ + ClusterIdentifier: aws.String(id), + } + + output, err := findCluster(conn, input) + + if err != nil { + return nil, err + } + + // Eventual consistency check. 
+ if aws.StringValue(output.ClusterIdentifier) != id { + return nil, &resource.NotFoundError{ + LastRequest: input, + } + } + + return output, nil } func FindScheduledActionByName(conn *redshift.Redshift, name string) (*redshift.ScheduledAction, error) { diff --git a/website/docs/d/redshift_cluster.html.markdown b/website/docs/d/redshift_cluster.html.markdown index 5064f771958..2fab74d4feb 100644 --- a/website/docs/d/redshift_cluster.html.markdown +++ b/website/docs/d/redshift_cluster.html.markdown @@ -51,6 +51,7 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: +* `arn` - Amazon Resource Name (ARN) of the cluster. * `allow_version_upgrade` - Whether major version upgrades can be applied during maintenance period * `automated_snapshot_retention_period` - The backup retention period * `aqua_configuration_status` - The value represents how the cluster is configured to use AQUA. * `availability_zone` - The availability zone of the cluster @@ -58,6 +59,7 @@ In addition to all arguments above, the following attributes are exported: * `availability_zone_relocation_enabled` - Indicates whether the cluster is able to be relocated to another availability zone. * `bucket_name` - The name of the S3 bucket where the log files are to be stored * `cluster_identifier` - The cluster identifier +* `cluster_nodes` - The nodes in the cluster. Cluster node blocks are documented below * `cluster_parameter_group_name` - The name of the parameter group to be associated with this cluster * `cluster_public_key` - The public key for the cluster * `cluster_revision_number` - The cluster revision number @@ -65,6 +67,7 @@ In addition to all arguments above, the following attributes are exported: * `cluster_subnet_group_name` - The name of a cluster subnet group to be associated with this cluster * `cluster_type` - The cluster type * `database_name` - The name of the default database in the cluster +* `default_iam_role_arn` - The Amazon Resource Name (ARN) for the IAM role that was set as default for the cluster when the cluster was created. * `elastic_ip` - The Elastic IP of the cluster * `enable_logging` - Whether cluster logging is enabled * `encrypted` - Whether the cluster data is encrypted @@ -75,10 +78,20 @@ In addition to all arguments above, the following attributes are exported: * `master_username` - Username for the master DB user * `node_type` - The cluster node type * `number_of_nodes` - The number of nodes in the cluster +* `maintenance_track_name` - The name of the maintenance track for the cluster. +* `manual_snapshot_retention_period` - The default number of days to retain a manual snapshot. * `port` - The port the cluster responds on * `preferred_maintenance_window` - The maintenance window * `publicly_accessible` - Whether the cluster is publicly accessible * `s3_key_prefix` - The folder inside the S3 bucket where the log files are stored +* `log_destination_type` - The log destination type. +* `log_exports` - The collection of exported log types. Log types include the connection log, user log, and user activity log. 
* `tags` - The tags associated with the cluster * `vpc_id` - The VPC Id associated with the cluster * `vpc_security_group_ids` - The VPC security group Ids associated with the cluster + +Cluster nodes (for `cluster_nodes`) support the following attributes: + +* `node_role` - Whether the node is a leader node or a compute node +* `private_ip_address` - The private IP address of a node within a cluster +* `public_ip_address` - The public IP address of a node within a cluster diff --git a/website/docs/r/redshift_cluster.html.markdown b/website/docs/r/redshift_cluster.html.markdown index a9761d0b5e5..c48a82031c0 100644 --- a/website/docs/r/redshift_cluster.html.markdown +++ b/website/docs/r/redshift_cluster.html.markdown @@ -36,6 +36,7 @@ The following arguments are supported: * `cluster_identifier` - (Required) The Cluster Identifier. Must be a lower case string. * `database_name` - (Optional) The name of the first database to be created when the cluster is created. If you do not provide a name, Amazon Redshift will create a default database called `dev`. +* `default_iam_role_arn` - (Optional) The Amazon Resource Name (ARN) for the IAM role that was set as default for the cluster when the cluster was created. * `node_type` - (Required) The node type to be provisioned for the cluster. * `cluster_type` - (Optional) The cluster type to use. Either `single-node` or `multi-node`. * `master_password` - (Required unless a `snapshot_identifier` is provided) Password for the master DB user. @@ -73,6 +74,8 @@ The following arguments are supported: * `owner_account` - (Optional) The AWS customer account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot. * `iam_roles` - (Optional) A list of IAM Role ARNs to associate with the cluster. A maximum of 10 can be associated with the cluster at any time. * `logging` - (Optional) Logging, documented below. +* `maintenance_track_name` - (Optional) The name of the maintenance track for the restored cluster. When you take a snapshot, the snapshot inherits the MaintenanceTrack value from the cluster. The snapshot might be on a different track than the cluster that was the source for the snapshot. For example, suppose that you take a snapshot of a cluster that is on the current track and then change the cluster to be on the trailing track. In this case, the snapshot and the source cluster are on different tracks. Default value is `current`. +* `manual_snapshot_retention_period` - (Optional) The default number of days to retain a manual snapshot. If the value is -1, the snapshot is retained indefinitely. This setting doesn't change the retention period of existing snapshots. Valid values are between `-1` and `3653`. Default value is `-1`. * `snapshot_copy` - (Optional) Configuration of automatic copy of snapshots from one region to another. Documented below. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
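For reviewers, a minimal Terraform sketch wiring the new pieces together end to end. The cluster identifier, credentials, and IAM role ARN below are placeholders, not values taken from this change; note that the role named in `default_iam_role_arn` must also be one of the roles associated via `iam_roles`.

```terraform
resource "aws_redshift_cluster" "example" {
  cluster_identifier = "tf-example-cluster" # placeholder identifier
  database_name      = "exampledb"
  master_username    = "exampleuser"
  master_password    = "Example12345" # placeholder; source from a secret in practice
  node_type          = "dc2.large"
  cluster_type       = "single-node"

  # Arguments added by this change. The role ARN is a placeholder and the
  # default role must also appear in iam_roles.
  iam_roles            = ["arn:aws:iam::123456789012:role/redshift-example"]
  default_iam_role_arn = "arn:aws:iam::123456789012:role/redshift-example"

  maintenance_track_name           = "current"
  manual_snapshot_retention_period = 7 # -1 (the default) retains manual snapshots indefinitely
}

data "aws_redshift_cluster" "example" {
  cluster_identifier = aws_redshift_cluster.example.cluster_identifier
}

output "leader_private_ip" {
  # cluster_nodes is one of the attributes this change adds to the data source.
  value = data.aws_redshift_cluster.example.cluster_nodes[0].private_ip_address
}
```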