Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

resource/aws_rds_cluster: Consolidate CreateDBCluster logic and prevent creation issue when global_cluster_identifier and replication_source_identifier are both configured #14490

Merged
merged 1 commit into from
Aug 6, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
96 changes: 4 additions & 92 deletions aws/resource_aws_rds_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -541,98 +541,6 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error
if err != nil {
return fmt.Errorf("Error creating RDS Cluster: %s", err)
}
} else if _, ok := d.GetOk("replication_source_identifier"); ok {
createOpts := &rds.CreateDBClusterInput{
CopyTagsToSnapshot: aws.Bool(d.Get("copy_tags_to_snapshot").(bool)),
DBClusterIdentifier: aws.String(identifier),
DeletionProtection: aws.Bool(d.Get("deletion_protection").(bool)),
Engine: aws.String(d.Get("engine").(string)),
EngineMode: aws.String(d.Get("engine_mode").(string)),
ReplicationSourceIdentifier: aws.String(d.Get("replication_source_identifier").(string)),
ScalingConfiguration: expandRdsClusterScalingConfiguration(d.Get("scaling_configuration").([]interface{})),
Tags: tags,
}

// Need to check value > 0 due to:
// InvalidParameterValue: Backtrack is not enabled for the aurora-postgresql engine.
if v, ok := d.GetOk("backtrack_window"); ok && v.(int) > 0 {
createOpts.BacktrackWindow = aws.Int64(int64(v.(int)))
}

if attr, ok := d.GetOk("port"); ok {
createOpts.Port = aws.Int64(int64(attr.(int)))
}

if attr, ok := d.GetOk("db_subnet_group_name"); ok {
createOpts.DBSubnetGroupName = aws.String(attr.(string))
}

if attr, ok := d.GetOk("db_cluster_parameter_group_name"); ok {
createOpts.DBClusterParameterGroupName = aws.String(attr.(string))
}

if attr, ok := d.GetOk("engine_version"); ok {
createOpts.EngineVersion = aws.String(attr.(string))
}

if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
createOpts.VpcSecurityGroupIds = expandStringList(attr.List())
}

if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 {
createOpts.AvailabilityZones = expandStringList(attr.List())
}

if v, ok := d.GetOk("backup_retention_period"); ok {
createOpts.BackupRetentionPeriod = aws.Int64(int64(v.(int)))
}

if v, ok := d.GetOk("preferred_backup_window"); ok {
createOpts.PreferredBackupWindow = aws.String(v.(string))
}

if v, ok := d.GetOk("preferred_maintenance_window"); ok {
createOpts.PreferredMaintenanceWindow = aws.String(v.(string))
}

if attr, ok := d.GetOk("kms_key_id"); ok {
createOpts.KmsKeyId = aws.String(attr.(string))
}

if attr, ok := d.GetOk("source_region"); ok {
createOpts.SourceRegion = aws.String(attr.(string))
}

if attr, ok := d.GetOkExists("storage_encrypted"); ok {
createOpts.StorageEncrypted = aws.Bool(attr.(bool))
}

if attr, ok := d.GetOk("enabled_cloudwatch_logs_exports"); ok && len(attr.([]interface{})) > 0 {
createOpts.EnableCloudwatchLogsExports = expandStringList(attr.([]interface{}))
}

log.Printf("[DEBUG] Create RDS Cluster as read replica: %s", createOpts)
var resp *rds.CreateDBClusterOutput
err := resource.Retry(1*time.Minute, func() *resource.RetryError {
var err error
resp, err = conn.CreateDBCluster(createOpts)
if err != nil {
if isAWSErr(err, "InvalidParameterValue", "IAM role ARN value is invalid or does not include the required permissions") {
return resource.RetryableError(err)
}
return resource.NonRetryableError(err)
}
return nil
})
if isResourceTimeoutError(err) {
resp, err = conn.CreateDBCluster(createOpts)
}
if err != nil {
return fmt.Errorf("error creating RDS cluster: %s", err)
}

log.Printf("[DEBUG]: RDS Cluster create response: %s", resp)

} else if v, ok := d.GetOk("s3_import"); ok {
if _, ok := d.GetOk("master_password"); !ok {
return fmt.Errorf(`provider.aws: aws_db_instance: %s: "master_password": required field is not set`, d.Get("name").(string))
Expand Down Expand Up @@ -849,6 +757,10 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error
createOpts.EnableCloudwatchLogsExports = expandStringList(attr.([]interface{}))
}

if attr, ok := d.GetOk("replication_source_identifier"); ok && createOpts.GlobalClusterIdentifier == nil {
createOpts.ReplicationSourceIdentifier = aws.String(attr.(string))
}

if attr, ok := d.GetOkExists("storage_encrypted"); ok {
createOpts.StorageEncrypted = aws.Bool(attr.(bool))
}
Expand Down
123 changes: 123 additions & 0 deletions aws/resource_aws_rds_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1292,6 +1292,35 @@ func TestAccAWSRDSCluster_GlobalClusterIdentifier_PrimarySecondaryClusters(t *te
})
}

// Reference: https://github.com/terraform-providers/terraform-provider-aws/issues/13715
func TestAccAWSRDSCluster_GlobalClusterIdentifier_ReplicationSourceIdentifier(t *testing.T) {
	// Cross-region scenario: the secondary cluster lives in the alternate
	// region, so provider instances are tracked for region-aware checks.
	var testProviders []*schema.Provider

	var dbClusterPrimary rds.DBCluster
	var dbClusterSecondary rds.DBCluster

	name := acctest.RandomWithPrefix("tf-acc-test")
	primaryResourceName := "aws_rds_cluster.primary"
	secondaryResourceName := "aws_rds_cluster.secondary"

	resource.ParallelTest(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
			testAccMultipleRegionPreCheck(t, 2)
			testAccPreCheckAWSRdsGlobalCluster(t)
		},
		ProviderFactories: testAccProviderFactories(&testProviders),
		CheckDestroy:      testAccCheckAWSClusterDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccAWSRDSClusterConfig_GlobalClusterIdentifier_ReplicationSourceIdentifier(name),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSClusterExistsWithProvider(primaryResourceName, &dbClusterPrimary, testAccAwsRegionProviderFunc(testAccGetRegion(), &testProviders)),
					testAccCheckAWSClusterExistsWithProvider(secondaryResourceName, &dbClusterSecondary, testAccAwsRegionProviderFunc(testAccGetAlternateRegion(), &testProviders)),
				),
			},
		},
	})
}

func TestAccAWSRDSCluster_Port(t *testing.T) {
var dbCluster1, dbCluster2 rds.DBCluster
rInt := acctest.RandInt()
Expand Down Expand Up @@ -3262,6 +3291,100 @@ resource "aws_rds_cluster_instance" "secondary" {
`, rNameGlobal, rNamePrimary, rNameSecondary))
}

// testAccAWSRDSClusterConfig_GlobalClusterIdentifier_ReplicationSourceIdentifier
// returns acceptance-test configuration for an Aurora global database whose
// secondary cluster sets both global_cluster_identifier and
// replication_source_identifier (regression coverage for the creation issue
// this change fixes). The primary cluster and its instance are created in the
// default region; the VPC, subnets, subnet group, secondary cluster, and
// secondary instance are created in the alternate region via the
// "awsalternate" provider. rName seeds every resource identifier via %[1]q.
// NOTE: the body below is a raw string literal — its contents are
// runtime-significant HCL and must not be altered.
func testAccAWSRDSClusterConfig_GlobalClusterIdentifier_ReplicationSourceIdentifier(rName string) string {
return composeConfig(
testAccMultipleRegionProviderConfig(2),
fmt.Sprintf(`
data "aws_region" "current" {}

data "aws_availability_zones" "alternate" {
provider = "awsalternate"
state = "available"

filter {
name = "opt-in-status"
values = ["opt-in-not-required"]
}
}

resource "aws_rds_global_cluster" "test" {
global_cluster_identifier = %[1]q
engine = "aurora-postgresql"
engine_version = "10.11"
}

resource "aws_rds_cluster" "primary" {
cluster_identifier = "%[1]s-primary"
database_name = "mydb"
engine = aws_rds_global_cluster.test.engine
engine_version = aws_rds_global_cluster.test.engine_version
global_cluster_identifier = aws_rds_global_cluster.test.id
master_password = "barbarbar"
master_username = "foo"
skip_final_snapshot = true
}

resource "aws_rds_cluster_instance" "primary" {
cluster_identifier = aws_rds_cluster.primary.id
engine = aws_rds_cluster.primary.engine
engine_version = aws_rds_cluster.primary.engine_version
identifier = "%[1]s-primary"
instance_class = "db.r4.large" # only db.r4 or db.r5 are valid for Aurora global db
}

resource "aws_vpc" "alternate" {
provider = "awsalternate"
cidr_block = "10.0.0.0/16"

tags = {
Name = %[1]q
}
}

resource "aws_subnet" "alternate" {
provider = "awsalternate"
count = 3
vpc_id = aws_vpc.alternate.id
availability_zone = data.aws_availability_zones.alternate.names[count.index]
cidr_block = "10.0.${count.index}.0/24"

tags = {
Name = %[1]q
}
}

resource "aws_db_subnet_group" "alternate" {
provider = "awsalternate"
name = "%[1]s"
subnet_ids = aws_subnet.alternate[*].id
}

resource "aws_rds_cluster" "secondary" {
provider = "awsalternate"
depends_on = [aws_rds_cluster_instance.primary]

cluster_identifier = "%[1]s-secondary"
db_subnet_group_name = aws_db_subnet_group.alternate.name
engine = aws_rds_global_cluster.test.engine
engine_version = aws_rds_global_cluster.test.engine_version
global_cluster_identifier = aws_rds_global_cluster.test.id
replication_source_identifier = aws_rds_cluster.primary.arn
skip_final_snapshot = true
source_region = data.aws_region.current.name
}

resource "aws_rds_cluster_instance" "secondary" {
provider = "awsalternate"

cluster_identifier = aws_rds_cluster.secondary.id
engine = aws_rds_cluster.secondary.engine
engine_version = aws_rds_cluster.secondary.engine_version
identifier = "%[1]s-secondary"
instance_class = aws_rds_cluster_instance.primary.instance_class
}
`, rName))
}

func testAccAWSRDSClusterConfig_ScalingConfiguration(rName string, autoPause bool, maxCapacity, minCapacity, secondsUntilAutoPause int, timeoutAction string) string {
return fmt.Sprintf(`
resource "aws_rds_cluster" "test" {
Expand Down
2 changes: 1 addition & 1 deletion website/docs/r/rds_cluster.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@ The following arguments are supported:
* `port` - (Optional) The port on which the DB accepts connections
* `preferred_backup_window` - (Optional) The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.Time in UTC. Default: A 30-minute window selected at random from an 8-hour block of time per region. e.g. 04:00-09:00
* `preferred_maintenance_window` - (Optional) The weekly time range during which system maintenance can occur, in (UTC) e.g. wed:04:00-wed:04:30
* `replication_source_identifier` - (Optional) ARN of a source DB cluster or DB instance if this DB cluster is to be created as a Read Replica.
* `replication_source_identifier` - (Optional) ARN of a source DB cluster or DB instance if this DB cluster is to be created as a Read Replica. If DB Cluster is part of a Global Cluster, use the [`lifecycle` configuration block `ignore_changes` argument](/docs/configuration/resources.html#ignore_changes) to prevent Terraform from showing differences for this argument instead of configuring this value.
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

did you come across the behavior where setting this argument as Computed would cause the "secondary" cluster to remain even after successfully destroying its instance and the other clusters, but no error would be returned? Curious if it's different from the case of the global_cluster_identifier, where having that field as Computed doesn't allow deletion from the global_cluster at all

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The crux of the issue with global_cluster_identifier was that it was essentially not possible to perform a configuration update to remove the cluster from the Global Cluster while also keeping the Global Cluster around. There's a small writeup of the problem with Computed: true not even allowing operators to set the value to "" to remove things in the #14487 PR description. I tried fiddling with it last night for a few hours without luck, since Terraform never invokes the resource update function even though it should.

I do not believe there is a covering test for removing replication_source_identifier at the moment (to promote a secondary cluster to standalone) like there is TestAccAWSRDSCluster_GlobalClusterIdentifier_EngineMode_Global_Remove 🙁 It'd be awesome to add one though! It'd be related to #6749.

* `scaling_configuration` - (Optional) Nested attribute with scaling properties. Only valid when `engine_mode` is set to `serverless`. More details below.
* `skip_final_snapshot` - (Optional) Determines whether a final DB snapshot is created before the DB cluster is deleted. If true is specified, no DB snapshot is created. If false is specified, a DB snapshot is created before the DB cluster is deleted, using the value from `final_snapshot_identifier`. Default is `false`.
* `snapshot_identifier` - (Optional) Specifies whether or not to create this cluster from a snapshot. You can use either the name or ARN when specifying a DB cluster snapshot, or the ARN when specifying a DB snapshot.
Expand Down