diff --git a/.changelog/34843.txt b/.changelog/34843.txt new file mode 100644 index 00000000000..8120fc4fb11 --- /dev/null +++ b/.changelog/34843.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_db_snapshot_copy: Add `shared_accounts` argument +``` \ No newline at end of file diff --git a/internal/service/rds/cluster.go b/internal/service/rds/cluster.go index d80f9302918..152cb715b53 100644 --- a/internal/service/rds/cluster.go +++ b/internal/service/rds/cluster.go @@ -1901,3 +1901,127 @@ func waitDBClusterDeleted(ctx context.Context, conn *rds.RDS, id string, timeout return nil, err } + +func expandScalingConfiguration(tfMap map[string]interface{}) *rds.ScalingConfiguration { + if tfMap == nil { + return nil + } + + apiObject := &rds.ScalingConfiguration{} + + if v, ok := tfMap["auto_pause"].(bool); ok { + apiObject.AutoPause = aws.Bool(v) + } + + if v, ok := tfMap[names.AttrMaxCapacity].(int); ok { + apiObject.MaxCapacity = aws.Int64(int64(v)) + } + + if v, ok := tfMap["min_capacity"].(int); ok { + apiObject.MinCapacity = aws.Int64(int64(v)) + } + + if v, ok := tfMap["seconds_before_timeout"].(int); ok { + apiObject.SecondsBeforeTimeout = aws.Int64(int64(v)) + } + + if v, ok := tfMap["seconds_until_auto_pause"].(int); ok { + apiObject.SecondsUntilAutoPause = aws.Int64(int64(v)) + } + + if v, ok := tfMap["timeout_action"].(string); ok && v != "" { + apiObject.TimeoutAction = aws.String(v) + } + + return apiObject +} + +func flattenScalingConfigurationInfo(apiObject *rds.ScalingConfigurationInfo) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.AutoPause; v != nil { + tfMap["auto_pause"] = aws.BoolValue(v) + } + + if v := apiObject.MaxCapacity; v != nil { + tfMap[names.AttrMaxCapacity] = aws.Int64Value(v) + } + + if v := apiObject.MinCapacity; v != nil { + tfMap["min_capacity"] = aws.Int64Value(v) + } + + if v := apiObject.SecondsBeforeTimeout; v != nil { + tfMap["seconds_before_timeout"] = aws.Int64Value(v) + } + + if v := apiObject.SecondsUntilAutoPause; v != nil { + tfMap["seconds_until_auto_pause"] = aws.Int64Value(v) + } + + if v := apiObject.TimeoutAction; v != nil { + tfMap["timeout_action"] = aws.StringValue(v) + } + + return tfMap +} + +func expandServerlessV2ScalingConfiguration(tfMap map[string]interface{}) *rds.ServerlessV2ScalingConfiguration { + if tfMap == nil { + return nil + } + + apiObject := &rds.ServerlessV2ScalingConfiguration{} + + if v, ok := tfMap[names.AttrMaxCapacity].(float64); ok && v != 0.0 { + apiObject.MaxCapacity = aws.Float64(v) + } + + if v, ok := tfMap["min_capacity"].(float64); ok && v != 0.0 { + apiObject.MinCapacity = aws.Float64(v) + } + + return apiObject +} + +func flattenServerlessV2ScalingConfigurationInfo(apiObject *rds.ServerlessV2ScalingConfigurationInfo) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.MaxCapacity; v != nil { + tfMap[names.AttrMaxCapacity] = aws.Float64Value(v) + } + + if v := apiObject.MinCapacity; v != nil { + tfMap["min_capacity"] = aws.Float64Value(v) + } + + return tfMap +} + +// TODO Move back to 'flex.go' once migrated to AWS SDK for Go v2.
+func flattenManagedMasterUserSecret(apiObject *rds.MasterUserSecret) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + if v := apiObject.KmsKeyId; v != nil { + tfMap[names.AttrKMSKeyID] = aws.StringValue(v) + } + if v := apiObject.SecretArn; v != nil { + tfMap["secret_arn"] = aws.StringValue(v) + } + if v := apiObject.SecretStatus; v != nil { + tfMap["secret_status"] = aws.StringValue(v) + } + + return tfMap +} diff --git a/internal/service/rds/cluster_endpoint.go b/internal/service/rds/cluster_endpoint.go index a0b1703b5a9..205f5a18e17 100644 --- a/internal/service/rds/cluster_endpoint.go +++ b/internal/service/rds/cluster_endpoint.go @@ -8,8 +8,9 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/rds" + "github.com/aws/aws-sdk-go-v2/service/rds/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -17,6 +18,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -26,7 +28,7 @@ import ( // @SDKResource("aws_rds_cluster_endpoint", name="Cluster Endpoint") // @Tags(identifierAttribute="arn") // @Testing(tagsTest=false) -func ResourceClusterEndpoint() *schema.Resource { +func resourceClusterEndpoint() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceClusterEndpointCreate, ReadWithoutTimeout: resourceClusterEndpointRead, @@ -87,36 +89,37 @@ func ResourceClusterEndpoint() *schema.Resource { } func resourceClusterEndpointCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - const ( - clusterEndpointCreateTimeout = 30 * time.Minute - ) var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) endpointID := d.Get("cluster_endpoint_identifier").(string) input := &rds.CreateDBClusterEndpointInput{ DBClusterEndpointIdentifier: aws.String(endpointID), DBClusterIdentifier: aws.String(d.Get(names.AttrClusterIdentifier).(string)), EndpointType: aws.String(d.Get("custom_endpoint_type").(string)), - Tags: getTagsIn(ctx), + Tags: getTagsInV2(ctx), } if v, ok := d.GetOk("excluded_members"); ok && v.(*schema.Set).Len() > 0 { - input.ExcludedMembers = flex.ExpandStringSet(v.(*schema.Set)) + input.ExcludedMembers = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := d.GetOk("static_members"); ok && v.(*schema.Set).Len() > 0 { - input.StaticMembers = flex.ExpandStringSet(v.(*schema.Set)) + input.StaticMembers = flex.ExpandStringValueSet(v.(*schema.Set)) } - _, err := conn.CreateDBClusterEndpointWithContext(ctx, input) + _, err := conn.CreateDBClusterEndpoint(ctx, input) + if err != nil { return sdkdiag.AppendErrorf(diags, "creating RDS Cluster Endpoint (%s): %s", endpointID, err) } d.SetId(endpointID) - if _, err := waitClusterEndpointCreated(ctx, conn, d.Id(), clusterEndpointCreateTimeout); err != nil { + const ( + timeout = 30 * time.Minute + ) + if _, err := 
waitClusterEndpointCreated(ctx, conn, d.Id(), timeout); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for RDS Cluster Endpoint (%s) create: %s", d.Id(), err) } @@ -125,9 +128,9 @@ func resourceClusterEndpointCreate(ctx context.Context, d *schema.ResourceData, func resourceClusterEndpointRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) - clusterEp, err := FindDBClusterEndpointByID(ctx, conn, d.Id()) + clusterEp, err := findDBClusterEndpointByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] RDS Cluster Endpoint (%s) not found, removing from state", d.Id()) @@ -139,21 +142,20 @@ func resourceClusterEndpointRead(ctx context.Context, d *schema.ResourceData, me return sdkdiag.AppendErrorf(diags, "reading RDS Cluster Endpoint (%s): %s", d.Id(), err) } - arn := aws.StringValue(clusterEp.DBClusterEndpointArn) - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, clusterEp.DBClusterEndpointArn) d.Set("cluster_endpoint_identifier", clusterEp.DBClusterEndpointIdentifier) d.Set(names.AttrClusterIdentifier, clusterEp.DBClusterIdentifier) d.Set("custom_endpoint_type", clusterEp.CustomEndpointType) d.Set(names.AttrEndpoint, clusterEp.Endpoint) - d.Set("excluded_members", aws.StringValueSlice(clusterEp.ExcludedMembers)) - d.Set("static_members", aws.StringValueSlice(clusterEp.StaticMembers)) + d.Set("excluded_members", clusterEp.ExcludedMembers) + d.Set("static_members", clusterEp.StaticMembers) return diags } func resourceClusterEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &rds.ModifyDBClusterEndpointInput{ @@ -165,18 +167,19 @@ func resourceClusterEndpointUpdate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk("excluded_members"); ok && v.(*schema.Set).Len() > 0 { - input.ExcludedMembers = flex.ExpandStringSet(v.(*schema.Set)) + input.ExcludedMembers = flex.ExpandStringValueSet(v.(*schema.Set)) } else { - input.ExcludedMembers = aws.StringSlice([]string{}) + input.ExcludedMembers = []string{} } if v, ok := d.GetOk("static_members"); ok && v.(*schema.Set).Len() > 0 { - input.StaticMembers = flex.ExpandStringSet(v.(*schema.Set)) + input.StaticMembers = flex.ExpandStringValueSet(v.(*schema.Set)) } else { - input.StaticMembers = aws.StringSlice([]string{}) + input.StaticMembers = []string{} } - _, err := conn.ModifyDBClusterEndpointWithContext(ctx, input) + _, err := conn.ModifyDBClusterEndpoint(ctx, input) + if err != nil { return sdkdiag.AppendErrorf(diags, "modifying RDS Cluster Endpoint (%s): %s", d.Id(), err) } @@ -187,12 +190,13 @@ func resourceClusterEndpointUpdate(ctx context.Context, d *schema.ResourceData, func resourceClusterEndpointDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) log.Printf("[DEBUG] Deleting RDS Cluster Endpoint: %s", d.Id()) - _, err := conn.DeleteDBClusterEndpointWithContext(ctx, &rds.DeleteDBClusterEndpointInput{ + _, err := conn.DeleteDBClusterEndpoint(ctx, &rds.DeleteDBClusterEndpointInput{ DBClusterEndpointIdentifier: aws.String(d.Id()), }) + if err 
!= nil { return sdkdiag.AppendErrorf(diags, "deleting RDS Cluster Endpoint (%s): %s", d.Id(), err) } @@ -204,39 +208,60 @@ func resourceClusterEndpointDelete(ctx context.Context, d *schema.ResourceData, return diags } -func FindDBClusterEndpointByID(ctx context.Context, conn *rds.RDS, id string) (*rds.DBClusterEndpoint, error) { +func findDBClusterEndpointByID(ctx context.Context, conn *rds.Client, id string) (*types.DBClusterEndpoint, error) { input := &rds.DescribeDBClusterEndpointsInput{ DBClusterEndpointIdentifier: aws.String(id), } + output, err := findDBClusterEndpoint(ctx, conn, input, tfslices.PredicateTrue[*types.DBClusterEndpoint]()) - output, err := conn.DescribeDBClusterEndpointsWithContext(ctx, input) if err != nil { return nil, err } - if output == nil || len(output.DBClusterEndpoints) == 0 || output.DBClusterEndpoints[0] == nil { - return nil, tfresource.NewEmptyResultError(input) + // Eventual consistency check. + if aws.ToString(output.DBClusterEndpointIdentifier) != id { + return nil, &retry.NotFoundError{ + LastRequest: input, + } } - if count := len(output.DBClusterEndpoints); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) + return output, nil +} + +func findDBClusterEndpoint(ctx context.Context, conn *rds.Client, input *rds.DescribeDBClusterEndpointsInput, filter tfslices.Predicate[*types.DBClusterEndpoint]) (*types.DBClusterEndpoint, error) { + output, err := findDBClusterEndpoints(ctx, conn, input, filter) + + if err != nil { + return nil, err } - dbClusterEndpoint := output.DBClusterEndpoints[0] + return tfresource.AssertSingleValueResult(output) +} - // Eventual consistency check. - if aws.StringValue(dbClusterEndpoint.DBClusterEndpointIdentifier) != id { - return nil, &retry.NotFoundError{ - LastRequest: input, +func findDBClusterEndpoints(ctx context.Context, conn *rds.Client, input *rds.DescribeDBClusterEndpointsInput, filter tfslices.Predicate[*types.DBClusterEndpoint]) ([]types.DBClusterEndpoint, error) { + var output []types.DBClusterEndpoint + + pages := rds.NewDescribeDBClusterEndpointsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + for _, v := range page.DBClusterEndpoints { + if filter(&v) { + output = append(output, v) + } } } - return dbClusterEndpoint, nil + return output, nil } -func statusClusterEndpoint(ctx context.Context, conn *rds.RDS, id string) retry.StateRefreshFunc { +func statusClusterEndpoint(ctx context.Context, conn *rds.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindDBClusterEndpointByID(ctx, conn, id) + output, err := findDBClusterEndpointByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil @@ -246,14 +271,14 @@ func statusClusterEndpoint(ctx context.Context, conn *rds.RDS, id string) retry. 
return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, aws.ToString(output.Status), nil } } -func waitClusterEndpointCreated(ctx context.Context, conn *rds.RDS, id string, timeout time.Duration) (*rds.DBClusterEndpoint, error) { +func waitClusterEndpointCreated(ctx context.Context, conn *rds.Client, id string, timeout time.Duration) (*types.DBClusterEndpoint, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{"creating"}, - Target: []string{"available"}, + Pending: []string{clusterEndpointStatusCreating}, + Target: []string{clusterEndpointStatusAvailable}, Refresh: statusClusterEndpoint(ctx, conn, id), Timeout: timeout, Delay: 5 * time.Second, @@ -262,16 +287,16 @@ func waitClusterEndpointCreated(ctx context.Context, conn *rds.RDS, id string, t outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*rds.DBClusterEndpoint); ok { + if output, ok := outputRaw.(*types.DBClusterEndpoint); ok { return output, err } return nil, err } -func waitClusterEndpointDeleted(ctx context.Context, conn *rds.RDS, id string, timeout time.Duration) (*rds.DBClusterEndpoint, error) { +func waitClusterEndpointDeleted(ctx context.Context, conn *rds.Client, id string, timeout time.Duration) (*types.DBClusterEndpoint, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{"available", "deleting"}, + Pending: []string{clusterEndpointStatusAvailable, clusterEndpointStatusDeleting}, Target: []string{}, Refresh: statusClusterEndpoint(ctx, conn, id), Timeout: timeout, @@ -281,7 +306,7 @@ func waitClusterEndpointDeleted(ctx context.Context, conn *rds.RDS, id string, t outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*rds.DBClusterEndpoint); ok { + if output, ok := outputRaw.(*types.DBClusterEndpoint); ok { return output, err } diff --git a/internal/service/rds/cluster_endpoint_test.go b/internal/service/rds/cluster_endpoint_test.go index 44064f7d6c2..d091206484d 100644 --- a/internal/service/rds/cluster_endpoint_test.go +++ b/internal/service/rds/cluster_endpoint_test.go @@ -6,12 +6,10 @@ package rds_test import ( "context" "fmt" - "strings" "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" + "github.com/aws/aws-sdk-go-v2/service/rds/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -28,9 +26,9 @@ func TestAccRDSClusterEndpoint_basic(t *testing.T) { t.Skip("skipping long-running test in short mode") } - rInt := sdkacctest.RandInt() - var customReaderEndpoint rds.DBClusterEndpoint - var customEndpoint rds.DBClusterEndpoint + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + var customReaderEndpoint types.DBClusterEndpoint + var customEndpoint types.DBClusterEndpoint readerResourceName := "aws_rds_cluster_endpoint.reader" defaultResourceName := "aws_rds_cluster_endpoint.default" @@ -41,12 +39,10 @@ func TestAccRDSClusterEndpoint_basic(t *testing.T) { CheckDestroy: testAccCheckClusterEndpointDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccClusterEndpointConfig_basic(rInt), - Check: resource.ComposeTestCheckFunc( + Config: testAccClusterEndpointConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckClusterEndpointExists(ctx, readerResourceName, &customReaderEndpoint), - testAccCheckClusterEndpointAttributes(&customReaderEndpoint), 
testAccCheckClusterEndpointExists(ctx, defaultResourceName, &customEndpoint), - testAccCheckClusterEndpointAttributes(&customEndpoint), acctest.MatchResourceAttrRegionalARN(readerResourceName, names.AttrARN, "rds", regexache.MustCompile(`cluster-endpoint:.+`)), resource.TestCheckResourceAttrSet(readerResourceName, names.AttrEndpoint), acctest.MatchResourceAttrRegionalARN(defaultResourceName, names.AttrARN, "rds", regexache.MustCompile(`cluster-endpoint:.+`)), @@ -56,13 +52,13 @@ func TestAccRDSClusterEndpoint_basic(t *testing.T) { ), }, { - ResourceName: "aws_rds_cluster_endpoint.reader", + ResourceName: readerResourceName, ImportState: true, ImportStateVerify: true, }, { - ResourceName: "aws_rds_cluster_endpoint.default", + ResourceName: defaultResourceName, ImportState: true, ImportStateVerify: true, }, @@ -76,8 +72,8 @@ func TestAccRDSClusterEndpoint_tags(t *testing.T) { t.Skip("skipping long-running test in short mode") } - rInt := sdkacctest.RandInt() - var customReaderEndpoint rds.DBClusterEndpoint + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + var customReaderEndpoint types.DBClusterEndpoint resourceName := "aws_rds_cluster_endpoint.reader" resource.ParallelTest(t, resource.TestCase{ @@ -87,7 +83,7 @@ func TestAccRDSClusterEndpoint_tags(t *testing.T) { CheckDestroy: testAccCheckClusterEndpointDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccClusterEndpointConfig_tags1(rInt, acctest.CtKey1, acctest.CtValue1), + Config: testAccClusterEndpointConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), Check: resource.ComposeTestCheckFunc( testAccCheckClusterEndpointExists(ctx, resourceName, &customReaderEndpoint), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), @@ -100,7 +96,7 @@ func TestAccRDSClusterEndpoint_tags(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccClusterEndpointConfig_tags2(rInt, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Config: testAccClusterEndpointConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( testAccCheckClusterEndpointExists(ctx, resourceName, &customReaderEndpoint), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), @@ -109,7 +105,7 @@ func TestAccRDSClusterEndpoint_tags(t *testing.T) { ), }, { - Config: testAccClusterEndpointConfig_tags1(rInt, acctest.CtKey2, acctest.CtValue2), + Config: testAccClusterEndpointConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( testAccCheckClusterEndpointExists(ctx, resourceName, &customReaderEndpoint), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), @@ -120,40 +116,9 @@ func TestAccRDSClusterEndpoint_tags(t *testing.T) { }) } -func testAccCheckClusterEndpointAttributes(v *rds.DBClusterEndpoint) resource.TestCheckFunc { - return func(s *terraform.State) error { - if aws.StringValue(v.Endpoint) == "" { - return fmt.Errorf("empty endpoint domain") - } - - if aws.StringValue(v.CustomEndpointType) != "READER" && - aws.StringValue(v.CustomEndpointType) != "ANY" { - return fmt.Errorf("Incorrect endpoint type: expected: READER or ANY, got: %s", aws.StringValue(v.CustomEndpointType)) - } - - if len(v.StaticMembers) == 0 && len(v.ExcludedMembers) == 0 { - return fmt.Errorf("Empty members") - } - - for _, m := range aws.StringValueSlice(v.StaticMembers) { - if !strings.HasPrefix(m, "tf-aurora-cluster-instance") { - return fmt.Errorf("Incorrect 
StaticMember Cluster Instance Identifier prefix:\nexpected: %s\ngot: %s", "tf-aurora-cluster-instance", m) - } - } - - for _, m := range aws.StringValueSlice(v.ExcludedMembers) { - if !strings.HasPrefix(m, "tf-aurora-cluster-instance") { - return fmt.Errorf("Incorrect ExcludeMember Cluster Instance Identifier prefix:\nexpected: %s\ngot: %s", "tf-aurora-cluster-instance", m) - } - } - - return nil - } -} - func testAccCheckClusterEndpointDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).RDSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_rds_cluster_endpoint" { @@ -177,20 +142,17 @@ func testAccCheckClusterEndpointDestroy(ctx context.Context) resource.TestCheckF } } -func testAccCheckClusterEndpointExists(ctx context.Context, n string, v *rds.DBClusterEndpoint) resource.TestCheckFunc { +func testAccCheckClusterEndpointExists(ctx context.Context, n string, v *types.DBClusterEndpoint) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No RDS Cluster Endpoint ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).RDSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) output, err := tfrds.FindDBClusterEndpointByID(ctx, conn, rs.Primary.ID) + if err != nil { return err } @@ -201,10 +163,8 @@ func testAccCheckClusterEndpointExists(ctx context.Context, n string, v *rds.DBC } } -func testAccClusterEndpointBaseConfig(n int) string { - return acctest.ConfigCompose( - acctest.ConfigAvailableAZsNoOptIn(), - fmt.Sprintf(` +func testAccClusterEndpointConfig_base(rName string) string { + return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` data "aws_rds_orderable_db_instance" "test" { engine = aws_rds_cluster.default.engine engine_version = aws_rds_cluster.default.engine_version @@ -212,42 +172,43 @@ data "aws_rds_orderable_db_instance" "test" { } resource "aws_rds_cluster" "default" { - cluster_identifier = "tf-aurora-cluster-%[1]d" + cluster_identifier = %[1]q availability_zones = [ data.aws_availability_zones.available.names[0], data.aws_availability_zones.available.names[1], data.aws_availability_zones.available.names[2] ] - database_name = "mydb" - master_username = "foo" - master_password = "mustbeeightcharaters" - db_cluster_parameter_group_name = "default.aurora5.6" - skip_final_snapshot = true + + database_name = "test" + engine = %[2]q + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" + skip_final_snapshot = true } resource "aws_rds_cluster_instance" "test1" { apply_immediately = true cluster_identifier = aws_rds_cluster.default.id - identifier = "tf-aurora-cluster-instance-test1-%[1]d" + identifier = "%[1]s-1" instance_class = data.aws_rds_orderable_db_instance.test.instance_class + engine = aws_rds_cluster.default.engine } resource "aws_rds_cluster_instance" "test2" { apply_immediately = true cluster_identifier = aws_rds_cluster.default.id - identifier = "tf-aurora-cluster-instance-test2-%[1]d" + identifier = "%[1]s-2" instance_class = data.aws_rds_orderable_db_instance.test.instance_class + engine = aws_rds_cluster.default.engine } -`, n)) +`, rName, tfrds.ClusterEngineAuroraMySQL)) } -func testAccClusterEndpointConfig_basic(n int) string { - return 
acctest.ConfigCompose( - testAccClusterEndpointBaseConfig(n), - fmt.Sprintf(` +func testAccClusterEndpointConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccClusterEndpointConfig_base(rName), fmt.Sprintf(` resource "aws_rds_cluster_endpoint" "reader" { cluster_identifier = aws_rds_cluster.default.id - cluster_endpoint_identifier = "reader-%[1]d" + cluster_endpoint_identifier = "%[1]s-reader" custom_endpoint_type = "READER" static_members = [aws_rds_cluster_instance.test2.id] @@ -255,21 +216,19 @@ resource "aws_rds_cluster_endpoint" "reader" { resource "aws_rds_cluster_endpoint" "default" { cluster_identifier = aws_rds_cluster.default.id - cluster_endpoint_identifier = "default-%[1]d" + cluster_endpoint_identifier = "%[1]s-default" custom_endpoint_type = "ANY" excluded_members = [aws_rds_cluster_instance.test2.id] } -`, n)) +`, rName)) } -func testAccClusterEndpointConfig_tags1(n int, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose( - testAccClusterEndpointBaseConfig(n), - fmt.Sprintf(` +func testAccClusterEndpointConfig_tags1(rName, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose(testAccClusterEndpointConfig_base(rName), fmt.Sprintf(` resource "aws_rds_cluster_endpoint" "reader" { cluster_identifier = aws_rds_cluster.default.id - cluster_endpoint_identifier = "reader-%[1]d" + cluster_endpoint_identifier = "%[1]s-reader" custom_endpoint_type = "READER" static_members = [aws_rds_cluster_instance.test2.id] @@ -278,16 +237,14 @@ resource "aws_rds_cluster_endpoint" "reader" { %[2]q = %[3]q } } -`, n, tagKey1, tagValue1)) +`, rName, tagKey1, tagValue1)) } -func testAccClusterEndpointConfig_tags2(n int, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose( - testAccClusterEndpointBaseConfig(n), - fmt.Sprintf(` +func testAccClusterEndpointConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccClusterEndpointConfig_base(rName), fmt.Sprintf(` resource "aws_rds_cluster_endpoint" "reader" { cluster_identifier = aws_rds_cluster.default.id - cluster_endpoint_identifier = "reader-%[1]d" + cluster_endpoint_identifier = "%[1]s-reader" custom_endpoint_type = "READER" static_members = [aws_rds_cluster_instance.test2.id] @@ -297,5 +254,5 @@ resource "aws_rds_cluster_endpoint" "reader" { %[4]q = %[5]q } } -`, n, tagKey1, tagValue1, tagKey2, tagValue2)) +`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) } diff --git a/internal/service/rds/cluster_instance.go b/internal/service/rds/cluster_instance.go index d91da2ae775..9ed9701a95a 100644 --- a/internal/service/rds/cluster_instance.go +++ b/internal/service/rds/cluster_instance.go @@ -15,6 +15,7 @@ import ( "github.com/aws/aws-sdk-go/service/rds" "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -552,6 +553,96 @@ func resourceClusterInstanceDelete(ctx context.Context, d *schema.ResourceData, return diags } +func waitDBClusterInstanceCreated(ctx context.Context, conn *rds.RDS, id string, timeout time.Duration) (*rds.DBInstance, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{ + InstanceStatusBackingUp, + InstanceStatusConfiguringEnhancedMonitoring, + 
InstanceStatusConfiguringIAMDatabaseAuth, + InstanceStatusConfiguringLogExports, + InstanceStatusCreating, + InstanceStatusMaintenance, + InstanceStatusModifying, + InstanceStatusRebooting, + InstanceStatusRenaming, + InstanceStatusResettingMasterCredentials, + InstanceStatusStarting, + InstanceStatusStorageOptimization, + InstanceStatusUpgrading, + }, + Target: []string{InstanceStatusAvailable}, + Refresh: statusDBInstanceSDKv1(ctx, conn, id), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*rds.DBInstance); ok { + return output, err + } + + return nil, err +} + +func waitDBClusterInstanceUpdated(ctx context.Context, conn *rds.RDS, id string, timeout time.Duration) (*rds.DBInstance, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{ + InstanceStatusBackingUp, + InstanceStatusConfiguringEnhancedMonitoring, + InstanceStatusConfiguringIAMDatabaseAuth, + InstanceStatusConfiguringLogExports, + InstanceStatusCreating, + InstanceStatusMaintenance, + InstanceStatusModifying, + InstanceStatusRebooting, + InstanceStatusRenaming, + InstanceStatusResettingMasterCredentials, + InstanceStatusStarting, + InstanceStatusStorageOptimization, + InstanceStatusUpgrading, + }, + Target: []string{InstanceStatusAvailable}, + Refresh: statusDBInstanceSDKv1(ctx, conn, id), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*rds.DBInstance); ok { + return output, err + } + + return nil, err +} + +func waitDBClusterInstanceDeleted(ctx context.Context, conn *rds.RDS, id string, timeout time.Duration) (*rds.DBInstance, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{ + InstanceStatusConfiguringLogExports, + InstanceStatusDeletePreCheck, + InstanceStatusDeleting, + InstanceStatusModifying, + }, + Target: []string{}, + Refresh: statusDBInstanceSDKv1(ctx, conn, id), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*rds.DBInstance); ok { + return output, err + } + + return nil, err +} + func clusterSetResourceDataEngineVersionFromClusterInstance(d *schema.ResourceData, c *rds.DBInstance) { oldVersion := d.Get(names.AttrEngineVersion).(string) newVersion := aws.StringValue(c.EngineVersion) diff --git a/internal/service/rds/cluster_parameter_group.go b/internal/service/rds/cluster_parameter_group.go index 257bbb50eaa..4cdc5534b82 100644 --- a/internal/service/rds/cluster_parameter_group.go +++ b/internal/service/rds/cluster_parameter_group.go @@ -6,18 +6,18 @@ package rds import ( "context" "log" + "slices" "time" - rds_sdkv2 "github.com/aws/aws-sdk-go-v2/service/rds" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/rds" "github.com/aws/aws-sdk-go-v2/service/rds/types" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" 
"github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/maps" @@ -31,7 +31,7 @@ import ( // @SDKResource("aws_rds_cluster_parameter_group", name="Cluster Parameter Group") // @Tags(identifierAttribute="arn") // @Testing(tagsTest=false) -func ResourceClusterParameterGroup() *schema.Resource { +func resourceClusterParameterGroup() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceClusterParameterGroupCreate, ReadWithoutTimeout: resourceClusterParameterGroupRead, @@ -80,9 +80,10 @@ func ResourceClusterParameterGroup() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "apply_method": { - Type: schema.TypeString, - Optional: true, - Default: "immediate", + Type: schema.TypeString, + Optional: true, + Default: types.ApplyMethodImmediate, + ValidateDiagFunc: enum.ValidateIgnoreCase[types.ApplyMethod](), }, names.AttrName: { Type: schema.TypeString, @@ -106,24 +107,25 @@ func ResourceClusterParameterGroup() *schema.Resource { func resourceClusterParameterGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) - groupName := create.Name(d.Get(names.AttrName).(string), d.Get(names.AttrNamePrefix).(string)) + name := create.Name(d.Get(names.AttrName).(string), d.Get(names.AttrNamePrefix).(string)) input := &rds.CreateDBClusterParameterGroupInput{ - DBClusterParameterGroupName: aws.String(groupName), + DBClusterParameterGroupName: aws.String(name), DBParameterGroupFamily: aws.String(d.Get(names.AttrFamily).(string)), Description: aws.String(d.Get(names.AttrDescription).(string)), - Tags: getTagsIn(ctx), + Tags: getTagsInV2(ctx), } - output, err := conn.CreateDBClusterParameterGroupWithContext(ctx, input) + output, err := conn.CreateDBClusterParameterGroup(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "creating DB Cluster Parameter Group (%s): %s", groupName, err) + return sdkdiag.AppendErrorf(diags, "creating RDS Cluster Parameter Group (%s): %s", name, err) } - d.SetId(groupName) + d.SetId(name) - // Set for update + // Set for update. d.Set(names.AttrARN, output.DBClusterParameterGroup.DBClusterParameterGroupArn) return append(diags, resourceClusterParameterGroupUpdate(ctx, d, meta)...) 
@@ -131,53 +133,39 @@ func resourceClusterParameterGroupCreate(ctx context.Context, d *schema.Resource func resourceClusterParameterGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) - dbClusterParameterGroup, err := FindDBClusterParameterGroupByName(ctx, conn, d.Id()) + dbClusterParameterGroup, err := findDBClusterParameterGroupByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { - log.Printf("[WARN] RDS DB Cluster Parameter Group (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] RDS Cluster Parameter Group (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading RDS DB Cluster Parameter Group (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading RDS Cluster Parameter Group (%s): %s", d.Id(), err) } - arn := aws.StringValue(dbClusterParameterGroup.DBClusterParameterGroupArn) - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, dbClusterParameterGroup.DBClusterParameterGroupArn) d.Set(names.AttrDescription, dbClusterParameterGroup.Description) d.Set(names.AttrFamily, dbClusterParameterGroup.DBParameterGroupFamily) d.Set(names.AttrName, dbClusterParameterGroup.DBClusterParameterGroupName) - d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.StringValue(dbClusterParameterGroup.DBClusterParameterGroupName))) + d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.ToString(dbClusterParameterGroup.DBClusterParameterGroupName))) - // Only include user customized parameters as there's hundreds of system/default ones + // Only include user customized parameters as there's hundreds of system/default ones. input := &rds.DescribeDBClusterParametersInput{ DBClusterParameterGroupName: aws.String(d.Id()), - Source: aws.String("user"), + Source: aws.String(parameterSourceUser), } - var parameters []*rds.Parameter - - err = conn.DescribeDBClusterParametersPagesWithContext(ctx, input, func(page *rds.DescribeDBClusterParametersOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.Parameters { - if v != nil { - parameters = append(parameters, v) - } - } - return !lastPage - }) + parameters, err := findDBClusterParameters(ctx, conn, input, tfslices.PredicateTrue[*types.Parameter]()) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading RDS Cluster Parameter Group (%s) parameters: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading RDS Cluster Parameter Group (%s) user parameters: %s", d.Id(), err) } - // add only system parameters that are set in the config + // Add only system parameters that are set in the config. 
p := d.Get(names.AttrParameter) if p == nil { p = new(schema.Set) @@ -187,29 +175,21 @@ func resourceClusterParameterGroupRead(ctx context.Context, d *schema.ResourceDa input = &rds.DescribeDBClusterParametersInput{ DBClusterParameterGroupName: aws.String(d.Id()), - Source: aws.String("system"), + Source: aws.String(parameterSourceSystem), } - err = conn.DescribeDBClusterParametersPagesWithContext(ctx, input, func(page *rds.DescribeDBClusterParametersOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.Parameters { - for _, p := range configParameters { - if aws.StringValue(v.ParameterName) == aws.StringValue(p.ParameterName) { - parameters = append(parameters, v) - } - } - } - - return !lastPage + systemParameters, err := findDBClusterParameters(ctx, conn, input, func(v *types.Parameter) bool { + return slices.ContainsFunc(configParameters, func(p types.Parameter) bool { + return aws.ToString(p.ParameterName) == aws.ToString(v.ParameterName) + }) }) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading RDS Cluster Parameter Group (%s) parameters: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading RDS Cluster Parameter Group (%s) system parameters: %s", d.Id(), err) } + parameters = append(parameters, systemParameters...) + if err := d.Set(names.AttrParameter, flattenParameters(parameters)); err != nil { return sdkdiag.AppendErrorf(diags, "setting parameter: %s", err) } @@ -222,44 +202,36 @@ func resourceClusterParameterGroupUpdate(ctx context.Context, d *schema.Resource maxParamModifyChunk = 20 ) var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) if d.HasChange(names.AttrParameter) { o, n := d.GetChange(names.AttrParameter) - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) + os, ns := o.(*schema.Set), n.(*schema.Set) - // Expand the "parameter" set to aws-sdk-go compat []rds.Parameter. 
for _, chunk := range tfslices.Chunks(expandParameters(ns.Difference(os).List()), maxParamModifyChunk) { input := &rds.ModifyDBClusterParameterGroupInput{ DBClusterParameterGroupName: aws.String(d.Id()), Parameters: chunk, } - _, err := conn.ModifyDBClusterParameterGroupWithContext(ctx, input) + _, err := conn.ModifyDBClusterParameterGroup(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "modifying DB Cluster Parameter Group (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "modifying RDS Cluster Parameter Group (%s): %s", d.Id(), err) } } - toRemove := map[string]*rds.Parameter{} + toRemove := map[string]types.Parameter{} for _, p := range expandParameters(os.List()) { if p.ParameterName != nil { - toRemove[*p.ParameterName] = p + toRemove[aws.ToString(p.ParameterName)] = p } } for _, p := range expandParameters(ns.List()) { if p.ParameterName != nil { - delete(toRemove, *p.ParameterName) + delete(toRemove, aws.ToString(p.ParameterName)) } } @@ -271,12 +243,15 @@ func resourceClusterParameterGroupUpdate(ctx context.Context, d *schema.Resource ResetAllParameters: aws.Bool(false), } - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, 3*time.Minute, func() (interface{}, error) { - return conn.ResetDBClusterParameterGroupWithContext(ctx, input) - }, rds.ErrCodeInvalidDBParameterGroupStateFault, "has pending changes") + const ( + timeout = 3 * time.Minute + ) + _, err := tfresource.RetryWhenIsAErrorMessageContains[*types.InvalidDBParameterGroupStateFault](ctx, timeout, func() (interface{}, error) { + return conn.ResetDBClusterParameterGroup(ctx, input) + }, "has pending changes") if err != nil { - return sdkdiag.AppendErrorf(diags, "resetting DB Cluster Parameter Group (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "resetting RDS Cluster Parameter Group (%s): %s", d.Id(), err) } } } @@ -288,25 +263,18 @@ func resourceClusterParameterGroupDelete(ctx context.Context, d *schema.Resource var diags diag.Diagnostics conn := meta.(*conns.AWSClient).RDSClient(ctx) - input := &rds_sdkv2.DeleteDBClusterParameterGroupInput{ - DBClusterParameterGroupName: aws.String(d.Id()), - } - - log.Printf("[DEBUG] Deleting RDS DB Cluster Parameter Group: %s", d.Id()) - err := retry.RetryContext(ctx, 3*time.Minute, func() *retry.RetryError { - _, err := conn.DeleteDBClusterParameterGroup(ctx, input) - if errs.IsA[*types.DBParameterGroupNotFoundFault](err) { - return nil - } else if errs.IsA[*types.InvalidDBParameterGroupStateFault](err) { - return retry.RetryableError(err) - } - if err != nil { - return retry.NonRetryableError(err) - } - return nil + log.Printf("[DEBUG] Deleting RDS Cluster Parameter Group: %s", d.Id()) + const ( + timeout = 3 * time.Minute + ) + _, err := tfresource.RetryWhenIsA[*types.InvalidDBParameterGroupStateFault](ctx, timeout, func() (interface{}, error) { + return conn.DeleteDBClusterParameterGroup(ctx, &rds.DeleteDBClusterParameterGroupInput{ + DBClusterParameterGroupName: aws.String(d.Id()), + }) }) - if tfresource.TimedOut(err) { - _, err = conn.DeleteDBClusterParameterGroup(ctx, input) + + if errs.IsA[*types.DBParameterGroupNotFoundFault](err) { + return diags } if err != nil { @@ -316,40 +284,88 @@ func resourceClusterParameterGroupDelete(ctx context.Context, d *schema.Resource return diags } -func FindDBClusterParameterGroupByName(ctx context.Context, conn *rds.RDS, name string) (*rds.DBClusterParameterGroup, error) { +func findDBClusterParameterGroupByName(ctx context.Context, conn *rds.Client, name string) 
(*types.DBClusterParameterGroup, error) { input := &rds.DescribeDBClusterParameterGroupsInput{ DBClusterParameterGroupName: aws.String(name), } + output, err := findDBClusterParameterGroup(ctx, conn, input, tfslices.PredicateTrue[*types.DBClusterParameterGroup]()) - output, err := conn.DescribeDBClusterParameterGroupsWithContext(ctx, input) + if err != nil { + return nil, err + } - if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBParameterGroupNotFoundFault) { + // Eventual consistency check. + if aws.ToString(output.DBClusterParameterGroupName) != name { return nil, &retry.NotFoundError{ - LastError: err, LastRequest: input, } } + return output, nil +} + +func findDBClusterParameterGroup(ctx context.Context, conn *rds.Client, input *rds.DescribeDBClusterParameterGroupsInput, filter tfslices.Predicate[*types.DBClusterParameterGroup]) (*types.DBClusterParameterGroup, error) { + output, err := findDBClusterParameterGroups(ctx, conn, input, filter) + if err != nil { return nil, err } - if output == nil || len(output.DBClusterParameterGroups) == 0 || output.DBClusterParameterGroups[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } + return tfresource.AssertSingleValueResult(output) +} + +func findDBClusterParameterGroups(ctx context.Context, conn *rds.Client, input *rds.DescribeDBClusterParameterGroupsInput, filter tfslices.Predicate[*types.DBClusterParameterGroup]) ([]types.DBClusterParameterGroup, error) { + var output []types.DBClusterParameterGroup + + pages := rds.NewDescribeDBClusterParameterGroupsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*types.DBParameterGroupNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } - if count := len(output.DBClusterParameterGroups); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) + for _, v := range page.DBClusterParameterGroups { + if filter(&v) { + output = append(output, v) + } + } } - dbClusterParameterGroup := output.DBClusterParameterGroups[0] + return output, nil +} - // Eventual consistency check. 
- if aws.StringValue(dbClusterParameterGroup.DBClusterParameterGroupName) != name { - return nil, &retry.NotFoundError{ - LastRequest: input, +func findDBClusterParameters(ctx context.Context, conn *rds.Client, input *rds.DescribeDBClusterParametersInput, filter tfslices.Predicate[*types.Parameter]) ([]types.Parameter, error) { + var output []types.Parameter + + pages := rds.NewDescribeDBClusterParametersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*types.DBParameterGroupNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + for _, v := range page.Parameters { + if filter(&v) { + output = append(output, v) + } } } - return dbClusterParameterGroup, nil + return output, nil } diff --git a/internal/service/rds/cluster_parameter_group_test.go b/internal/service/rds/cluster_parameter_group_test.go index 7f5c19e66a5..c64ab96c52b 100644 --- a/internal/service/rds/cluster_parameter_group_test.go +++ b/internal/service/rds/cluster_parameter_group_test.go @@ -5,12 +5,12 @@ package rds_test import ( "context" - "errors" "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/rds" + "github.com/aws/aws-sdk-go-v2/service/rds/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -24,7 +24,7 @@ import ( func TestAccRDSClusterParameterGroup_basic(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBClusterParameterGroup + var v types.DBClusterParameterGroup resourceName := "aws_rds_cluster_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -123,7 +123,7 @@ func TestAccRDSClusterParameterGroup_basic(t *testing.T) { func TestAccRDSClusterParameterGroup_disappears(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBClusterParameterGroup + var v types.DBClusterParameterGroup resourceName := "aws_rds_cluster_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -147,7 +147,7 @@ func TestAccRDSClusterParameterGroup_disappears(t *testing.T) { func TestAccRDSClusterParameterGroup_tags(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBClusterParameterGroup + var v types.DBClusterParameterGroup resourceName := "aws_rds_cluster_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -193,7 +193,7 @@ func TestAccRDSClusterParameterGroup_tags(t *testing.T) { func TestAccRDSClusterParameterGroup_withApplyMethod(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBClusterParameterGroup + var v types.DBClusterParameterGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_rds_cluster_parameter_group.test" @@ -235,7 +235,7 @@ func TestAccRDSClusterParameterGroup_withApplyMethod(t *testing.T) { func TestAccRDSClusterParameterGroup_namePrefix(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBClusterParameterGroup + var v types.DBClusterParameterGroup resourceName := "aws_rds_cluster_parameter_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -263,7 +263,7 @@ func TestAccRDSClusterParameterGroup_namePrefix(t *testing.T) { func TestAccRDSClusterParameterGroup_NamePrefix_parameter(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBClusterParameterGroup 
+ var v types.DBClusterParameterGroup resourceName := "aws_rds_cluster_parameter_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -291,7 +291,7 @@ func TestAccRDSClusterParameterGroup_NamePrefix_parameter(t *testing.T) { func TestAccRDSClusterParameterGroup_generatedName(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBClusterParameterGroup + var v types.DBClusterParameterGroup resourceName := "aws_rds_cluster_parameter_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -319,7 +319,7 @@ func TestAccRDSClusterParameterGroup_generatedName(t *testing.T) { func TestAccRDSClusterParameterGroup_GeneratedName_parameter(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBClusterParameterGroup + var v types.DBClusterParameterGroup resourceName := "aws_rds_cluster_parameter_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -347,7 +347,7 @@ func TestAccRDSClusterParameterGroup_GeneratedName_parameter(t *testing.T) { func TestAccRDSClusterParameterGroup_only(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBClusterParameterGroup + var v types.DBClusterParameterGroup resourceName := "aws_rds_cluster_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -378,7 +378,7 @@ func TestAccRDSClusterParameterGroup_only(t *testing.T) { func TestAccRDSClusterParameterGroup_updateParameters(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBClusterParameterGroup + var v types.DBClusterParameterGroup resourceName := "aws_rds_cluster_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -439,7 +439,7 @@ func TestAccRDSClusterParameterGroup_updateParameters(t *testing.T) { func TestAccRDSClusterParameterGroup_caseParameters(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBClusterParameterGroup + var v types.DBClusterParameterGroup resourceName := "aws_rds_cluster_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -476,7 +476,7 @@ func TestAccRDSClusterParameterGroup_caseParameters(t *testing.T) { func TestAccRDSClusterParameterGroup_dynamicDiffs(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBClusterParameterGroup + var v types.DBClusterParameterGroup resourceName := "aws_rds_cluster_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -515,7 +515,7 @@ func TestAccRDSClusterParameterGroup_dynamicDiffs(t *testing.T) { func testAccCheckClusterParameterGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).RDSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_rds_cluster_parameter_group" { @@ -532,7 +532,7 @@ func testAccCheckClusterParameterGroupDestroy(ctx context.Context) resource.Test return err } - return fmt.Errorf("RDS DB Cluster Parameter Group %s still exists", rs.Primary.ID) + return fmt.Errorf("RDS Cluster Parameter Group %s still exists", rs.Primary.ID) } return nil @@ -546,33 +546,38 @@ func testAccCheckClusterParameterNotUserDefined(ctx context.Context, n, paramNam return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No DB Parameter Group ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).RDSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) - opts := rds.DescribeDBClusterParametersInput{ + input := &rds.DescribeDBClusterParametersInput{ 
DBClusterParameterGroupName: aws.String(rs.Primary.ID), } userDefined := false - out, err := conn.DescribeDBClusterParametersWithContext(ctx, &opts) - for _, param := range out.Parameters { - if *param.ParameterName == paramName && aws.StringValue(param.ParameterValue) != "" { - // Some of these resets leave the parameter name present but with a nil value - userDefined = true + pages := rds.NewDescribeDBClusterParametersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return err + } + + for _, param := range page.Parameters { + if aws.ToString(param.ParameterName) == paramName && aws.ToString(param.ParameterValue) != "" { + // Some of these resets leave the parameter name present but with a nil value. + userDefined = true + } } } if userDefined { - return fmt.Errorf("DB Parameter %s is user defined", paramName) + return fmt.Errorf("Cluster Parameter %s is user defined", paramName) } - return err + + return nil } } -func testAccCheckClusterParameterGroupAttributes(v *rds.DBClusterParameterGroup, name string) resource.TestCheckFunc { +func testAccCheckClusterParameterGroupAttributes(v *types.DBClusterParameterGroup, name string) resource.TestCheckFunc { return func(s *terraform.State) error { if *v.DBClusterParameterGroupName != name { return fmt.Errorf("bad name: %#v expected: %v", *v.DBClusterParameterGroupName, name) @@ -586,20 +591,17 @@ func testAccCheckClusterParameterGroupAttributes(v *rds.DBClusterParameterGroup, } } -func testAccCheckClusterParameterGroupExists(ctx context.Context, n string, v *rds.DBClusterParameterGroup) resource.TestCheckFunc { +func testAccCheckClusterParameterGroupExists(ctx context.Context, n string, v *types.DBClusterParameterGroup) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return errors.New("No RDS DB Cluster Parameter Group ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).RDSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) output, err := tfrds.FindDBClusterParameterGroupByName(ctx, conn, rs.Primary.ID) + if err != nil { return err } diff --git a/internal/service/rds/cluster_role_association.go b/internal/service/rds/cluster_role_association.go index fa1513839a9..3ad6f855a66 100644 --- a/internal/service/rds/cluster_role_association.go +++ b/internal/service/rds/cluster_role_association.go @@ -5,24 +5,29 @@ package rds import ( "context" + "fmt" "log" + "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/rds" + "github.com/aws/aws-sdk-go-v2/service/rds/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -// 
@SDKResource("aws_rds_cluster_role_association") -func ResourceClusterRoleAssociation() *schema.Resource { +// @SDKResource("aws_rds_cluster_role_association", name="Cluster IAM Role Association") +func resourceClusterRoleAssociation() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceClusterRoleAssociationCreate, ReadWithoutTimeout: resourceClusterRoleAssociationRead, @@ -60,40 +65,29 @@ func ResourceClusterRoleAssociation() *schema.Resource { func resourceClusterRoleAssociationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) dbClusterID := d.Get("db_cluster_identifier").(string) roleARN := d.Get(names.AttrRoleARN).(string) + id := clusterRoleAssociationCreateResourceID(dbClusterID, roleARN) input := &rds.AddRoleToDBClusterInput{ DBClusterIdentifier: aws.String(dbClusterID), FeatureName: aws.String(d.Get("feature_name").(string)), RoleArn: aws.String(roleARN), } - err := retry.RetryContext(ctx, propagationTimeout, func() *retry.RetryError { - var err error - _, err = conn.AddRoleToDBClusterWithContext(ctx, input) - if err != nil { - if tfawserr.ErrMessageContains(err, "InvalidParameterValue", "IAM role ARN value is invalid or does not include the required permissions") { - return retry.RetryableError(err) - } - return retry.NonRetryableError(err) - } - return nil - }) - if tfresource.TimedOut(err) { - _, err = conn.AddRoleToDBClusterWithContext(ctx, input) - } + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (interface{}, error) { + return conn.AddRoleToDBCluster(ctx, input) + }, errCodeInvalidParameterValue, "IAM role ARN value is invalid or does not include the required permissions") + if err != nil { - return sdkdiag.AppendErrorf(diags, "creating RDS DB Cluster (%s) IAM Role (%s) Association: %s", dbClusterID, roleARN, err) + return sdkdiag.AppendErrorf(diags, "creating RDS Cluster IAM Role Association (%s): %s", id, err) } - d.SetId(ClusterRoleAssociationCreateResourceID(dbClusterID, roleARN)) + d.SetId(id) - _, err = waitDBClusterRoleAssociationCreated(ctx, conn, dbClusterID, roleARN, d.Timeout(schema.TimeoutCreate)) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for RDS DB Cluster (%s) IAM Role (%s) Association to create: %s", dbClusterID, roleARN, err) + if _, err := waitDBClusterRoleAssociationCreated(ctx, conn, dbClusterID, roleARN, d.Timeout(schema.TimeoutCreate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for RDS Cluster IAM Role Association (%s) create: %s", d.Id(), err) } return append(diags, resourceClusterRoleAssociationRead(ctx, d, meta)...) 
@@ -101,14 +95,14 @@ func resourceClusterRoleAssociationCreate(ctx context.Context, d *schema.Resourc func resourceClusterRoleAssociationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) - dbClusterID, roleARN, err := ClusterRoleAssociationParseResourceID(d.Id()) + dbClusterID, roleARN, err := clusterRoleAssociationParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "parsing RDS DB Cluster IAM Role Association ID: %s", err) + return sdkdiag.AppendFromErr(diags, err) } - output, err := FindDBClusterRoleByDBClusterIDAndRoleARN(ctx, conn, dbClusterID, roleARN) + output, err := findDBClusterRoleByTwoPartKey(ctx, conn, dbClusterID, roleARN) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] RDS DB Cluster (%s) IAM Role (%s) Association not found, removing from state", dbClusterID, roleARN) @@ -117,7 +111,7 @@ func resourceClusterRoleAssociationRead(ctx context.Context, d *schema.ResourceD } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading RDS DB Cluster (%s) IAM Role (%s) Association: %s", dbClusterID, roleARN, err) + return sdkdiag.AppendErrorf(diags, "reading RDS Cluster IAM Role Association (%s): %s", d.Id(), err) } d.Set("db_cluster_identifier", dbClusterID) @@ -129,35 +123,193 @@ func resourceClusterRoleAssociationRead(ctx context.Context, d *schema.ResourceD func resourceClusterRoleAssociationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) - dbClusterID, roleARN, err := ClusterRoleAssociationParseResourceID(d.Id()) + dbClusterID, roleARN, err := clusterRoleAssociationParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "parsing RDS DB Cluster IAM Role Association ID: %s", err) + return sdkdiag.AppendFromErr(diags, err) } - input := &rds.RemoveRoleFromDBClusterInput{ + log.Printf("[DEBUG] Deleting RDS Cluster IAM Role Association: %s", d.Id()) + _, err = conn.RemoveRoleFromDBCluster(ctx, &rds.RemoveRoleFromDBClusterInput{ DBClusterIdentifier: aws.String(dbClusterID), FeatureName: aws.String(d.Get("feature_name").(string)), RoleArn: aws.String(roleARN), + }) + + if errs.IsA[*types.DBClusterNotFoundFault](err) || errs.IsA[*types.DBClusterRoleNotFoundFault](err) { + return diags } - log.Printf("[DEBUG] Deleting RDS DB Cluster IAM Role Association: %s", d.Id()) - _, err = conn.RemoveRoleFromDBClusterWithContext(ctx, input) + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting RDS Cluster IAM Role Association (%s): %s", d.Id(), err) + } - if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBClusterNotFoundFault) || tfawserr.ErrCodeEquals(err, rds.ErrCodeDBClusterRoleNotFoundFault) { - return diags + if _, err := waitDBClusterRoleAssociationDeleted(ctx, conn, dbClusterID, roleARN, d.Timeout(schema.TimeoutDelete)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for RDS Cluster IAM Role Association (%s) delete: %s", d.Id(), err) } + return diags +} + +const clusterRoleAssociationResourceIDSeparator = "," + +func clusterRoleAssociationCreateResourceID(dbClusterID, roleARN string) string { + parts := []string{dbClusterID, roleARN} + id := strings.Join(parts, clusterRoleAssociationResourceIDSeparator) + + return id +} + +func clusterRoleAssociationParseResourceID(id string) (string, 
string, error) { + parts := strings.Split(id, clusterRoleAssociationResourceIDSeparator) + + if len(parts) == 2 && parts[0] != "" && parts[1] != "" { + return parts[0], parts[1], nil + } + + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected DBCLUSTERID%[2]sROLEARN", id, clusterRoleAssociationResourceIDSeparator) +} + +func findDBClusterRoleByTwoPartKey(ctx context.Context, conn *rds.Client, dbClusterID, roleARN string) (*types.DBClusterRole, error) { + dbCluster, err := findDBClusterByIDV2(ctx, conn, dbClusterID) + if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting RDS DB Cluster (%s) IAM Role (%s) Association: %s", dbClusterID, roleARN, err) + return nil, err } - _, err = waitDBClusterRoleAssociationDeleted(ctx, conn, dbClusterID, roleARN, d.Timeout(schema.TimeoutDelete)) + output, err := tfresource.AssertSingleValueResult(tfslices.Filter(dbCluster.AssociatedRoles, func(v types.DBClusterRole) bool { + return aws.ToString(v.RoleArn) == roleARN + })) if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for RDS DB Cluster (%s) IAM Role (%s) Association to delete: %s", dbClusterID, roleARN, err) + return nil, err } - return diags + if status := aws.ToString(output.Status); status == clusterRoleStatusDeleted { + return nil, &retry.NotFoundError{ + Message: status, + } + } + + return output, nil +} + +func statusDBClusterRole(ctx context.Context, conn *rds.Client, dbClusterID, roleARN string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findDBClusterRoleByTwoPartKey(ctx, conn, dbClusterID, roleARN) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.ToString(output.Status), nil + } +} + +func waitDBClusterRoleAssociationCreated(ctx context.Context, conn *rds.Client, dbClusterID, roleARN string, timeout time.Duration) (*types.DBClusterRole, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{clusterRoleStatusPending}, + Target: []string{clusterRoleStatusActive}, + Refresh: statusDBClusterRole(ctx, conn, dbClusterID, roleARN), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.DBClusterRole); ok { + return output, err + } + + return nil, err +} + +func waitDBClusterRoleAssociationDeleted(ctx context.Context, conn *rds.Client, dbClusterID, roleARN string, timeout time.Duration) (*types.DBClusterRole, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{clusterRoleStatusActive, clusterRoleStatusPending}, + Target: []string{}, + Refresh: statusDBClusterRole(ctx, conn, dbClusterID, roleARN), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.DBClusterRole); ok { + return output, err + } + + return nil, err +} + +// TODO Remove once aws_rds_cluster is migrated. +func findDBClusterByIDV2(ctx context.Context, conn *rds.Client, id string) (*types.DBCluster, error) { + input := &rds.DescribeDBClustersInput{ + DBClusterIdentifier: aws.String(id), + } + output, err := findDBClusterV2(ctx, conn, input, tfslices.PredicateTrue[*types.DBCluster]()) + + if err != nil { + return nil, err + } + + // Eventual consistency check. 
+ if arn.IsARN(id) { + if aws.ToString(output.DBClusterArn) != id { + return nil, &retry.NotFoundError{ + LastRequest: input, + } + } + } else if aws.ToString(output.DBClusterIdentifier) != id { + return nil, &retry.NotFoundError{ + LastRequest: input, + } + } + + return output, nil +} + +func findDBClusterV2(ctx context.Context, conn *rds.Client, input *rds.DescribeDBClustersInput, filter tfslices.Predicate[*types.DBCluster]) (*types.DBCluster, error) { + output, err := findDBClustersV2(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findDBClustersV2(ctx context.Context, conn *rds.Client, input *rds.DescribeDBClustersInput, filter tfslices.Predicate[*types.DBCluster]) ([]types.DBCluster, error) { + var output []types.DBCluster + + pages := rds.NewDescribeDBClustersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*types.DBClusterNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + for _, v := range page.DBClusters { + if filter(&v) { + output = append(output, v) + } + } + } + + return output, nil } diff --git a/internal/service/rds/cluster_role_association_test.go b/internal/service/rds/cluster_role_association_test.go index e5da670e4a5..e1e10471d3c 100644 --- a/internal/service/rds/cluster_role_association_test.go +++ b/internal/service/rds/cluster_role_association_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/rds" + "github.com/aws/aws-sdk-go-v2/service/rds/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,7 +22,7 @@ import ( func TestAccRDSClusterRoleAssociation_basic(t *testing.T) { ctx := acctest.Context(t) - var dbClusterRole rds.DBClusterRole + var dbClusterRole types.DBClusterRole rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) dbClusterResourceName := "aws_rds_cluster.test" iamRoleResourceName := "aws_iam_role.test" @@ -54,7 +54,7 @@ func TestAccRDSClusterRoleAssociation_basic(t *testing.T) { func TestAccRDSClusterRoleAssociation_disappears(t *testing.T) { ctx := acctest.Context(t) - var dbClusterRole rds.DBClusterRole + var dbClusterRole types.DBClusterRole rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_rds_cluster_role_association.test" @@ -78,7 +78,7 @@ func TestAccRDSClusterRoleAssociation_disappears(t *testing.T) { func TestAccRDSClusterRoleAssociation_Disappears_cluster(t *testing.T) { ctx := acctest.Context(t) - var dbClusterRole rds.DBClusterRole + var dbClusterRole types.DBClusterRole rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_rds_cluster_role_association.test" clusterResourceName := "aws_rds_cluster.test" @@ -103,7 +103,7 @@ func TestAccRDSClusterRoleAssociation_Disappears_cluster(t *testing.T) { func TestAccRDSClusterRoleAssociation_Disappears_role(t *testing.T) { ctx := acctest.Context(t) - var dbClusterRole rds.DBClusterRole + var dbClusterRole types.DBClusterRole rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_rds_cluster_role_association.test" roleResourceName := "aws_iam_role.test" @@ -126,27 +126,22 @@ func TestAccRDSClusterRoleAssociation_Disappears_role(t *testing.T) { }) } -func 
testAccCheckClusterRoleAssociationExists(ctx context.Context, resourceName string, v *rds.DBClusterRole) resource.TestCheckFunc { +func testAccCheckClusterRoleAssociationExists(ctx context.Context, resourceName string, v *types.DBClusterRole) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] - if !ok { return fmt.Errorf("Not found: %s", resourceName) } - dbClusterID, roleARN, err := tfrds.ClusterRoleAssociationParseResourceID(rs.Primary.ID) - if err != nil { - return err - } + conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) - conn := acctest.Provider.Meta().(*conns.AWSClient).RDSConn(ctx) + output, err := tfrds.FindDBClusterRoleByTwoPartKey(ctx, conn, rs.Primary.Attributes["db_cluster_identifier"], rs.Primary.Attributes[names.AttrRoleARN]) - role, err := tfrds.FindDBClusterRoleByDBClusterIDAndRoleARN(ctx, conn, dbClusterID, roleARN) if err != nil { return err } - *v = *role + *v = *output return nil } @@ -154,19 +149,14 @@ func testAccCheckClusterRoleAssociationExists(ctx context.Context, resourceName func testAccCheckClusterRoleAssociationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).RDSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_db_cluster_role_association" { continue } - dbClusterID, roleARN, err := tfrds.ClusterRoleAssociationParseResourceID(rs.Primary.ID) - if err != nil { - return err - } - - _, err = tfrds.FindDBClusterRoleByDBClusterIDAndRoleARN(ctx, conn, dbClusterID, roleARN) + _, err := tfrds.FindDBClusterRoleByTwoPartKey(ctx, conn, rs.Primary.Attributes["db_cluster_identifier"], rs.Primary.Attributes[names.AttrRoleARN]) if tfresource.NotFound(err) { continue diff --git a/internal/service/rds/cluster_snapshot.go b/internal/service/rds/cluster_snapshot.go index 751e08e5397..7523909ad0d 100644 --- a/internal/service/rds/cluster_snapshot.go +++ b/internal/service/rds/cluster_snapshot.go @@ -396,9 +396,5 @@ func findDBClusterSnapshotAttributes(ctx context.Context, conn *rds.Client, inpu return nil, tfresource.NewEmptyResultError(input) } - f := func(v types.DBClusterSnapshotAttribute) bool { - return filter(&v) - } - - return tfslices.Filter(output.DBClusterSnapshotAttributesResult.DBClusterSnapshotAttributes, f), nil + return tfslices.Filter(output.DBClusterSnapshotAttributesResult.DBClusterSnapshotAttributes, tfslices.PredicateValue(filter)), nil } diff --git a/internal/service/rds/cluster_test.go b/internal/service/rds/cluster_test.go index 767465ed38b..bc838bca70a 100644 --- a/internal/service/rds/cluster_test.go +++ b/internal/service/rds/cluster_test.go @@ -58,6 +58,7 @@ func testAccErrorCheckSkip(t *testing.T) resource.ErrorCheckFunc { "requested engine version was not found or does not support parallelquery functionality", "Backtrack is not enabled for the aurora engine", "Read replica DB clusters are not available in this region for engine aurora", + "no matching RDS Reserved Instance Offering found", ) } diff --git a/internal/service/rds/consts.go b/internal/service/rds/consts.go index ec4b746a099..8377892463f 100644 --- a/internal/service/rds/consts.go +++ b/internal/service/rds/consts.go @@ -44,6 +44,12 @@ const ( clusterSnapshotAttributeNameRestore = "restore" ) +const ( + clusterEndpointStatusAvailable = "available" + clusterEndpointStatusCreating = "creating" + clusterEndpointStatusDeleting 
= "deleting" +) + const ( storageTypeStandard = "standard" storageTypeGP2 = "gp2" @@ -134,8 +140,12 @@ const ( ) const ( - DBSnapshotAvailable = "available" - DBSnapshotCreating = "creating" + dbSnapshotAvailable = "available" + dbSnapshotCreating = "creating" +) + +const ( + dbSnapshotAttributeNameRestore = "restore" ) const ( @@ -314,3 +324,9 @@ const ( ReservedInstanceStateRetired = "retired" ReservedInstanceStatePaymentPending = "payment-pending" ) + +const ( + parameterSourceEngineDefault = "engine-default" + parameterSourceSystem = "system" + parameterSourceUser = "user" +) diff --git a/internal/service/rds/exports_test.go b/internal/service/rds/exports_test.go index ceead998a56..43e13208758 100644 --- a/internal/service/rds/exports_test.go +++ b/internal/service/rds/exports_test.go @@ -7,25 +7,37 @@ package rds var ( ResourceCertificate = resourceCertificate ResourceCluster = resourceCluster + ResourceClusterEndpoint = resourceClusterEndpoint + ResourceClusterParameterGroup = resourceClusterParameterGroup + ResourceClusterRoleAssociation = resourceClusterRoleAssociation ResourceClusterSnapshot = resourceClusterSnapshot ResourceEventSubscription = resourceEventSubscription + ResourceParameterGroup = resourceParameterGroup ResourceProxy = resourceProxy ResourceProxyDefaultTargetGroup = resourceProxyDefaultTargetGroup ResourceProxyEndpoint = resourceProxyEndpoint ResourceProxyTarget = resourceProxyTarget + ResourceSnapshot = resourceSnapshot + ResourceSnapshotCopy = resourceSnapshotCopy ResourceSubnetGroup = resourceSubnetGroup + FindDBClusterEndpointByID = findDBClusterEndpointByID + FindDBClusterParameterGroupByName = findDBClusterParameterGroupByName + FindDBClusterRoleByTwoPartKey = findDBClusterRoleByTwoPartKey FindDBClusterSnapshotByID = findDBClusterSnapshotByID FindDBInstanceByID = findDBInstanceByIDSDKv1 + FindDBParameterGroupByName = findDBParameterGroupByName FindDBProxyByName = findDBProxyByName FindDBProxyEndpointByTwoPartKey = findDBProxyEndpointByTwoPartKey FindDBProxyTargetByFourPartKey = findDBProxyTargetByFourPartKey + FindDBSnapshotByID = findDBSnapshotByID FindDBSubnetGroupByName = findDBSubnetGroupByName FindDefaultCertificate = findDefaultCertificate FindDefaultDBProxyTargetGroupByDBProxyName = findDefaultDBProxyTargetGroupByDBProxyName FindEventSubscriptionByID = findEventSubscriptionByID ListTags = listTags NewBlueGreenOrchestrator = newBlueGreenOrchestrator + ParameterGroupModifyChunk = parameterGroupModifyChunk ParseDBInstanceARN = parseDBInstanceARN ProxyTargetParseResourceID = proxyTargetParseResourceID WaitBlueGreenDeploymentDeleted = waitBlueGreenDeploymentDeleted diff --git a/internal/service/rds/find.go b/internal/service/rds/find.go deleted file mode 100644 index 44401bb7ef4..00000000000 --- a/internal/service/rds/find.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package rds - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func FindDBClusterRoleByDBClusterIDAndRoleARN(ctx context.Context, conn *rds.RDS, dbClusterID, roleARN string) (*rds.DBClusterRole, error) { - dbCluster, err := FindDBClusterByID(ctx, conn, dbClusterID) - if err != nil { - return nil, err - } - - for _, associatedRole := range dbCluster.AssociatedRoles { - if aws.StringValue(associatedRole.RoleArn) == roleARN { - if status := aws.StringValue(associatedRole.Status); status == clusterRoleStatusDeleted { - return nil, &retry.NotFoundError{ - Message: status, - } - } - - return associatedRole, nil - } - } - - return nil, &retry.NotFoundError{} -} - -func FindReservedDBInstanceByID(ctx context.Context, conn *rds.RDS, id string) (*rds.ReservedDBInstance, error) { - input := &rds.DescribeReservedDBInstancesInput{ - ReservedDBInstanceId: aws.String(id), - } - - output, err := conn.DescribeReservedDBInstancesWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, rds.ErrCodeReservedDBInstanceNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || len(output.ReservedDBInstances) == 0 || output.ReservedDBInstances[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(output.ReservedDBInstances); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - return output.ReservedDBInstances[0], nil -} diff --git a/internal/service/rds/flex.go b/internal/service/rds/flex.go index 892118e908c..bef9051f62f 100644 --- a/internal/service/rds/flex.go +++ b/internal/service/rds/flex.go @@ -6,352 +6,60 @@ package rds import ( "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/rds/types" "github.com/hashicorp/terraform-provider-aws/names" ) -func expandScalingConfiguration(tfMap map[string]interface{}) *rds.ScalingConfiguration { - if tfMap == nil { - return nil - } - - apiObject := &rds.ScalingConfiguration{} - - if v, ok := tfMap["auto_pause"].(bool); ok { - apiObject.AutoPause = aws.Bool(v) - } - - if v, ok := tfMap[names.AttrMaxCapacity].(int); ok { - apiObject.MaxCapacity = aws.Int64(int64(v)) - } - - if v, ok := tfMap["min_capacity"].(int); ok { - apiObject.MinCapacity = aws.Int64(int64(v)) - } - - if v, ok := tfMap["seconds_before_timeout"].(int); ok { - apiObject.SecondsBeforeTimeout = aws.Int64(int64(v)) - } - - if v, ok := tfMap["seconds_until_auto_pause"].(int); ok { - apiObject.SecondsUntilAutoPause = aws.Int64(int64(v)) - } - - if v, ok := tfMap["timeout_action"].(string); ok && v != "" { - apiObject.TimeoutAction = aws.String(v) - } - - return apiObject -} - -func flattenManagedMasterUserSecret(apiObject *rds.MasterUserSecret) map[string]interface{} { - if apiObject == nil { - return nil - } - - tfMap := map[string]interface{}{} - if v := apiObject.KmsKeyId; v != nil { - tfMap[names.AttrKMSKeyID] = aws.StringValue(v) - } - if v := apiObject.SecretArn; v != nil { - tfMap["secret_arn"] = 
aws.StringValue(v) - } - if v := apiObject.SecretStatus; v != nil { - tfMap["secret_status"] = aws.StringValue(v) - } - - return tfMap -} - -func flattenScalingConfigurationInfo(apiObject *rds.ScalingConfigurationInfo) map[string]interface{} { - if apiObject == nil { - return nil - } - - tfMap := map[string]interface{}{} - - if v := apiObject.AutoPause; v != nil { - tfMap["auto_pause"] = aws.BoolValue(v) - } +func expandParameters(tfList []interface{}) []types.Parameter { + var apiObjects []types.Parameter - if v := apiObject.MaxCapacity; v != nil { - tfMap[names.AttrMaxCapacity] = aws.Int64Value(v) - } - - if v := apiObject.MaxCapacity; v != nil { - tfMap[names.AttrMaxCapacity] = aws.Int64Value(v) - } - - if v := apiObject.MinCapacity; v != nil { - tfMap["min_capacity"] = aws.Int64Value(v) - } - - if v := apiObject.SecondsBeforeTimeout; v != nil { - tfMap["seconds_before_timeout"] = aws.Int64Value(v) - } - - if v := apiObject.SecondsUntilAutoPause; v != nil { - tfMap["seconds_until_auto_pause"] = aws.Int64Value(v) - } - - if v := apiObject.TimeoutAction; v != nil { - tfMap["timeout_action"] = aws.StringValue(v) - } - - return tfMap -} - -func expandServerlessV2ScalingConfiguration(tfMap map[string]interface{}) *rds.ServerlessV2ScalingConfiguration { - if tfMap == nil { - return nil - } - - apiObject := &rds.ServerlessV2ScalingConfiguration{} - - if v, ok := tfMap[names.AttrMaxCapacity].(float64); ok && v != 0.0 { - apiObject.MaxCapacity = aws.Float64(v) - } - - if v, ok := tfMap["min_capacity"].(float64); ok && v != 0.0 { - apiObject.MinCapacity = aws.Float64(v) - } - - return apiObject -} - -func flattenServerlessV2ScalingConfigurationInfo(apiObject *rds.ServerlessV2ScalingConfigurationInfo) map[string]interface{} { - if apiObject == nil { - return nil - } - - tfMap := map[string]interface{}{} - - if v := apiObject.MaxCapacity; v != nil { - tfMap[names.AttrMaxCapacity] = aws.Float64Value(v) - } - - if v := apiObject.MinCapacity; v != nil { - tfMap["min_capacity"] = aws.Float64Value(v) - } - - return tfMap -} - -func expandOptionConfiguration(configured []interface{}) []*rds.OptionConfiguration { - var option []*rds.OptionConfiguration - - for _, pRaw := range configured { - data := pRaw.(map[string]interface{}) - - o := &rds.OptionConfiguration{ - OptionName: aws.String(data["option_name"].(string)), - } - - if raw, ok := data[names.AttrPort]; ok { - port := raw.(int) - if port != 0 { - o.Port = aws.Int64(int64(port)) - } - } - - if raw, ok := data["db_security_group_memberships"]; ok { - memberships := flex.ExpandStringSet(raw.(*schema.Set)) - if len(memberships) > 0 { - o.DBSecurityGroupMemberships = memberships - } - } - - if raw, ok := data["vpc_security_group_memberships"]; ok { - memberships := flex.ExpandStringSet(raw.(*schema.Set)) - if len(memberships) > 0 { - o.VpcSecurityGroupMemberships = memberships - } - } - - if raw, ok := data["option_settings"]; ok { - o.OptionSettings = expandOptionSetting(raw.(*schema.Set).List()) - } - - if raw, ok := data[names.AttrVersion]; ok && raw.(string) != "" { - o.OptionVersion = aws.String(raw.(string)) - } - - option = append(option, o) - } - - return option -} - -// Flattens an array of Options into a []map[string]interface{} -func flattenOptions(apiOptions []*rds.Option, optionConfigurations []*rds.OptionConfiguration) []map[string]interface{} { - result := make([]map[string]interface{}, 0) - - for _, apiOption := range apiOptions { - if apiOption == nil || apiOption.OptionName == nil { + for _, tfMapRaw := range tfList { + tfMap, ok := 
tfMapRaw.(map[string]interface{}) + if !ok { continue } - var configuredOption *rds.OptionConfiguration - - for _, optionConfiguration := range optionConfigurations { - if aws.StringValue(apiOption.OptionName) == aws.StringValue(optionConfiguration.OptionName) { - configuredOption = optionConfiguration - break - } - } - - dbSecurityGroupMemberships := make([]interface{}, 0) - for _, db := range apiOption.DBSecurityGroupMemberships { - if db != nil { - dbSecurityGroupMemberships = append(dbSecurityGroupMemberships, aws.StringValue(db.DBSecurityGroupName)) - } - } - - optionSettings := make([]interface{}, 0) - for _, apiOptionSetting := range apiOption.OptionSettings { - // The RDS API responds with all settings. Omit settings that match default value, - // but only if unconfigured. This is to prevent operators from continually needing - // to continually update their Terraform configurations to match new option settings - // when added by the API. - var configuredOptionSetting *rds.OptionSetting - - if configuredOption != nil { - for _, configuredOptionOptionSetting := range configuredOption.OptionSettings { - if aws.StringValue(apiOptionSetting.Name) == aws.StringValue(configuredOptionOptionSetting.Name) { - configuredOptionSetting = configuredOptionOptionSetting - break - } - } - } - - if configuredOptionSetting == nil && aws.StringValue(apiOptionSetting.Value) == aws.StringValue(apiOptionSetting.DefaultValue) { - continue - } - - optionSetting := map[string]interface{}{ - names.AttrName: aws.StringValue(apiOptionSetting.Name), - names.AttrValue: aws.StringValue(apiOptionSetting.Value), - } - - // Some values, like passwords, are sent back from the API as ****. - // Set the response to match the configuration to prevent an unexpected difference - if configuredOptionSetting != nil && aws.StringValue(apiOptionSetting.Value) == "****" { - optionSetting[names.AttrValue] = aws.StringValue(configuredOptionSetting.Value) - } - - optionSettings = append(optionSettings, optionSetting) - } - optionSettingsResource := &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrName: { - Type: schema.TypeString, - Required: true, - }, - names.AttrValue: { - Type: schema.TypeString, - Required: true, - }, - }, - } - - vpcSecurityGroupMemberships := make([]interface{}, 0) - for _, vpc := range apiOption.VpcSecurityGroupMemberships { - if vpc != nil { - vpcSecurityGroupMemberships = append(vpcSecurityGroupMemberships, aws.StringValue(vpc.VpcSecurityGroupId)) - } - } - - r := map[string]interface{}{ - "db_security_group_memberships": schema.NewSet(schema.HashString, dbSecurityGroupMemberships), - "option_name": aws.StringValue(apiOption.OptionName), - "option_settings": schema.NewSet(schema.HashResource(optionSettingsResource), optionSettings), - "vpc_security_group_memberships": schema.NewSet(schema.HashString, vpcSecurityGroupMemberships), - } - - if apiOption.OptionVersion != nil && configuredOption != nil && configuredOption.OptionVersion != nil { - r[names.AttrVersion] = aws.StringValue(apiOption.OptionVersion) + if tfMap[names.AttrName].(string) == "" { + continue } - if apiOption.Port != nil && configuredOption != nil && configuredOption.Port != nil { - r[names.AttrPort] = aws.Int64Value(apiOption.Port) + apiObject := types.Parameter{ + ParameterName: aws.String(strings.ToLower(tfMap[names.AttrName].(string))), + ParameterValue: aws.String(tfMap[names.AttrValue].(string)), } - result = append(result, r) - } - - return result -} - -func expandOptionSetting(list []interface{}) 
[]*rds.OptionSetting { - options := make([]*rds.OptionSetting, 0, len(list)) - - for _, oRaw := range list { - data := oRaw.(map[string]interface{}) - - o := &rds.OptionSetting{ - Name: aws.String(data[names.AttrName].(string)), - Value: aws.String(data[names.AttrValue].(string)), + if v, ok := tfMap["apply_method"].(string); ok && v != "" { + apiObject.ApplyMethod = types.ApplyMethod(strings.ToLower(v)) } - options = append(options, o) + apiObjects = append(apiObjects, apiObject) } - return options + return apiObjects } -// Takes the result of flatmap.Expand for an array of parameters and -// returns Parameter API compatible objects -func expandParameters(configured []interface{}) []*rds.Parameter { - var parameters []*rds.Parameter - - // Loop over our configured parameters and create - // an array of aws-sdk-go compatible objects - for _, pRaw := range configured { - data := pRaw.(map[string]interface{}) +func flattenParameters(apiObject []types.Parameter) []interface{} { + apiObjects := make([]interface{}, 0) - if data[names.AttrName].(string) == "" { + for _, apiObject := range apiObject { + if apiObject.ParameterName == nil { continue } - p := &rds.Parameter{ - ParameterName: aws.String(strings.ToLower(data[names.AttrName].(string))), - ParameterValue: aws.String(data[names.AttrValue].(string)), - } - - if data["apply_method"].(string) != "" { - p.ApplyMethod = aws.String(strings.ToLower(data["apply_method"].(string))) - } - - parameters = append(parameters, p) - } - - return parameters -} + tfMap := make(map[string]interface{}) -// Flattens an array of Parameters into a []map[string]interface{} -func flattenParameters(list []*rds.Parameter) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - for _, i := range list { - if i.ParameterName != nil { - r := make(map[string]interface{}) - if i.ApplyMethod != nil { - r["apply_method"] = strings.ToLower(aws.StringValue(i.ApplyMethod)) - } + tfMap["apply_method"] = strings.ToLower(string(apiObject.ApplyMethod)) + tfMap[names.AttrName] = strings.ToLower(aws.ToString(apiObject.ParameterName)) - r[names.AttrName] = strings.ToLower(aws.StringValue(i.ParameterName)) - - // Default empty string, guard against nil parameter values - r[names.AttrValue] = "" - if i.ParameterValue != nil { - r[names.AttrValue] = aws.StringValue(i.ParameterValue) - } - - result = append(result, r) + // Default empty string, guard against nil parameter values. + tfMap[names.AttrValue] = "" + if apiObject.ParameterValue != nil { + tfMap[names.AttrValue] = aws.ToString(apiObject.ParameterValue) } + + apiObjects = append(apiObjects, tfMap) } - return result + return apiObjects } diff --git a/internal/service/rds/flex_test.go b/internal/service/rds/flex_test.go deleted file mode 100644 index 75492b43d70..00000000000 --- a/internal/service/rds/flex_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package rds - -import ( - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform-provider-aws/names" -) - -func TestExpandParameters(t *testing.T) { - t.Parallel() - - expanded := []interface{}{ - map[string]interface{}{ - names.AttrName: "character_set_client", - names.AttrValue: "utf8", - "apply_method": "immediate", - }, - } - parameters := expandParameters(expanded) - - expected := &rds.Parameter{ - ParameterName: aws.String("character_set_client"), - ParameterValue: aws.String("utf8"), - ApplyMethod: aws.String("immediate"), - } - - if !reflect.DeepEqual(parameters[0], expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - parameters[0], - expected) - } -} - -func TestFlattenParameters(t *testing.T) { - t.Parallel() - - cases := []struct { - Input []*rds.Parameter - Output []map[string]interface{} - }{ - { - Input: []*rds.Parameter{ - { - ParameterName: aws.String("character_set_client"), - ParameterValue: aws.String("utf8"), - }, - }, - Output: []map[string]interface{}{ - { - names.AttrName: "character_set_client", - names.AttrValue: "utf8", - }, - }, - }, - { - Input: []*rds.Parameter{ - { - ParameterName: aws.String("character_set_client"), - ParameterValue: aws.String("utf8"), - ApplyMethod: aws.String("immediate"), - }, - }, - Output: []map[string]interface{}{ - { - names.AttrName: "character_set_client", - names.AttrValue: "utf8", - "apply_method": "immediate", - }, - }, - }, - } - - for _, tc := range cases { - output := flattenParameters(tc.Input) - if !reflect.DeepEqual(output, tc.Output) { - t.Fatalf("Got:\n\n%#v\n\nExpected:\n\n%#v", output, tc.Output) - } - } -} diff --git a/internal/service/rds/id.go b/internal/service/rds/id.go deleted file mode 100644 index fefb91a364a..00000000000 --- a/internal/service/rds/id.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package rds - -import ( - "fmt" - "strings" -) - -const clusterRoleAssociationResourceIDSeparator = "," - -func ClusterRoleAssociationCreateResourceID(dbClusterID, roleARN string) string { - parts := []string{dbClusterID, roleARN} - id := strings.Join(parts, clusterRoleAssociationResourceIDSeparator) - - return id -} - -func ClusterRoleAssociationParseResourceID(id string) (string, string, error) { - parts := strings.Split(id, clusterRoleAssociationResourceIDSeparator) - - if len(parts) == 2 && parts[0] != "" && parts[1] != "" { - return parts[0], parts[1], nil - } - - return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected DBCLUSTERID%[2]sROLEARN", id, clusterRoleAssociationResourceIDSeparator) -} diff --git a/internal/service/rds/instance_test.go b/internal/service/rds/instance_test.go index e41a8e8f2b7..e16ef0626ab 100644 --- a/internal/service/rds/instance_test.go +++ b/internal/service/rds/instance_test.go @@ -14,8 +14,8 @@ import ( "github.com/YakDriver/regexache" rds_sdkv2 "github.com/aws/aws-sdk-go-v2/service/rds" + "github.com/aws/aws-sdk-go-v2/service/rds/types" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/rds" tfawserr_sdkv2 "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" @@ -349,7 +349,7 @@ func TestAccRDSInstance_customIAMInstanceProfile(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID) + acctest.PreCheckPartitionNot(t, names.USGovCloudPartitionID) }, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -2387,7 +2387,7 @@ func TestAccRDSInstance_SnapshotIdentifier_basic(t *testing.T) { } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -2432,7 +2432,7 @@ func TestAccRDSInstance_SnapshotIdentifier_ManageMasterPasswordKMSKey(t *testing } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -2599,7 +2599,7 @@ func TestAccRDSInstance_SnapshotIdentifier_allocatedStorage(t *testing.T) { } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -2632,7 +2632,7 @@ func TestAccRDSInstance_SnapshotIdentifier_io1Storage(t *testing.T) { } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -2665,7 +2665,7 @@ func TestAccRDSInstance_SnapshotIdentifier_io2Storage(t *testing.T) { } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -2698,7 +2698,7 @@ func TestAccRDSInstance_SnapshotIdentifier_allowMajorVersionUpgrade(t *testing.T } var dbInstance, sourceDbInstance 
rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -2731,7 +2731,7 @@ func TestAccRDSInstance_SnapshotIdentifier_autoMinorVersionUpgrade(t *testing.T) } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -2764,7 +2764,7 @@ func TestAccRDSInstance_SnapshotIdentifier_availabilityZone(t *testing.T) { } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -2796,7 +2796,7 @@ func TestAccRDSInstance_SnapshotIdentifier_backupRetentionPeriodOverride(t *test } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -2829,7 +2829,7 @@ func TestAccRDSInstance_SnapshotIdentifier_backupRetentionPeriodUnset(t *testing } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -2862,7 +2862,7 @@ func TestAccRDSInstance_SnapshotIdentifier_backupWindow(t *testing.T) { } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -2895,7 +2895,7 @@ func TestAccRDSInstance_SnapshotIdentifier_dbSubnetGroupName(t *testing.T) { } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) dbSubnetGroupResourceName := "aws_db_subnet_group.test" sourceDbResourceName := "aws_db_instance.source" @@ -2928,7 +2928,7 @@ func TestAccRDSInstance_SnapshotIdentifier_dbSubnetGroupNameRAMShared(t *testing } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) dbSubnetGroupResourceName := "aws_db_subnet_group.test" sourceDbResourceName := "aws_db_instance.source" @@ -2965,7 +2965,7 @@ func TestAccRDSInstance_SnapshotIdentifier_dbSubnetGroupNameVPCSecurityGroupIDs( } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) dbSubnetGroupResourceName := "aws_db_subnet_group.test" sourceDbResourceName := "aws_db_instance.source" @@ -2998,7 +2998,7 @@ func TestAccRDSInstance_SnapshotIdentifier_deletionProtection(t *testing.T) { } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -3041,7 +3041,7 @@ func TestAccRDSInstance_SnapshotIdentifier_iamDatabaseAuthenticationEnabled(t *t } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -3074,7 +3074,7 @@ func TestAccRDSInstance_SnapshotIdentifier_maintenanceWindow(t *testing.T) { } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -3107,7 +3107,7 @@ func TestAccRDSInstance_SnapshotIdentifier_maxAllocatedStorage(t *testing.T) { } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -3140,7 +3140,7 @@ func TestAccRDSInstance_SnapshotIdentifier_monitoring(t *testing.T) { } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -3173,7 +3173,7 @@ func TestAccRDSInstance_SnapshotIdentifier_multiAZ(t *testing.T) { } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -3206,7 +3206,7 @@ func TestAccRDSInstance_SnapshotIdentifier_multiAZSQLServer(t *testing.T) { } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -3240,7 +3240,7 @@ func TestAccRDSInstance_SnapshotIdentifier_parameterGroupName(t *testing.T) { } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -3274,7 +3274,7 @@ func TestAccRDSInstance_SnapshotIdentifier_port(t *testing.T) { } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -3307,7 +3307,7 @@ func TestAccRDSInstance_SnapshotIdentifier_tags(t *testing.T) { } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -3351,7 +3351,7 @@ func TestAccRDSInstance_SnapshotIdentifier_tagsRemove(t *testing.T) { ctx := acctest.Context(t) var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -3393,7 +3393,7 @@ func TestAccRDSInstance_SnapshotIdentifier_vpcSecurityGroupIDs(t *testing.T) { } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -3429,7 +3429,7 @@ func TestAccRDSInstance_SnapshotIdentifier_vpcSecurityGroupIDsTags(t *testing.T) } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName 
:= sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.source" @@ -4643,7 +4643,7 @@ func TestAccRDSInstance_SnapshotIdentifier_performanceInsightsEnabled(t *testing } var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) kmsKeyResourceName := "aws_kms_key.test" @@ -5093,7 +5093,7 @@ func TestAccRDSInstance_Outposts_coIPRestoreToPointInTime(t *testing.T) { func TestAccRDSInstance_Outposts_coIPSnapshotIdentifier(t *testing.T) { ctx := acctest.Context(t) var dbInstance, sourceDbInstance rds.DBInstance - var dbSnapshot rds.DBSnapshot + var dbSnapshot types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) sourceDbResourceName := "aws_db_instance.test" @@ -6453,7 +6453,8 @@ func testAccCheckInstanceReplicaAttributes(source, replica *rds.DBInstance) reso // The snapshot is deleted. func testAccCheckInstanceDestroyWithFinalSnapshot(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).RDSConn(ctx) + conn1 := acctest.Provider.Meta().(*conns.AWSClient).RDSConn(ctx) + conn2 := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_db_instance" { @@ -6461,12 +6462,12 @@ func testAccCheckInstanceDestroyWithFinalSnapshot(ctx context.Context) resource. } finalSnapshotID := rs.Primary.Attributes[names.AttrFinalSnapshotIdentifier] - output, err := tfrds.FindDBSnapshotByID(ctx, conn, finalSnapshotID) + output, err := tfrds.FindDBSnapshotByID(ctx, conn2, finalSnapshotID) if err != nil { return err } - tags, err := tfrds.ListTags(ctx, conn, aws.StringValue(output.DBSnapshotArn)) + tags, err := tfrds.ListTags(ctx, conn1, aws.StringValue(output.DBSnapshotArn)) if err != nil { return err } @@ -6475,7 +6476,7 @@ func testAccCheckInstanceDestroyWithFinalSnapshot(ctx context.Context) resource. return fmt.Errorf("Name tag not found") } - _, err = conn.DeleteDBSnapshotWithContext(ctx, &rds.DeleteDBSnapshotInput{ + _, err = conn2.DeleteDBSnapshot(ctx, &rds_sdkv2.DeleteDBSnapshotInput{ DBSnapshotIdentifier: aws.String(finalSnapshotID), }) @@ -6483,7 +6484,7 @@ func testAccCheckInstanceDestroyWithFinalSnapshot(ctx context.Context) resource. return err } - _, err = tfrds.FindDBInstanceByID(ctx, conn, rs.Primary.Attributes[names.AttrIdentifier]) + _, err = tfrds.FindDBInstanceByID(ctx, conn1, rs.Primary.Attributes[names.AttrIdentifier]) if tfresource.NotFound(err) { continue @@ -6505,7 +6506,8 @@ func testAccCheckInstanceDestroyWithFinalSnapshot(ctx context.Context) resource. 
// - No DBSnapshot has been produced func testAccCheckInstanceDestroyWithoutFinalSnapshot(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).RDSConn(ctx) + conn1 := acctest.Provider.Meta().(*conns.AWSClient).RDSConn(ctx) + conn2 := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_db_instance" { @@ -6513,7 +6515,7 @@ func testAccCheckInstanceDestroyWithoutFinalSnapshot(ctx context.Context) resour } finalSnapshotID := rs.Primary.Attributes[names.AttrFinalSnapshotIdentifier] - _, err := tfrds.FindDBSnapshotByID(ctx, conn, finalSnapshotID) + _, err := tfrds.FindDBSnapshotByID(ctx, conn2, finalSnapshotID) if err != nil { if !tfresource.NotFound(err) { @@ -6523,7 +6525,7 @@ func testAccCheckInstanceDestroyWithoutFinalSnapshot(ctx context.Context) resour return fmt.Errorf("RDS DB Snapshot %s exists", finalSnapshotID) } - _, err = tfrds.FindDBInstanceByID(ctx, conn, rs.Primary.Attributes[names.AttrIdentifier]) + _, err = tfrds.FindDBInstanceByID(ctx, conn1, rs.Primary.Attributes[names.AttrIdentifier]) if tfresource.NotFound(err) { continue diff --git a/internal/service/rds/option_group.go b/internal/service/rds/option_group.go index 0eba392459b..fb390899940 100644 --- a/internal/service/rds/option_group.go +++ b/internal/service/rds/option_group.go @@ -18,6 +18,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -344,3 +345,165 @@ func flattenOptionNames(configured []interface{}) []*string { return optionNames } + +func expandOptionConfiguration(configured []interface{}) []*rds.OptionConfiguration { + var option []*rds.OptionConfiguration + + for _, pRaw := range configured { + data := pRaw.(map[string]interface{}) + + o := &rds.OptionConfiguration{ + OptionName: aws.String(data["option_name"].(string)), + } + + if raw, ok := data[names.AttrPort]; ok { + port := raw.(int) + if port != 0 { + o.Port = aws.Int64(int64(port)) + } + } + + if raw, ok := data["db_security_group_memberships"]; ok { + memberships := flex.ExpandStringSet(raw.(*schema.Set)) + if len(memberships) > 0 { + o.DBSecurityGroupMemberships = memberships + } + } + + if raw, ok := data["vpc_security_group_memberships"]; ok { + memberships := flex.ExpandStringSet(raw.(*schema.Set)) + if len(memberships) > 0 { + o.VpcSecurityGroupMemberships = memberships + } + } + + if raw, ok := data["option_settings"]; ok { + o.OptionSettings = expandOptionSetting(raw.(*schema.Set).List()) + } + + if raw, ok := data[names.AttrVersion]; ok && raw.(string) != "" { + o.OptionVersion = aws.String(raw.(string)) + } + + option = append(option, o) + } + + return option +} + +// Flattens an array of Options into a []map[string]interface{} +func flattenOptions(apiOptions []*rds.Option, optionConfigurations []*rds.OptionConfiguration) []map[string]interface{} { + result := make([]map[string]interface{}, 0) + + for _, apiOption := range apiOptions { + if apiOption == nil || apiOption.OptionName == nil { + continue + } + + var configuredOption *rds.OptionConfiguration + + for _, 
optionConfiguration := range optionConfigurations { + if aws.StringValue(apiOption.OptionName) == aws.StringValue(optionConfiguration.OptionName) { + configuredOption = optionConfiguration + break + } + } + + dbSecurityGroupMemberships := make([]interface{}, 0) + for _, db := range apiOption.DBSecurityGroupMemberships { + if db != nil { + dbSecurityGroupMemberships = append(dbSecurityGroupMemberships, aws.StringValue(db.DBSecurityGroupName)) + } + } + + optionSettings := make([]interface{}, 0) + for _, apiOptionSetting := range apiOption.OptionSettings { + // The RDS API responds with all settings. Omit settings that match default value, + // but only if unconfigured. This is to prevent operators from continually needing + // to continually update their Terraform configurations to match new option settings + // when added by the API. + var configuredOptionSetting *rds.OptionSetting + + if configuredOption != nil { + for _, configuredOptionOptionSetting := range configuredOption.OptionSettings { + if aws.StringValue(apiOptionSetting.Name) == aws.StringValue(configuredOptionOptionSetting.Name) { + configuredOptionSetting = configuredOptionOptionSetting + break + } + } + } + + if configuredOptionSetting == nil && aws.StringValue(apiOptionSetting.Value) == aws.StringValue(apiOptionSetting.DefaultValue) { + continue + } + + optionSetting := map[string]interface{}{ + names.AttrName: aws.StringValue(apiOptionSetting.Name), + names.AttrValue: aws.StringValue(apiOptionSetting.Value), + } + + // Some values, like passwords, are sent back from the API as ****. + // Set the response to match the configuration to prevent an unexpected difference + if configuredOptionSetting != nil && aws.StringValue(apiOptionSetting.Value) == "****" { + optionSetting[names.AttrValue] = aws.StringValue(configuredOptionSetting.Value) + } + + optionSettings = append(optionSettings, optionSetting) + } + optionSettingsResource := &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrName: { + Type: schema.TypeString, + Required: true, + }, + names.AttrValue: { + Type: schema.TypeString, + Required: true, + }, + }, + } + + vpcSecurityGroupMemberships := make([]interface{}, 0) + for _, vpc := range apiOption.VpcSecurityGroupMemberships { + if vpc != nil { + vpcSecurityGroupMemberships = append(vpcSecurityGroupMemberships, aws.StringValue(vpc.VpcSecurityGroupId)) + } + } + + r := map[string]interface{}{ + "db_security_group_memberships": schema.NewSet(schema.HashString, dbSecurityGroupMemberships), + "option_name": aws.StringValue(apiOption.OptionName), + "option_settings": schema.NewSet(schema.HashResource(optionSettingsResource), optionSettings), + "vpc_security_group_memberships": schema.NewSet(schema.HashString, vpcSecurityGroupMemberships), + } + + if apiOption.OptionVersion != nil && configuredOption != nil && configuredOption.OptionVersion != nil { + r[names.AttrVersion] = aws.StringValue(apiOption.OptionVersion) + } + + if apiOption.Port != nil && configuredOption != nil && configuredOption.Port != nil { + r[names.AttrPort] = aws.Int64Value(apiOption.Port) + } + + result = append(result, r) + } + + return result +} + +func expandOptionSetting(list []interface{}) []*rds.OptionSetting { + options := make([]*rds.OptionSetting, 0, len(list)) + + for _, oRaw := range list { + data := oRaw.(map[string]interface{}) + + o := &rds.OptionSetting{ + Name: aws.String(data[names.AttrName].(string)), + Value: aws.String(data[names.AttrValue].(string)), + } + + options = append(options, o) + } + + return options +} diff 
--git a/internal/service/rds/parameter_group.go b/internal/service/rds/parameter_group.go index b870ffe2bc6..9dc8d4ed15a 100644 --- a/internal/service/rds/parameter_group.go +++ b/internal/service/rds/parameter_group.go @@ -11,18 +11,19 @@ import ( "strings" "time" - rds_sdkv2 "github.com/aws/aws-sdk-go-v2/service/rds" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/rds" "github.com/aws/aws-sdk-go-v2/service/rds/types" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfmaps "github.com/hashicorp/terraform-provider-aws/internal/maps" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -32,7 +33,7 @@ import ( // @SDKResource("aws_db_parameter_group", name="DB Parameter Group") // @Tags(identifierAttribute="arn") // @Testing(tagsTest=false) -func ResourceParameterGroup() *schema.Resource { +func resourceParameterGroup() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceParameterGroupCreate, ReadWithoutTimeout: resourceParameterGroupRead, @@ -81,9 +82,10 @@ func ResourceParameterGroup() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "apply_method": { - Type: schema.TypeString, - Optional: true, - Default: "immediate", + Type: schema.TypeString, + Optional: true, + Default: types.ApplyMethodImmediate, + ValidateDiagFunc: enum.ValidateIgnoreCase[types.ApplyMethod](), }, names.AttrName: { Type: schema.TypeString, @@ -107,24 +109,25 @@ func ResourceParameterGroup() *schema.Resource { func resourceParameterGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) name := create.Name(d.Get(names.AttrName).(string), d.Get(names.AttrNamePrefix).(string)) input := &rds.CreateDBParameterGroupInput{ DBParameterGroupFamily: aws.String(d.Get(names.AttrFamily).(string)), DBParameterGroupName: aws.String(name), Description: aws.String(d.Get(names.AttrDescription).(string)), - Tags: getTagsIn(ctx), + Tags: getTagsInV2(ctx), } - output, err := conn.CreateDBParameterGroupWithContext(ctx, input) + output, err := conn.CreateDBParameterGroup(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "creatingDB Parameter Group (%s): %s", name, err) + return sdkdiag.AppendErrorf(diags, "creating RDS DB Parameter Group (%s): %s", name, err) } - d.SetId(aws.StringValue(output.DBParameterGroup.DBParameterGroupName)) + d.SetId(aws.ToString(output.DBParameterGroup.DBParameterGroupName)) - // Set for update + // Set for update. d.Set(names.AttrARN, output.DBParameterGroup.DBParameterGroupArn) return append(diags, resourceParameterGroupUpdate(ctx, d, meta)...) 
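Editor's note: the schema change above defaults apply_method to types.ApplyMethodImmediate and validates it case-insensitively against the SDK enum. A hedged sketch of what that validation amounts to, using the SDK's generated Values() enumeration; validApplyMethod is an illustrative stand-in for internal/enum.ValidateIgnoreCase, which returns a Terraform ValidateDiagFunc rather than a plain error:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go-v2/service/rds/types"
)

// validApplyMethod reports whether v matches one of the SDK's ApplyMethod
// values ("immediate", "pending-reboot"), ignoring case.
func validApplyMethod(v string) error {
	for _, m := range types.ApplyMethod("").Values() {
		if strings.EqualFold(v, string(m)) {
			return nil
		}
	}
	return fmt.Errorf("expected one of %v (case-insensitive), got %q", types.ApplyMethod("").Values(), v)
}

func main() {
	fmt.Println(validApplyMethod("IMMEDIATE"))      // <nil>
	fmt.Println(validApplyMethod("pending-reboot")) // <nil>
	fmt.Println(validApplyMethod("later"))          // error
}
```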
@@ -132,9 +135,9 @@ func resourceParameterGroupCreate(ctx context.Context, d *schema.ResourceData, m func resourceParameterGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) - dbParameterGroup, err := FindDBParameterGroupByName(ctx, conn, d.Id()) + dbParameterGroup, err := findDBParameterGroupByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] RDS DB Parameter Group (%s) not found, removing from state", d.Id()) @@ -146,8 +149,7 @@ func resourceParameterGroupRead(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendErrorf(diags, "reading RDS DB Parameter Group (%s): %s", d.Id(), err) } - arn := aws.StringValue(dbParameterGroup.DBParameterGroupArn) - d.Set(names.AttrARN, arn) + d.Set(names.AttrARN, dbParameterGroup.DBParameterGroupArn) d.Set(names.AttrDescription, dbParameterGroup.Description) d.Set(names.AttrFamily, dbParameterGroup.DBParameterGroupFamily) d.Set(names.AttrName, dbParameterGroup.DBParameterGroupName) @@ -158,7 +160,7 @@ func resourceParameterGroupRead(ctx context.Context, d *schema.ResourceData, met configParams := d.Get(names.AttrParameter).(*schema.Set) if configParams.Len() < 1 { - // if we don't have any params in the ResourceData already, two possibilities + // If we don't have any params in the ResourceData already, two possibilities // first, we don't have a config available to us. Second, we do, but it has // no parameters. We're going to assume the first, to be safe. In this case, // we're only going to ask for the user-modified values, because any defaults @@ -167,26 +169,22 @@ func resourceParameterGroupRead(ctx context.Context, d *schema.ResourceData, met // an empty list anyways, so we just make some unnecessary requests. But in // the more common case (I assume) of an import, this will make fewer requests // and "do the right thing". - input.Source = aws.String("user") + input.Source = aws.String(parameterSourceUser) } - var parameters []*rds.Parameter - err = conn.DescribeDBParametersPagesWithContext(ctx, input, func(page *rds.DescribeDBParametersOutput, lastPage bool) bool { - parameters = append(parameters, page.Parameters...) - return !lastPage - }) + parameters, err := findDBParameters(ctx, conn, input, tfslices.PredicateTrue[*types.Parameter]()) if err != nil { return sdkdiag.AppendErrorf(diags, "reading RDS DB Parameter Group (%s) parameters: %s", d.Id(), err) } - var userParams []*rds.Parameter + var userParams []types.Parameter if configParams.Len() < 1 { - // if we have no config/no parameters in config, we've already asked for only + // If we have no config/no parameters in config, we've already asked for only // user-modified values, so we can just use the entire response. userParams = parameters } else { - // if we have a config available to us, we have two possible classes of value + // If we have a config available to us, we have two possible classes of value // in the config. On the one hand, the user could have specified a parameter // that _actually_ changed things, in which case its Source would be set to // user. On the other, they may have specified a parameter that coincides with @@ -195,27 +193,30 @@ func resourceParameterGroupRead(ctx context.Context, d *schema.ResourceData, met // _and_ the "system"/"engine-default" Source parameters _that appear in the // config_ in the state, or the user gets a perpetual diff. 
See // terraform-providers/terraform-provider-aws#593 for more context and details. - confParams := expandParameters(configParams.List()) - for _, param := range parameters { - if param.Source == nil || param.ParameterName == nil { + for _, parameter := range parameters { + if parameter.Source == nil || parameter.ParameterName == nil { continue } - if aws.StringValue(param.Source) == "user" { - userParams = append(userParams, param) + + if aws.ToString(parameter.Source) == parameterSourceUser { + userParams = append(userParams, parameter) continue } + var paramFound bool - for _, cp := range confParams { + for _, cp := range expandParameters(configParams.List()) { if cp.ParameterName == nil { continue } - if aws.StringValue(cp.ParameterName) == aws.StringValue(param.ParameterName) { - userParams = append(userParams, param) + + if aws.ToString(cp.ParameterName) == aws.ToString(parameter.ParameterName) { + userParams = append(userParams, parameter) + paramFound = true break } } if !paramFound { - log.Printf("[DEBUG] Not persisting %s to state, as its source is %q and it isn't in the config", aws.StringValue(param.ParameterName), aws.StringValue(param.Source)) + log.Printf("[DEBUG] Not persisting %s to state, as its source is %q and it isn't in the config", aws.ToString(parameter.ParameterName), aws.ToString(parameter.Source)) } } } @@ -232,65 +233,50 @@ func resourceParameterGroupUpdate(ctx context.Context, d *schema.ResourceData, m maxParamModifyChunk = 20 ) var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) if d.HasChange(names.AttrParameter) { o, n := d.GetChange(names.AttrParameter) - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) + os, ns := o.(*schema.Set), n.(*schema.Set) - // Expand the "parameter" set to aws-sdk-go compat []rds.Parameter - parameters := expandParameters(ns.Difference(os).List()) - - if len(parameters) > 0 { + if parameters := expandParameters(ns.Difference(os).List()); len(parameters) > 0 { // We can only modify 20 parameters at a time, so walk them until // we've got them all. 
- for parameters != nil { - var paramsToModify []*rds.Parameter - paramsToModify, parameters = ResourceParameterModifyChunk(parameters, maxParamModifyChunk) + var paramsToModify []types.Parameter + paramsToModify, parameters = parameterGroupModifyChunk(parameters, maxParamModifyChunk) input := &rds.ModifyDBParameterGroupInput{ DBParameterGroupName: aws.String(d.Id()), Parameters: paramsToModify, } - _, err := conn.ModifyDBParameterGroupWithContext(ctx, input) + _, err := conn.ModifyDBParameterGroup(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "modifying DB Parameter Group (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "modifying RDS DB Parameter Group (%s): %s", d.Id(), err) } } } - toRemove := map[string]*rds.Parameter{} + toRemove := map[string]types.Parameter{} for _, p := range expandParameters(os.List()) { if p.ParameterName != nil { - toRemove[*p.ParameterName] = p + toRemove[aws.ToString(p.ParameterName)] = p } } for _, p := range expandParameters(ns.List()) { if p.ParameterName != nil { - delete(toRemove, *p.ParameterName) + delete(toRemove, aws.ToString(p.ParameterName)) } } - // Reset parameters that have been removed - var resetParameters []*rds.Parameter - for _, v := range toRemove { - resetParameters = append(resetParameters, v) - } - if len(resetParameters) > 0 { + // Reset parameters that have been removed. + if resetParameters := tfmaps.Values(toRemove); len(resetParameters) > 0 { for resetParameters != nil { - var paramsToReset []*rds.Parameter + var paramsToReset []types.Parameter if len(resetParameters) <= maxParamModifyChunk { paramsToReset, resetParameters = resetParameters[:], nil } else { @@ -303,9 +289,10 @@ func resourceParameterGroupUpdate(ctx context.Context, d *schema.ResourceData, m ResetAllParameters: aws.Bool(false), } - _, err := conn.ResetDBParameterGroupWithContext(ctx, input) + _, err := conn.ResetDBParameterGroup(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "resetting DB Parameter Group (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "resetting RDS DB Parameter Group (%s): %s", d.Id(), err) } } } @@ -314,70 +301,115 @@ func resourceParameterGroupUpdate(ctx context.Context, d *schema.ResourceData, m return append(diags, resourceParameterGroupRead(ctx, d, meta)...) 
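The update path above batches its API calls: `ModifyDBParameterGroup` and `ResetDBParameterGroup` accept at most 20 parameters per request, which is what `maxParamModifyChunk` and `parameterGroupModifyChunk` enforce (the latter also pulls `immediate` character-set parameters into the first batch). A minimal sketch of the batching idea using a plain generic chunker in place of the provider's ordering-aware helper (illustrative only):

```go
package main

import "fmt"

// chunk splits a slice into batches of at most size elements. It mirrors the
// walk-in-batches loop above without the character-set prioritization.
func chunk[T any](all []T, size int) [][]T {
	var out [][]T
	for len(all) > 0 {
		n := size
		if len(all) < n {
			n = len(all)
		}
		out = append(out, all[:n])
		all = all[n:]
	}
	return out
}

func main() {
	params := make([]string, 45) // stand-in for []types.Parameter
	for i := range params {
		params[i] = fmt.Sprintf("param-%d", i)
	}

	// 45 parameters -> batches of 20, 20, 5; each batch would correspond to one
	// ModifyDBParameterGroup (or ResetDBParameterGroup) call.
	for _, batch := range chunk(params, 20) {
		fmt.Printf("one API call with %d parameters\n", len(batch))
	}
}
```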
} -func resourceParameterGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) (diags diag.Diagnostics) { +func resourceParameterGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics conn := meta.(*conns.AWSClient).RDSClient(ctx) - input := &rds_sdkv2.DeleteDBParameterGroupInput{ - DBParameterGroupName: aws.String(d.Id()), - } log.Printf("[DEBUG] Deleting RDS DB Parameter Group: %s", d.Id()) - err := retry.RetryContext(ctx, 3*time.Minute, func() *retry.RetryError { - _, err := conn.DeleteDBParameterGroup(ctx, input) - if errs.IsA[*types.DBParameterGroupNotFoundFault](err) { - return nil - } else if errs.IsA[*types.InvalidDBParameterGroupStateFault](err) { - return retry.RetryableError(err) - } - if err != nil { - return retry.NonRetryableError(err) - } - return nil + const ( + timeout = 3 * time.Minute + ) + _, err := tfresource.RetryWhenIsA[*types.InvalidDBParameterGroupStateFault](ctx, timeout, func() (interface{}, error) { + return conn.DeleteDBParameterGroup(ctx, &rds.DeleteDBParameterGroupInput{ + DBParameterGroupName: aws.String(d.Id()), + }) }) - if tfresource.TimedOut(err) { - _, err = conn.DeleteDBParameterGroup(ctx, input) + + if errs.IsA[*types.DBParameterGroupNotFoundFault](err) { + return diags } + if err != nil { return sdkdiag.AppendErrorf(diags, "deleting RDS DB Parameter Group (%s): %s", d.Id(), err) } + return diags } -func FindDBParameterGroupByName(ctx context.Context, conn *rds.RDS, name string) (*rds.DBParameterGroup, error) { +func findDBParameterGroupByName(ctx context.Context, conn *rds.Client, name string) (*types.DBParameterGroup, error) { input := &rds.DescribeDBParameterGroupsInput{ DBParameterGroupName: aws.String(name), } + output, err := findDBParameterGroup(ctx, conn, input, tfslices.PredicateTrue[*types.DBParameterGroup]()) - output, err := conn.DescribeDBParameterGroupsWithContext(ctx, input) + if err != nil { + return nil, err + } - if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBParameterGroupNotFoundFault) { + // Eventual consistency check. 
+ if aws.ToString(output.DBParameterGroupName) != name { return nil, &retry.NotFoundError{ - LastError: err, LastRequest: input, } } + return output, nil +} + +func findDBParameterGroup(ctx context.Context, conn *rds.Client, input *rds.DescribeDBParameterGroupsInput, filter tfslices.Predicate[*types.DBParameterGroup]) (*types.DBParameterGroup, error) { + output, err := findDBParameterGroups(ctx, conn, input, filter) + if err != nil { return nil, err } - if output == nil || len(output.DBParameterGroups) == 0 || output.DBParameterGroups[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } + return tfresource.AssertSingleValueResult(output) +} + +func findDBParameterGroups(ctx context.Context, conn *rds.Client, input *rds.DescribeDBParameterGroupsInput, filter tfslices.Predicate[*types.DBParameterGroup]) ([]types.DBParameterGroup, error) { + var output []types.DBParameterGroup + + pages := rds.NewDescribeDBParameterGroupsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*types.DBParameterGroupNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } - if count := len(output.DBParameterGroups); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) + for _, v := range page.DBParameterGroups { + if filter(&v) { + output = append(output, v) + } + } } - dbParameterGroup := output.DBParameterGroups[0] + return output, nil +} - // Eventual consistency check. - if aws.StringValue(dbParameterGroup.DBParameterGroupName) != name { - return nil, &retry.NotFoundError{ - LastRequest: input, +func findDBParameters(ctx context.Context, conn *rds.Client, input *rds.DescribeDBParametersInput, filter tfslices.Predicate[*types.Parameter]) ([]types.Parameter, error) { + var output []types.Parameter + + pages := rds.NewDescribeDBParametersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*types.DBParameterGroupNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + for _, v := range page.Parameters { + if filter(&v) { + output = append(output, v) + } } } - return dbParameterGroup, nil + return output, nil } func resourceParameterHash(v interface{}) int { @@ -393,15 +425,15 @@ func resourceParameterHash(v interface{}) int { return create.StringHashcode(buf.String()) } -func ResourceParameterModifyChunk(all []*rds.Parameter, maxChunkSize int) ([]*rds.Parameter, []*rds.Parameter) { +func parameterGroupModifyChunk(all []types.Parameter, maxChunkSize int) ([]types.Parameter, []types.Parameter) { // Since the hash randomly affect the set "order," this attempts to prioritize important - // parameters to go in the first chunk (i.e., charset) + // parameters to go in the first chunk (i.e., charset). 
if len(all) <= maxChunkSize { return all[:], nil } - var modifyChunk, remainder []*rds.Parameter + var modifyChunk, remainder []types.Parameter // pass 1 for i, p := range all { @@ -410,7 +442,7 @@ func ResourceParameterModifyChunk(all []*rds.Parameter, maxChunkSize int) ([]*rd return modifyChunk, remainder } - if strings.Contains(aws.StringValue(p.ParameterName), "character_set") && aws.StringValue(p.ApplyMethod) != "pending-reboot" { + if strings.Contains(aws.ToString(p.ParameterName), "character_set") && p.ApplyMethod != types.ApplyMethodPendingReboot { modifyChunk = append(modifyChunk, p) continue } @@ -428,7 +460,7 @@ func ResourceParameterModifyChunk(all []*rds.Parameter, maxChunkSize int) ([]*rd return modifyChunk, remainder } - if aws.StringValue(p.ApplyMethod) != "pending-reboot" { + if p.ApplyMethod != types.ApplyMethodPendingReboot { modifyChunk = append(modifyChunk, p) continue } diff --git a/internal/service/rds/parameter_group_test.go b/internal/service/rds/parameter_group_test.go index f89d2e39c4b..c4af565f1f9 100644 --- a/internal/service/rds/parameter_group_test.go +++ b/internal/service/rds/parameter_group_test.go @@ -10,8 +10,9 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/rds" + "github.com/aws/aws-sdk-go-v2/service/rds/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -24,7 +25,7 @@ import ( func TestAccRDSParameterGroup_basic(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBParameterGroup + var v types.DBParameterGroup resourceName := "aws_db_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -119,7 +120,7 @@ func TestAccRDSParameterGroup_basic(t *testing.T) { func TestAccRDSParameterGroup_disappears(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBParameterGroup + var v types.DBParameterGroup resourceName := "aws_db_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -143,7 +144,7 @@ func TestAccRDSParameterGroup_disappears(t *testing.T) { func TestAccRDSParameterGroup_tags(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBParameterGroup + var v types.DBParameterGroup resourceName := "aws_db_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -207,7 +208,7 @@ func TestAccRDSParameterGroup_caseWithMixedParameters(t *testing.T) { func TestAccRDSParameterGroup_limit(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBParameterGroup + var v types.DBParameterGroup resourceName := "aws_db_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -576,7 +577,7 @@ func TestAccRDSParameterGroup_limit(t *testing.T) { func TestAccRDSParameterGroup_namePrefix(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBParameterGroup + var v types.DBParameterGroup resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -597,7 +598,7 @@ func TestAccRDSParameterGroup_namePrefix(t *testing.T) { func TestAccRDSParameterGroup_generatedName(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBParameterGroup + var v types.DBParameterGroup resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -617,7 +618,7 @@ func 
TestAccRDSParameterGroup_generatedName(t *testing.T) { func TestAccRDSParameterGroup_withApplyMethod(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBParameterGroup + var v types.DBParameterGroup resourceName := "aws_db_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -658,7 +659,7 @@ func TestAccRDSParameterGroup_withApplyMethod(t *testing.T) { func TestAccRDSParameterGroup_only(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBParameterGroup + var v types.DBParameterGroup resourceName := "aws_db_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -688,7 +689,7 @@ func TestAccRDSParameterGroup_only(t *testing.T) { func TestAccRDSParameterGroup_matchDefault(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBParameterGroup + var v types.DBParameterGroup resourceName := "aws_db_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -718,7 +719,7 @@ func TestAccRDSParameterGroup_matchDefault(t *testing.T) { func TestAccRDSParameterGroup_updateParameters(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBParameterGroup + var v types.DBParameterGroup resourceName := "aws_db_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -779,7 +780,7 @@ func TestAccRDSParameterGroup_updateParameters(t *testing.T) { func TestAccRDSParameterGroup_caseParameters(t *testing.T) { ctx := acctest.Context(t) - var v rds.DBParameterGroup + var v types.DBParameterGroup resourceName := "aws_db_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -803,9 +804,10 @@ func TestAccRDSParameterGroup_caseParameters(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"parameter.0.name"}, }, { Config: testAccParameterGroupConfig_upperCase(rName, "max_connections"), @@ -814,15 +816,15 @@ func TestAccRDSParameterGroup_caseParameters(t *testing.T) { }) } -func TestDBParameterModifyChunk(t *testing.T) { +func TestParameterGroupModifyChunk(t *testing.T) { t.Parallel() cases := []struct { Name string ChunkSize int - Parameters []*rds.Parameter - ExpectedModify []*rds.Parameter - ExpectedRemainder []*rds.Parameter + Parameters []types.Parameter + ExpectedModify []types.Parameter + ExpectedRemainder []types.Parameter }{ { Name: "Empty", @@ -834,26 +836,26 @@ func TestDBParameterModifyChunk(t *testing.T) { { Name: "A couple", ChunkSize: 20, - Parameters: []*rds.Parameter{ + Parameters: []types.Parameter{ { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("tx_isolation"), ParameterValue: aws.String("repeatable-read"), }, { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("binlog_cache_size"), ParameterValue: aws.String("131072"), }, }, - ExpectedModify: []*rds.Parameter{ + ExpectedModify: []types.Parameter{ { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("tx_isolation"), ParameterValue: aws.String("repeatable-read"), }, { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("binlog_cache_size"), ParameterValue: aws.String("131072"), }, @@ -863,68 +865,68 @@ func TestDBParameterModifyChunk(t *testing.T) { { Name: "Over 3 max, 6 in", ChunkSize: 3, - 
Parameters: []*rds.Parameter{ + Parameters: []types.Parameter{ { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("tx_isolation"), ParameterValue: aws.String("repeatable-read"), }, { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("binlog_cache_size"), ParameterValue: aws.String("131072"), }, { - ApplyMethod: aws.String("pending-reboot"), + ApplyMethod: types.ApplyMethodPendingReboot, ParameterName: aws.String("innodb_read_io_threads"), ParameterValue: aws.String("64"), }, { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("character_set_server"), ParameterValue: aws.String("utf8"), }, { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("innodb_flush_log_at_trx_commit"), ParameterValue: aws.String(acctest.Ct0), }, { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("character_set_filesystem"), ParameterValue: aws.String("utf8"), }, }, - ExpectedModify: []*rds.Parameter{ + ExpectedModify: []types.Parameter{ { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("character_set_server"), ParameterValue: aws.String("utf8"), }, { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("character_set_filesystem"), ParameterValue: aws.String("utf8"), }, { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("tx_isolation"), ParameterValue: aws.String("repeatable-read"), }, }, - ExpectedRemainder: []*rds.Parameter{ + ExpectedRemainder: []types.Parameter{ { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("binlog_cache_size"), ParameterValue: aws.String("131072"), }, { - ApplyMethod: aws.String("pending-reboot"), + ApplyMethod: types.ApplyMethodPendingReboot, ParameterName: aws.String("innodb_read_io_threads"), ParameterValue: aws.String("64"), }, { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("innodb_flush_log_at_trx_commit"), ParameterValue: aws.String(acctest.Ct0), }, @@ -933,98 +935,98 @@ func TestDBParameterModifyChunk(t *testing.T) { { Name: "Over 3 max, 9 in", ChunkSize: 3, - Parameters: []*rds.Parameter{ + Parameters: []types.Parameter{ { - ApplyMethod: aws.String("pending-reboot"), + ApplyMethod: types.ApplyMethodPendingReboot, ParameterName: aws.String("tx_isolation"), ParameterValue: aws.String("repeatable-read"), }, { - ApplyMethod: aws.String("pending-reboot"), + ApplyMethod: types.ApplyMethodPendingReboot, ParameterName: aws.String("binlog_cache_size"), ParameterValue: aws.String("131072"), }, { - ApplyMethod: aws.String("pending-reboot"), + ApplyMethod: types.ApplyMethodPendingReboot, ParameterName: aws.String("innodb_read_io_threads"), ParameterValue: aws.String("64"), }, { - ApplyMethod: aws.String("pending-reboot"), + ApplyMethod: types.ApplyMethodPendingReboot, ParameterName: aws.String("character_set_server"), ParameterValue: aws.String("utf8"), }, { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("innodb_flush_log_at_trx_commit"), ParameterValue: aws.String(acctest.Ct0), }, { - ApplyMethod: aws.String("immediate"), + ApplyMethod: 
types.ApplyMethodImmediate, ParameterName: aws.String("character_set_filesystem"), ParameterValue: aws.String("utf8"), }, { - ApplyMethod: aws.String("pending-reboot"), + ApplyMethod: types.ApplyMethodPendingReboot, ParameterName: aws.String("innodb_max_dirty_pages_pct"), ParameterValue: aws.String("90"), }, { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("character_set_connection"), ParameterValue: aws.String("utf8"), }, { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("key_buffer_size"), ParameterValue: aws.String("67108864"), }, }, - ExpectedModify: []*rds.Parameter{ + ExpectedModify: []types.Parameter{ { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("character_set_filesystem"), ParameterValue: aws.String("utf8"), }, { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("character_set_connection"), ParameterValue: aws.String("utf8"), }, { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("innodb_flush_log_at_trx_commit"), ParameterValue: aws.String(acctest.Ct0), }, }, - ExpectedRemainder: []*rds.Parameter{ + ExpectedRemainder: []types.Parameter{ { - ApplyMethod: aws.String("pending-reboot"), + ApplyMethod: types.ApplyMethodPendingReboot, ParameterName: aws.String("tx_isolation"), ParameterValue: aws.String("repeatable-read"), }, { - ApplyMethod: aws.String("pending-reboot"), + ApplyMethod: types.ApplyMethodPendingReboot, ParameterName: aws.String("binlog_cache_size"), ParameterValue: aws.String("131072"), }, { - ApplyMethod: aws.String("pending-reboot"), + ApplyMethod: types.ApplyMethodPendingReboot, ParameterName: aws.String("innodb_read_io_threads"), ParameterValue: aws.String("64"), }, { - ApplyMethod: aws.String("pending-reboot"), + ApplyMethod: types.ApplyMethodPendingReboot, ParameterName: aws.String("character_set_server"), ParameterValue: aws.String("utf8"), }, { - ApplyMethod: aws.String("pending-reboot"), + ApplyMethod: types.ApplyMethodPendingReboot, ParameterName: aws.String("innodb_max_dirty_pages_pct"), ParameterValue: aws.String("90"), }, { - ApplyMethod: aws.String("immediate"), + ApplyMethod: types.ApplyMethodImmediate, ParameterName: aws.String("key_buffer_size"), ParameterValue: aws.String("67108864"), }, @@ -1033,7 +1035,7 @@ func TestDBParameterModifyChunk(t *testing.T) { } for _, tc := range cases { - mod, rem := tfrds.ResourceParameterModifyChunk(tc.Parameters, tc.ChunkSize) + mod, rem := tfrds.ParameterGroupModifyChunk(tc.Parameters, tc.ChunkSize) if !reflect.DeepEqual(mod, tc.ExpectedModify) { t.Errorf("Case %q: Modify did not match\n%#v\n\nGot:\n%#v", tc.Name, tc.ExpectedModify, mod) } @@ -1045,7 +1047,7 @@ func TestDBParameterModifyChunk(t *testing.T) { func testAccCheckParameterGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).RDSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_db_parameter_group" { @@ -1069,35 +1071,32 @@ func testAccCheckParameterGroupDestroy(ctx context.Context) resource.TestCheckFu } } -func testAccCheckParameterGroupAttributes(v *rds.DBParameterGroup, name string) resource.TestCheckFunc { +func testAccCheckParameterGroupAttributes(v 
*types.DBParameterGroup, name string) resource.TestCheckFunc { return func(s *terraform.State) error { if *v.DBParameterGroupName != name { return fmt.Errorf("Bad Parameter Group name, expected (%s), got (%s)", name, *v.DBParameterGroupName) } family := "mysql5.6" - if aws.StringValue(v.DBParameterGroupFamily) != family { - return fmt.Errorf("bad family, got: %s, expecting: %s", aws.StringValue(v.DBParameterGroupFamily), family) + if aws.ToString(v.DBParameterGroupFamily) != family { + return fmt.Errorf("bad family, got: %s, expecting: %s", aws.ToString(v.DBParameterGroupFamily), family) } return nil } } -func testAccCheckParameterGroupExists(ctx context.Context, n string, v *rds.DBParameterGroup) resource.TestCheckFunc { +func testAccCheckParameterGroupExists(ctx context.Context, n string, v *types.DBParameterGroup) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No RDS DB Parameter Group ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).RDSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) output, err := tfrds.FindDBParameterGroupByName(ctx, conn, rs.Primary.ID) + if err != nil { return err } @@ -1115,33 +1114,34 @@ func testAccCheckParameterNotUserDefined(ctx context.Context, rName, paramName s return fmt.Errorf("Not found: %s", rName) } - if rs.Primary.ID == "" { - return fmt.Errorf("No DB Parameter Group ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).RDSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) - opts := rds.DescribeDBParametersInput{ + input := &rds.DescribeDBParametersInput{ DBParameterGroupName: aws.String(rs.Primary.ID), Source: aws.String("user"), } userDefined := false - err := conn.DescribeDBParametersPagesWithContext(ctx, &opts, func(page *rds.DescribeDBParametersOutput, lastPage bool) bool { + pages := rds.NewDescribeDBParametersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return err + } + for _, param := range page.Parameters { - if *param.ParameterName == paramName { + if aws.ToString(param.ParameterName) == paramName { userDefined = true - return false } } - return true - }) + } if userDefined { return fmt.Errorf("DB Parameter is user defined") } - return err + return nil } } diff --git a/internal/service/rds/reserved_instance.go b/internal/service/rds/reserved_instance.go index 45cc771a626..f5121a4791a 100644 --- a/internal/service/rds/reserved_instance.go +++ b/internal/service/rds/reserved_instance.go @@ -8,9 +8,11 @@ import ( "fmt" "time" - "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/rds" + "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" @@ -154,7 +156,7 @@ func resourceReservedInstanceCreate(ctx context.Context, d *schema.ResourceData, return create.AppendDiagError(diags, names.RDS, create.ErrActionCreating, ResNameReservedInstance, fmt.Sprintf("offering_id: %s, reservation_id: %s", d.Get("offering_id").(string), d.Get("reservation_id").(string)), err) } - 
d.SetId(aws.ToString(resp.ReservedDBInstance.ReservedDBInstanceId)) + d.SetId(aws.StringValue(resp.ReservedDBInstance.ReservedDBInstanceId)) if err := waitReservedInstanceCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return create.AppendDiagError(diags, names.RDS, create.ErrActionWaitingForCreation, ResNameReservedInstance, d.Id(), err) @@ -204,6 +206,69 @@ func resourceReservedInstanceUpdate(ctx context.Context, d *schema.ResourceData, return resourceReservedInstanceRead(ctx, d, meta) } +func FindReservedDBInstanceByID(ctx context.Context, conn *rds.RDS, id string) (*rds.ReservedDBInstance, error) { + input := &rds.DescribeReservedDBInstancesInput{ + ReservedDBInstanceId: aws.String(id), + } + + output, err := conn.DescribeReservedDBInstancesWithContext(ctx, input) + + if tfawserr.ErrCodeEquals(err, rds.ErrCodeReservedDBInstanceNotFoundFault) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || len(output.ReservedDBInstances) == 0 || output.ReservedDBInstances[0] == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + if count := len(output.ReservedDBInstances); count > 1 { + return nil, tfresource.NewTooManyResultsError(count, input) + } + + return output.ReservedDBInstances[0], nil +} + +func statusReservedInstance(ctx context.Context, conn *rds.RDS, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := FindReservedDBInstanceByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.State), nil + } +} + +func waitReservedInstanceCreated(ctx context.Context, conn *rds.RDS, id string, timeout time.Duration) error { + stateConf := &retry.StateChangeConf{ + Pending: []string{ + ReservedInstanceStatePaymentPending, + }, + Target: []string{ReservedInstanceStateActive}, + Refresh: statusReservedInstance(ctx, conn, id), + NotFoundChecks: 5, + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + _, err := stateConf.WaitForStateContext(ctx) + + return err +} + func flattenRecurringCharges(recurringCharges []*rds.RecurringCharge) []interface{} { if len(recurringCharges) == 0 { return []interface{}{} @@ -213,7 +278,7 @@ func flattenRecurringCharges(recurringCharges []*rds.RecurringCharge) []interfac for _, recurringCharge := range recurringCharges { rawRecurringCharge := map[string]interface{}{ "recurring_charge_amount": recurringCharge.RecurringChargeAmount, - "recurring_charge_frequency": aws.ToString(recurringCharge.RecurringChargeFrequency), + "recurring_charge_frequency": aws.StringValue(recurringCharge.RecurringChargeFrequency), } rawRecurringCharges = append(rawRecurringCharges, rawRecurringCharge) diff --git a/internal/service/rds/reserved_instance_offering_data_source.go b/internal/service/rds/reserved_instance_offering_data_source.go index b7eafc625d6..d9b9b5eb01f 100644 --- a/internal/service/rds/reserved_instance_offering_data_source.go +++ b/internal/service/rds/reserved_instance_offering_data_source.go @@ -8,24 +8,25 @@ import ( "fmt" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go/service/rds" + "github.com/aws/aws-sdk-go-v2/service/rds" + "github.com/aws/aws-sdk-go-v2/service/rds/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -const ( - ResNameReservedInstanceOffering = "Reserved Instance Offering" -) - -// @SDKDataSource("aws_rds_reserved_instance_offering") -func DataSourceReservedOffering() *schema.Resource { +// @SDKDataSource("aws_rds_reserved_instance_offering", name="Reserved Instance Offering") +func dataSourceReservedOffering() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceReservedOfferingRead, + Schema: map[string]*schema.Schema{ "currency_code": { Type: schema.TypeString, @@ -70,7 +71,7 @@ func DataSourceReservedOffering() *schema.Resource { func dataSourceReservedOfferingRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) input := &rds.DescribeReservedDBInstancesOfferingsInput{ DBInstanceClass: aws.String(d.Get("db_instance_class").(string)), @@ -80,30 +81,60 @@ func dataSourceReservedOfferingRead(ctx context.Context, d *schema.ResourceData, ProductDescription: aws.String(d.Get("product_description").(string)), } - resp, err := conn.DescribeReservedDBInstancesOfferingsWithContext(ctx, input) - if err != nil { - return create.AppendDiagError(diags, names.RDS, create.ErrActionReading, ResNameReservedInstanceOffering, "unknown", err) - } - - if len(resp.ReservedDBInstancesOfferings) == 0 { - return sdkdiag.AppendErrorf(diags, "no %s %s found matching criteria; try different search", names.RDS, ResNameReservedInstanceOffering) - } + offering, err := findReservedDBInstancesOffering(ctx, conn, input, tfslices.PredicateTrue[*types.ReservedDBInstancesOffering]()) - if len(resp.ReservedDBInstancesOfferings) > 1 { - return sdkdiag.AppendErrorf(diags, "More than one %s %s found matching criteria; try different search", names.RDS, ResNameReservedInstanceOffering) + if err != nil { + return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("RDS Reserved Instance Offering", err)) } - offering := resp.ReservedDBInstancesOfferings[0] - - d.SetId(aws.ToString(offering.ReservedDBInstancesOfferingId)) + offeringID := aws.ToString(offering.ReservedDBInstancesOfferingId) + d.SetId(offeringID) d.Set("currency_code", offering.CurrencyCode) d.Set("db_instance_class", offering.DBInstanceClass) d.Set(names.AttrDuration, offering.Duration) d.Set("fixed_price", offering.FixedPrice) d.Set("multi_az", offering.MultiAZ) + d.Set("offering_id", offeringID) d.Set("offering_type", offering.OfferingType) d.Set("product_description", offering.ProductDescription) - d.Set("offering_id", offering.ReservedDBInstancesOfferingId) return diags } + +func findReservedDBInstancesOffering(ctx context.Context, conn *rds.Client, input *rds.DescribeReservedDBInstancesOfferingsInput, filter tfslices.Predicate[*types.ReservedDBInstancesOffering]) (*types.ReservedDBInstancesOffering, error) { + output, err := findReservedDBInstancesOfferings(ctx, conn, input, filter) + + if err != nil { 
+ return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findReservedDBInstancesOfferings(ctx context.Context, conn *rds.Client, input *rds.DescribeReservedDBInstancesOfferingsInput, filter tfslices.Predicate[*types.ReservedDBInstancesOffering]) ([]types.ReservedDBInstancesOffering, error) { + var output []types.ReservedDBInstancesOffering + + pages := rds.NewDescribeReservedDBInstancesOfferingsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*types.ReservedDBInstancesOfferingNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + for _, v := range page.ReservedDBInstancesOfferings { + if filter(&v) { + output = append(output, v) + } + } + } + + return output, nil +} diff --git a/internal/service/rds/reserved_instance_offering_data_source_test.go b/internal/service/rds/reserved_instance_offering_data_source_test.go index 423deb5b6bc..2eb343f8f9a 100644 --- a/internal/service/rds/reserved_instance_offering_data_source_test.go +++ b/internal/service/rds/reserved_instance_offering_data_source_test.go @@ -18,7 +18,6 @@ func TestAccRDSInstanceOffering_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: nil, ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), Steps: []resource.TestStep{ { diff --git a/internal/service/rds/service_package_gen.go b/internal/service/rds/service_package_gen.go index f007684df4d..b72e26c3613 100644 --- a/internal/service/rds/service_package_gen.go +++ b/internal/service/rds/service_package_gen.go @@ -62,7 +62,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac Name: "DB Proxy", }, { - Factory: DataSourceSnapshot, + Factory: dataSourceSnapshot, TypeName: "aws_db_snapshot", Name: "DB Snapshot", Tags: &types.ServicePackageResourceTags{}, @@ -93,8 +93,9 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac TypeName: "aws_rds_orderable_db_instance", }, { - Factory: DataSourceReservedOffering, + Factory: dataSourceReservedOffering, TypeName: "aws_rds_reserved_instance_offering", + Name: "Reserved Instance Offering", }, } } @@ -142,7 +143,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceParameterGroup, + Factory: resourceParameterGroup, TypeName: "aws_db_parameter_group", Name: "DB Parameter Group", Tags: &types.ServicePackageResourceTags{ @@ -176,7 +177,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka Name: "DB Proxy Target", }, { - Factory: ResourceSnapshot, + Factory: resourceSnapshot, TypeName: "aws_db_snapshot", Name: "DB Snapshot", Tags: &types.ServicePackageResourceTags{ @@ -184,9 +185,9 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceSnapshotCopy, + Factory: resourceSnapshotCopy, TypeName: "aws_db_snapshot_copy", - Name: "DB Snapshot", + Name: "DB Snapshot Copy", Tags: &types.ServicePackageResourceTags{ IdentifierAttribute: "db_snapshot_arn", }, @@ -217,7 +218,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka TypeName: "aws_rds_cluster_activity_stream", }, { - Factory: ResourceClusterEndpoint, + Factory: resourceClusterEndpoint, TypeName: "aws_rds_cluster_endpoint", Name: "Cluster Endpoint", 
Tags: &types.ServicePackageResourceTags{ @@ -233,7 +234,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceClusterParameterGroup, + Factory: resourceClusterParameterGroup, TypeName: "aws_rds_cluster_parameter_group", Name: "Cluster Parameter Group", Tags: &types.ServicePackageResourceTags{ @@ -241,8 +242,9 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceClusterRoleAssociation, + Factory: resourceClusterRoleAssociation, TypeName: "aws_rds_cluster_role_association", + Name: "Cluster IAM Role Association", }, { Factory: ResourceCustomDBEngineVersion, diff --git a/internal/service/rds/snapshot.go b/internal/service/rds/snapshot.go index 5076bd17736..782c7be4995 100644 --- a/internal/service/rds/snapshot.go +++ b/internal/service/rds/snapshot.go @@ -5,16 +5,18 @@ package rds import ( "context" + "fmt" "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/rds" + "github.com/aws/aws-sdk-go-v2/service/rds/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" @@ -27,7 +29,7 @@ import ( // @SDKResource("aws_db_snapshot", name="DB Snapshot") // @Tags(identifierAttribute="db_snapshot_arn") // @Testing(tagsTest=false) -func ResourceSnapshot() *schema.Resource { +func resourceSnapshot() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceSnapshotCreate, ReadWithoutTimeout: resourceSnapshotRead, @@ -136,23 +138,24 @@ func ResourceSnapshot() *schema.Resource { func resourceSnapshotCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) dbSnapshotID := d.Get("db_snapshot_identifier").(string) input := &rds.CreateDBSnapshotInput{ DBInstanceIdentifier: aws.String(d.Get("db_instance_identifier").(string)), DBSnapshotIdentifier: aws.String(dbSnapshotID), - Tags: getTagsIn(ctx), + Tags: getTagsInV2(ctx), } - output, err := conn.CreateDBSnapshotWithContext(ctx, input) + output, err := conn.CreateDBSnapshot(ctx, input) + if err != nil { return sdkdiag.AppendErrorf(diags, "creating RDS DB Snapshot (%s): %s", dbSnapshotID, err) } - d.SetId(aws.StringValue(output.DBSnapshot.DBSnapshotIdentifier)) + d.SetId(aws.ToString(output.DBSnapshot.DBSnapshotIdentifier)) - if err := waitDBSnapshotCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + if _, err := waitDBSnapshotCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for RDS DB Snapshot (%s) create: %s", d.Id(), err) } @@ -160,10 +163,11 @@ func resourceSnapshotCreate(ctx context.Context, d *schema.ResourceData, meta in input := &rds.ModifyDBSnapshotAttributeInput{ AttributeName: aws.String("restore"), DBSnapshotIdentifier: aws.String(d.Id()), - ValuesToAdd: 
flex.ExpandStringSet(v.(*schema.Set)), + ValuesToAdd: flex.ExpandStringValueSet(v.(*schema.Set)), } - _, err := conn.ModifyDBSnapshotAttributeWithContext(ctx, input) + _, err := conn.ModifyDBSnapshotAttribute(ctx, input) + if err != nil { return sdkdiag.AppendErrorf(diags, "modifying RDS DB Snapshot (%s) attribute: %s", d.Id(), err) } @@ -174,9 +178,9 @@ func resourceSnapshotCreate(ctx context.Context, d *schema.ResourceData, meta in func resourceSnapshotRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) - snapshot, err := FindDBSnapshotByID(ctx, conn, d.Id()) + snapshot, err := findDBSnapshotByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] RDS DB Snapshot (%s) not found, removing from state", d.Id()) @@ -188,7 +192,7 @@ func resourceSnapshotRead(ctx context.Context, d *schema.ResourceData, meta inte return sdkdiag.AppendErrorf(diags, "reading RDS DB Snapshot (%s): %s", d.Id(), err) } - arn := aws.StringValue(snapshot.DBSnapshotArn) + arn := aws.ToString(snapshot.DBSnapshotArn) d.Set(names.AttrAllocatedStorage, snapshot.AllocatedStorage) d.Set(names.AttrAvailabilityZone, snapshot.AvailabilityZone) d.Set("db_instance_identifier", snapshot.DBInstanceIdentifier) @@ -208,46 +212,39 @@ func resourceSnapshotRead(ctx context.Context, d *schema.ResourceData, meta inte d.Set(names.AttrStatus, snapshot.Status) d.Set(names.AttrVPCID, snapshot.VpcId) - input := &rds.DescribeDBSnapshotAttributesInput{ - DBSnapshotIdentifier: aws.String(d.Id()), - } - - output, err := conn.DescribeDBSnapshotAttributesWithContext(ctx, input) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading RDS DB Snapshot (%s) attributes: %s", d.Id(), err) + attribute, err := findDBSnapshotAttributeByTwoPartKey(ctx, conn, d.Id(), dbSnapshotAttributeNameRestore) + switch { + case err == nil: + d.Set("shared_accounts", attribute.AttributeValues) + case tfresource.NotFound(err): + default: + return sdkdiag.AppendErrorf(diags, "reading RDS DB Snapshot (%s) attribute: %s", d.Id(), err) } - d.Set("shared_accounts", flex.FlattenStringSet(output.DBSnapshotAttributesResult.DBSnapshotAttributes[0].AttributeValues)) - - setTagsOut(ctx, snapshot.TagList) + setTagsOutV2(ctx, snapshot.TagList) return diags } func resourceSnapshotUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) if d.HasChange("shared_accounts") { o, n := d.GetChange("shared_accounts") - os := o.(*schema.Set) - ns := n.(*schema.Set) - - additionList := ns.Difference(os) - removalList := os.Difference(ns) - + os, ns := o.(*schema.Set), n.(*schema.Set) + add, del := ns.Difference(os), os.Difference(ns) input := &rds.ModifyDBSnapshotAttributeInput{ AttributeName: aws.String("restore"), DBSnapshotIdentifier: aws.String(d.Id()), - ValuesToAdd: flex.ExpandStringSet(additionList), - ValuesToRemove: flex.ExpandStringSet(removalList), + ValuesToAdd: flex.ExpandStringValueSet(add), + ValuesToRemove: flex.ExpandStringValueSet(del), } - _, err := conn.ModifyDBSnapshotAttributeWithContext(ctx, input) + _, err := conn.ModifyDBSnapshotAttribute(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "modifying RDS DB Snapshot (%s) attributes: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "modifying RDS DB 
Snapshot (%s) attribute: %s", d.Id(), err) } } @@ -256,14 +253,14 @@ func resourceSnapshotUpdate(ctx context.Context, d *schema.ResourceData, meta in func resourceSnapshotDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) log.Printf("[DEBUG] Deleting RDS DB Snapshot: %s", d.Id()) - _, err := conn.DeleteDBSnapshotWithContext(ctx, &rds.DeleteDBSnapshotInput{ + _, err := conn.DeleteDBSnapshot(ctx, &rds.DeleteDBSnapshotInput{ DBSnapshotIdentifier: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBSnapshotNotFoundFault) { + if errs.IsA[*types.DBSnapshotNotFoundFault](err) { return diags } @@ -274,18 +271,18 @@ func resourceSnapshotDelete(ctx context.Context, d *schema.ResourceData, meta in return diags } -func FindDBSnapshotByID(ctx context.Context, conn *rds.RDS, id string) (*rds.DBSnapshot, error) { +func findDBSnapshotByID(ctx context.Context, conn *rds.Client, id string) (*types.DBSnapshot, error) { input := &rds.DescribeDBSnapshotsInput{ DBSnapshotIdentifier: aws.String(id), } - output, err := findDBSnapshot(ctx, conn, input, tfslices.PredicateTrue[*rds.DBSnapshot]()) + output, err := findDBSnapshot(ctx, conn, input, tfslices.PredicateTrue[*types.DBSnapshot]()) if err != nil { return nil, err } // Eventual consistency check. - if aws.StringValue(output.DBSnapshotIdentifier) != id { + if aws.ToString(output.DBSnapshotIdentifier) != id { return nil, &retry.NotFoundError{ LastRequest: input, } @@ -294,34 +291,104 @@ func FindDBSnapshotByID(ctx context.Context, conn *rds.RDS, id string) (*rds.DBS return output, nil } -func findDBSnapshot(ctx context.Context, conn *rds.RDS, input *rds.DescribeDBSnapshotsInput, filter tfslices.Predicate[*rds.DBSnapshot]) (*rds.DBSnapshot, error) { +func findDBSnapshot(ctx context.Context, conn *rds.Client, input *rds.DescribeDBSnapshotsInput, filter tfslices.Predicate[*types.DBSnapshot]) (*types.DBSnapshot, error) { output, err := findDBSnapshots(ctx, conn, input, filter) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findDBSnapshots(ctx context.Context, conn *rds.RDS, input *rds.DescribeDBSnapshotsInput, filter tfslices.Predicate[*rds.DBSnapshot]) ([]*rds.DBSnapshot, error) { - var output []*rds.DBSnapshot +func findDBSnapshots(ctx context.Context, conn *rds.Client, input *rds.DescribeDBSnapshotsInput, filter tfslices.Predicate[*types.DBSnapshot]) ([]types.DBSnapshot, error) { + var output []types.DBSnapshot + + pages := rds.NewDescribeDBSnapshotsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - err := conn.DescribeDBSnapshotsPagesWithContext(ctx, input, func(page *rds.DescribeDBSnapshotsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + if errs.IsA[*types.DBSnapshotNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err } for _, v := range page.DBSnapshots { - if v != nil && filter(v) { + if filter(&v) { output = append(output, v) } } + } + + return output, nil +} + +func statusDBSnapshot(ctx context.Context, conn *rds.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findDBSnapshotByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return 
nil, "", err + } - return !lastPage + return output, aws.ToString(output.Status), nil + } +} + +func waitDBSnapshotCreated(ctx context.Context, conn *rds.Client, id string, timeout time.Duration) (*types.DBSnapshot, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: []string{dbSnapshotCreating}, + Target: []string{dbSnapshotAvailable}, + Refresh: statusDBSnapshot(ctx, conn, id), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.DBSnapshot); ok { + tfresource.SetLastError(err, fmt.Errorf("%d%% progress", aws.ToInt32(output.PercentProgress))) + return output, err + } + + return nil, err +} + +func findDBSnapshotAttributeByTwoPartKey(ctx context.Context, conn *rds.Client, id, attributeName string) (*types.DBSnapshotAttribute, error) { + input := &rds.DescribeDBSnapshotAttributesInput{ + DBSnapshotIdentifier: aws.String(id), + } + + return findDBSnapshotAttribute(ctx, conn, input, func(v *types.DBSnapshotAttribute) bool { + return aws.ToString(v.AttributeName) == attributeName }) +} + +func findDBSnapshotAttribute(ctx context.Context, conn *rds.Client, input *rds.DescribeDBSnapshotAttributesInput, filter tfslices.Predicate[*types.DBSnapshotAttribute]) (*types.DBSnapshotAttribute, error) { + output, err := findDBSnapshotAttributes(ctx, conn, input, filter) + + if err != nil { + return nil, err + } - if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBSnapshotNotFoundFault) { + return tfresource.AssertSingleValueResult(output) +} + +func findDBSnapshotAttributes(ctx context.Context, conn *rds.Client, input *rds.DescribeDBSnapshotAttributesInput, filter tfslices.Predicate[*types.DBSnapshotAttribute]) ([]types.DBSnapshotAttribute, error) { + output, err := conn.DescribeDBSnapshotAttributes(ctx, input) + + if errs.IsA[*types.DBSnapshotNotFoundFault](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -332,5 +399,9 @@ func findDBSnapshots(ctx context.Context, conn *rds.RDS, input *rds.DescribeDBSn return nil, err } - return output, nil + if output == nil || output.DBSnapshotAttributesResult == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return tfslices.Filter(output.DBSnapshotAttributesResult.DBSnapshotAttributes, tfslices.PredicateValue(filter)), nil } diff --git a/internal/service/rds/snapshot_copy.go b/internal/service/rds/snapshot_copy.go index e3180529a51..2684948ed2a 100644 --- a/internal/service/rds/snapshot_copy.go +++ b/internal/service/rds/snapshot_copy.go @@ -9,24 +9,26 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/rds" + "github.com/aws/aws-sdk-go-v2/service/rds/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" 
"github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_db_snapshot_copy", name="DB Snapshot") +// @SDKResource("aws_db_snapshot_copy", name="DB Snapshot Copy") // @Tags(identifierAttribute="db_snapshot_arn") // @Testing(tagsTest=false) -func ResourceSnapshotCopy() *schema.Resource { +func resourceSnapshotCopy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceSnapshotCopyCreate, ReadWithoutTimeout: resourceSnapshotCopyRead, @@ -104,6 +106,11 @@ func ResourceSnapshotCopy() *schema.Resource { Optional: true, ForceNew: true, }, + "shared_accounts": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "source_db_snapshot_identifier": { Type: schema.TypeString, Required: true, @@ -149,12 +156,12 @@ func ResourceSnapshotCopy() *schema.Resource { func resourceSnapshotCopyCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) targetDBSnapshotID := d.Get("target_db_snapshot_identifier").(string) input := &rds.CopyDBSnapshotInput{ SourceDBSnapshotIdentifier: aws.String(d.Get("source_db_snapshot_identifier").(string)), - Tags: getTagsIn(ctx), + Tags: getTagsInV2(ctx), TargetDBSnapshotIdentifier: aws.String(targetDBSnapshotID), } @@ -162,10 +169,6 @@ func resourceSnapshotCopyCreate(ctx context.Context, d *schema.ResourceData, met input.CopyTags = aws.Bool(v.(bool)) } - if v, ok := d.GetOk("destination_region"); ok { - input.DestinationRegion = aws.String(v.(string)) - } - if v, ok := d.GetOk(names.AttrKMSKeyID); ok { input.KmsKeyId = aws.String(v.(string)) } @@ -176,27 +179,54 @@ func resourceSnapshotCopyCreate(ctx context.Context, d *schema.ResourceData, met if v, ok := d.GetOk("presigned_url"); ok { input.PreSignedUrl = aws.String(v.(string)) + } else if v, ok := d.GetOk("destination_region"); ok { + output, err := rds.NewPresignClient(conn, func(o *rds.PresignOptions) { + o.ClientOptions = append(o.ClientOptions, func(o *rds.Options) { + o.Region = v.(string) + }) + }).PresignCopyDBSnapshot(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "presigning RDS DB Snapshot Copy (%s) request: %s", targetDBSnapshotID, err) + } + + input.PreSignedUrl = aws.String(output.URL) } - output, err := conn.CopyDBSnapshotWithContext(ctx, input) + output, err := conn.CopyDBSnapshot(ctx, input) + if err != nil { return sdkdiag.AppendErrorf(diags, "creating RDS DB Snapshot Copy (%s): %s", targetDBSnapshotID, err) } - d.SetId(aws.StringValue(output.DBSnapshot.DBSnapshotIdentifier)) + d.SetId(aws.ToString(output.DBSnapshot.DBSnapshotIdentifier)) - if err := waitDBSnapshotCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + if _, err := waitDBSnapshotCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for RDS DB Snapshot Copy (%s) create: %s", d.Id(), err) } + if v, ok := d.GetOk("shared_accounts"); ok && v.(*schema.Set).Len() > 0 { + input := &rds.ModifyDBSnapshotAttributeInput{ + AttributeName: aws.String("restore"), + DBSnapshotIdentifier: aws.String(d.Id()), + ValuesToAdd: flex.ExpandStringValueSet(v.(*schema.Set)), + } + + _, err := conn.ModifyDBSnapshotAttribute(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "modifying RDS DB Snapshot (%s) attribute: %s", d.Id(), err) + } + } + return 
append(diags, resourceSnapshotCopyRead(ctx, d, meta)...) } func resourceSnapshotCopyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) - snapshot, err := FindDBSnapshotByID(ctx, conn, d.Id()) + snapshot, err := findDBSnapshotByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] RDS DB Snapshot (%s) not found, removing from state", d.Id()) @@ -208,7 +238,7 @@ func resourceSnapshotCopyRead(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "reading RDS DB Snapshot Copy (%s): %s", d.Id(), err) } - arn := aws.StringValue(snapshot.DBSnapshotArn) + arn := aws.ToString(snapshot.DBSnapshotArn) d.Set(names.AttrAllocatedStorage, snapshot.AllocatedStorage) d.Set(names.AttrAvailabilityZone, snapshot.AvailabilityZone) d.Set("db_snapshot_arn", arn) @@ -227,24 +257,53 @@ func resourceSnapshotCopyRead(ctx context.Context, d *schema.ResourceData, meta d.Set("target_db_snapshot_identifier", snapshot.DBSnapshotIdentifier) d.Set(names.AttrVPCID, snapshot.VpcId) + attribute, err := findDBSnapshotAttributeByTwoPartKey(ctx, conn, d.Id(), dbSnapshotAttributeNameRestore) + switch { + case err == nil: + d.Set("shared_accounts", attribute.AttributeValues) + case tfresource.NotFound(err): + default: + return sdkdiag.AppendErrorf(diags, "reading RDS DB Snapshot (%s) attribute: %s", d.Id(), err) + } + return diags } func resourceSnapshotCopyUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - // Tags only. - return resourceSnapshotCopyRead(ctx, d, meta) + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).RDSClient(ctx) + + if d.HasChange("shared_accounts") { + o, n := d.GetChange("shared_accounts") + os, ns := o.(*schema.Set), n.(*schema.Set) + add, del := ns.Difference(os), os.Difference(ns) + input := &rds.ModifyDBSnapshotAttributeInput{ + AttributeName: aws.String("restore"), + DBSnapshotIdentifier: aws.String(d.Id()), + ValuesToAdd: flex.ExpandStringValueSet(add), + ValuesToRemove: flex.ExpandStringValueSet(del), + } + + _, err := conn.ModifyDBSnapshotAttribute(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "modifying RDS DB Snapshot (%s) attribute: %s", d.Id(), err) + } + } + + return append(diags, resourceSnapshotCopyRead(ctx, d, meta)...) 
} func resourceSnapshotCopyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) log.Printf("[DEBUG] Deleting RDS DB Snapshot Copy: %s", d.Id()) - _, err := conn.DeleteDBSnapshotWithContext(ctx, &rds.DeleteDBSnapshotInput{ + _, err := conn.DeleteDBSnapshot(ctx, &rds.DeleteDBSnapshotInput{ DBSnapshotIdentifier: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBSnapshotNotFoundFault) { + if errs.IsA[*types.DBSnapshotNotFoundFault](err) { return diags } diff --git a/internal/service/rds/snapshot_copy_test.go b/internal/service/rds/snapshot_copy_test.go index 94ef126193a..b558ff7e182 100644 --- a/internal/service/rds/snapshot_copy_test.go +++ b/internal/service/rds/snapshot_copy_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/rds" + "github.com/aws/aws-sdk-go-v2/service/rds/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -25,7 +25,7 @@ func TestAccRDSSnapshotCopy_basic(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var v rds.DBSnapshot + var v types.DBSnapshot resourceName := "aws_db_snapshot_copy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -39,6 +39,8 @@ func TestAccRDSSnapshotCopy_basic(t *testing.T) { Config: testAccSnapshotCopyConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckSnapshotCopyExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "shared_accounts.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), ), }, { @@ -50,13 +52,53 @@ func TestAccRDSSnapshotCopy_basic(t *testing.T) { }) } +func TestAccRDSSnapshotCopy_share(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var v types.DBSnapshot + resourceName := "aws_db_snapshot_copy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckSnapshotCopyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccSnapshotCopyConfig_share(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckSnapshotCopyExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "shared_accounts.#", acctest.Ct1), + resource.TestCheckTypeSetElemAttr(resourceName, "shared_accounts.*", "all"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccSnapshotCopyConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckSnapshotCopyExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "shared_accounts.#", acctest.Ct0), + ), + }, + }, + }) +} + func TestAccRDSSnapshotCopy_tags(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") } - var v rds.DBSnapshot + var v types.DBSnapshot resourceName := "aws_db_snapshot_copy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -106,7 +148,7 @@ func 
TestAccRDSSnapshotCopy_disappears(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var v rds.DBSnapshot + var v types.DBSnapshot resourceName := "aws_db_snapshot_copy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -128,9 +170,55 @@ func TestAccRDSSnapshotCopy_disappears(t *testing.T) { }) } +func TestAccRDSSnapshotCopy_destinationRegion(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var v types.DBSnapshot + resourceName := "aws_db_snapshot_copy.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + CheckDestroy: testAccCheckSnapshotCopyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccSnapshotCopyConfig_destinationRegion(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckSnapshotCopyExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrSet(resourceName, names.AttrAllocatedStorage), + resource.TestCheckResourceAttrSet(resourceName, names.AttrAvailabilityZone), + resource.TestCheckResourceAttr(resourceName, "destination_region", acctest.AlternateRegion()), + resource.TestCheckResourceAttr(resourceName, names.AttrEncrypted, acctest.CtFalse), + resource.TestCheckResourceAttrSet(resourceName, names.AttrEngine), + resource.TestCheckResourceAttrSet(resourceName, names.AttrEngineVersion), + resource.TestCheckResourceAttrSet(resourceName, names.AttrIOPS), + resource.TestCheckResourceAttr(resourceName, names.AttrKMSKeyID, ""), + resource.TestCheckResourceAttrSet(resourceName, "license_model"), + resource.TestCheckResourceAttrSet(resourceName, "option_group_name"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrPort), + resource.TestCheckResourceAttrSet(resourceName, "snapshot_type"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrStorageType), + resource.TestCheckResourceAttrSet(resourceName, names.AttrVPCID), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"destination_region"}, + }, + }, + }) +} + func testAccCheckSnapshotCopyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).RDSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_db_snapshot_copy" { @@ -154,18 +242,14 @@ func testAccCheckSnapshotCopyDestroy(ctx context.Context) resource.TestCheckFunc } } -func testAccCheckSnapshotCopyExists(ctx context.Context, n string, v *rds.DBSnapshot) resource.TestCheckFunc { +func testAccCheckSnapshotCopyExists(ctx context.Context, n string, v *types.DBSnapshot) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No RDS DB Snapshot Copy ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).RDSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) output, err := tfrds.FindDBSnapshotByID(ctx, conn, rs.Primary.ID) if err != nil { @@ -243,3 +327,22 @@ resource "aws_db_snapshot_copy" "test" { } }`, rName, tagKey1, tagValue1, tagKey2, 
tagValue2)) } + +func testAccSnapshotCopyConfig_share(rName string) string { + return acctest.ConfigCompose(testAccSnapshotCopyConfig_base(rName), fmt.Sprintf(` +resource "aws_db_snapshot_copy" "test" { + source_db_snapshot_identifier = aws_db_snapshot.test.db_snapshot_arn + target_db_snapshot_identifier = "%[1]s-target" + shared_accounts = ["all"] +} +`, rName)) +} + +func testAccSnapshotCopyConfig_destinationRegion(rName string) string { + return acctest.ConfigCompose(testAccSnapshotCopyConfig_base(rName), fmt.Sprintf(` +resource "aws_db_snapshot_copy" "test" { + source_db_snapshot_identifier = aws_db_snapshot.test.db_snapshot_arn + target_db_snapshot_identifier = "%[1]s-target" + destination_region = %[2]q +}`, rName, acctest.AlternateRegion())) +} diff --git a/internal/service/rds/snapshot_data_source.go b/internal/service/rds/snapshot_data_source.go index 168eba32280..40e0820367a 100644 --- a/internal/service/rds/snapshot_data_source.go +++ b/internal/service/rds/snapshot_data_source.go @@ -8,8 +8,9 @@ import ( "sort" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/rds" + "github.com/aws/aws-sdk-go-v2/service/rds/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -22,7 +23,7 @@ import ( // @SDKDataSource("aws_db_snapshot", name="DB Snapshot") // @Tags // @Testing(tagsTest=false) -func DataSourceSnapshot() *schema.Resource { +func dataSourceSnapshot() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceSnapshotRead, @@ -133,7 +134,7 @@ func DataSourceSnapshot() *schema.Resource { func dataSourceSnapshotRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) + conn := meta.(*conns.AWSClient).RDSClient(ctx) input := &rds.DescribeDBSnapshotsInput{ IncludePublic: aws.Bool(d.Get("include_public").(bool)), @@ -152,10 +153,10 @@ func dataSourceSnapshotRead(ctx context.Context, d *schema.ResourceData, meta in input.SnapshotType = aws.String(v.(string)) } - f := tfslices.PredicateTrue[*rds.DBSnapshot]() - if tags := getTagsIn(ctx); len(tags) > 0 { - f = func(v *rds.DBSnapshot) bool { - return KeyValueTags(ctx, v.TagList).ContainsAll(KeyValueTags(ctx, tags)) + f := tfslices.PredicateTrue[*types.DBSnapshot]() + if tags := getTagsInV2(ctx); len(tags) > 0 { + f = func(v *types.DBSnapshot) bool { + return keyValueTagsV2(ctx, v.TagList).ContainsAll(keyValueTagsV2(ctx, tags)) } } @@ -169,7 +170,7 @@ func dataSourceSnapshotRead(ctx context.Context, d *schema.ResourceData, meta in return sdkdiag.AppendErrorf(diags, "Your query returned no results. Please change your search criteria and try again.") } - var snapshot *rds.DBSnapshot + var snapshot *types.DBSnapshot if len(snapshots) > 1 { if d.Get(names.AttrMostRecent).(bool) { snapshot = mostRecentDBSnapshot(snapshots) @@ -177,10 +178,10 @@ func dataSourceSnapshotRead(ctx context.Context, d *schema.ResourceData, meta in return sdkdiag.AppendErrorf(diags, "Your query returned more than one result. 
Please try a more specific search criteria.") } } else { - snapshot = snapshots[0] + snapshot = &snapshots[0] } - d.SetId(aws.StringValue(snapshot.DBSnapshotIdentifier)) + d.SetId(aws.ToString(snapshot.DBSnapshotIdentifier)) d.Set(names.AttrAllocatedStorage, snapshot.AllocatedStorage) d.Set(names.AttrAvailabilityZone, snapshot.AvailabilityZone) d.Set("db_instance_identifier", snapshot.DBInstanceIdentifier) @@ -207,12 +208,12 @@ func dataSourceSnapshotRead(ctx context.Context, d *schema.ResourceData, meta in d.Set(names.AttrStorageType, snapshot.StorageType) d.Set(names.AttrVPCID, snapshot.VpcId) - setTagsOut(ctx, snapshot.TagList) + setTagsOutV2(ctx, snapshot.TagList) return diags } -type rdsSnapshotSort []*rds.DBSnapshot +type rdsSnapshotSort []types.DBSnapshot func (a rdsSnapshotSort) Len() int { return len(a) } func (a rdsSnapshotSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } @@ -225,11 +226,11 @@ func (a rdsSnapshotSort) Less(i, j int) bool { return false } - return (*a[i].SnapshotCreateTime).Before(*a[j].SnapshotCreateTime) + return (aws.ToTime(a[i].SnapshotCreateTime)).Before(aws.ToTime(a[j].SnapshotCreateTime)) } -func mostRecentDBSnapshot(snapshots []*rds.DBSnapshot) *rds.DBSnapshot { +func mostRecentDBSnapshot(snapshots []types.DBSnapshot) *types.DBSnapshot { sortedSnapshots := snapshots sort.Sort(rdsSnapshotSort(sortedSnapshots)) - return sortedSnapshots[len(sortedSnapshots)-1] + return &sortedSnapshots[len(sortedSnapshots)-1] } diff --git a/internal/service/rds/snapshot_test.go b/internal/service/rds/snapshot_test.go index 3286111bdf5..a262307bc2f 100644 --- a/internal/service/rds/snapshot_test.go +++ b/internal/service/rds/snapshot_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/rds" + "github.com/aws/aws-sdk-go-v2/service/rds/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -26,7 +26,7 @@ func TestAccRDSSnapshot_basic(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var v rds.DBSnapshot + var v types.DBSnapshot resourceName := "aws_db_snapshot.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -60,7 +60,7 @@ func TestAccRDSSnapshot_share(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var v rds.DBSnapshot + var v types.DBSnapshot resourceName := "aws_db_snapshot.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -100,7 +100,7 @@ func TestAccRDSSnapshot_tags(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var v rds.DBSnapshot + var v types.DBSnapshot resourceName := "aws_db_snapshot.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -150,7 +150,7 @@ func TestAccRDSSnapshot_disappears(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var v rds.DBSnapshot + var v types.DBSnapshot rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_db_snapshot.test" @@ -174,7 +174,7 @@ func TestAccRDSSnapshot_disappears(t *testing.T) { func testAccCheckDBSnapshotDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).RDSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_db_snapshot" { @@ -198,18 +198,14 @@ func 
testAccCheckDBSnapshotDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckDBSnapshotExists(ctx context.Context, n string, v *rds.DBSnapshot) resource.TestCheckFunc { +func testAccCheckDBSnapshotExists(ctx context.Context, n string, v *types.DBSnapshot) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No RDS DB Snapshot ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).RDSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) output, err := tfrds.FindDBSnapshotByID(ctx, conn, rs.Primary.ID) if err != nil { diff --git a/internal/service/rds/status.go b/internal/service/rds/status.go deleted file mode 100644 index be79678f8b1..00000000000 --- a/internal/service/rds/status.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package rds - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func statusDBClusterRole(ctx context.Context, conn *rds.RDS, dbClusterID, roleARN string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindDBClusterRoleByDBClusterIDAndRoleARN(ctx, conn, dbClusterID, roleARN) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Status), nil - } -} - -func statusReservedInstance(ctx context.Context, conn *rds.RDS, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindReservedDBInstanceByID(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.State), nil - } -} - -func statusDBSnapshot(ctx context.Context, conn *rds.RDS, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindDBSnapshotByID(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Status), nil - } -} diff --git a/internal/service/rds/sweep.go b/internal/service/rds/sweep.go index dd1abe5cad0..24d3109d55a 100644 --- a/internal/service/rds/sweep.go +++ b/internal/service/rds/sweep.go @@ -131,7 +131,7 @@ func sweepClusterParameterGroups(region string) error { continue } - r := ResourceClusterParameterGroup() + r := resourceClusterParameterGroup() d := r.Data(nil) d.SetId(name) @@ -484,7 +484,7 @@ func sweepParameterGroups(region string) error { continue } - r := ResourceParameterGroup() + r := resourceParameterGroup() d := r.Data(nil) d.SetId(name) @@ -578,7 +578,7 @@ func sweepSnapshots(region string) error { continue } - r := ResourceSnapshot() + r := resourceSnapshot() d := r.Data(nil) d.SetId(id) diff --git a/internal/service/rds/wait.go b/internal/service/rds/wait.go deleted file mode 100644 index 1932090e361..00000000000 --- a/internal/service/rds/wait.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package rds - -import ( - "context" - "time" - - "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" -) - -func waitDBClusterRoleAssociationCreated(ctx context.Context, conn *rds.RDS, dbClusterID, roleARN string, timeout time.Duration) (*rds.DBClusterRole, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{clusterRoleStatusPending}, - Target: []string{clusterRoleStatusActive}, - Refresh: statusDBClusterRole(ctx, conn, dbClusterID, roleARN), - Timeout: timeout, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*rds.DBClusterRole); ok { - return output, err - } - - return nil, err -} - -func waitDBClusterRoleAssociationDeleted(ctx context.Context, conn *rds.RDS, dbClusterID, roleARN string, timeout time.Duration) (*rds.DBClusterRole, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{clusterRoleStatusActive, clusterRoleStatusPending}, - Target: []string{}, - Refresh: statusDBClusterRole(ctx, conn, dbClusterID, roleARN), - Timeout: timeout, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*rds.DBClusterRole); ok { - return output, err - } - - return nil, err -} - -func waitDBClusterInstanceCreated(ctx context.Context, conn *rds.RDS, id string, timeout time.Duration) (*rds.DBInstance, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{ - InstanceStatusBackingUp, - InstanceStatusConfiguringEnhancedMonitoring, - InstanceStatusConfiguringIAMDatabaseAuth, - InstanceStatusConfiguringLogExports, - InstanceStatusCreating, - InstanceStatusMaintenance, - InstanceStatusModifying, - InstanceStatusRebooting, - InstanceStatusRenaming, - InstanceStatusResettingMasterCredentials, - InstanceStatusStarting, - InstanceStatusStorageOptimization, - InstanceStatusUpgrading, - }, - Target: []string{InstanceStatusAvailable}, - Refresh: statusDBInstanceSDKv1(ctx, conn, id), - Timeout: timeout, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*rds.DBInstance); ok { - return output, err - } - - return nil, err -} - -func waitDBClusterInstanceUpdated(ctx context.Context, conn *rds.RDS, id string, timeout time.Duration) (*rds.DBInstance, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{ - InstanceStatusBackingUp, - InstanceStatusConfiguringEnhancedMonitoring, - InstanceStatusConfiguringIAMDatabaseAuth, - InstanceStatusConfiguringLogExports, - InstanceStatusCreating, - InstanceStatusMaintenance, - InstanceStatusModifying, - InstanceStatusRebooting, - InstanceStatusRenaming, - InstanceStatusResettingMasterCredentials, - InstanceStatusStarting, - InstanceStatusStorageOptimization, - InstanceStatusUpgrading, - }, - Target: []string{InstanceStatusAvailable}, - Refresh: statusDBInstanceSDKv1(ctx, conn, id), - Timeout: timeout, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*rds.DBInstance); ok { - return output, err - } - - return nil, err -} - -func waitDBClusterInstanceDeleted(ctx context.Context, conn *rds.RDS, id string, timeout time.Duration) (*rds.DBInstance, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{ - 
InstanceStatusConfiguringLogExports, - InstanceStatusDeletePreCheck, - InstanceStatusDeleting, - InstanceStatusModifying, - }, - Target: []string{}, - Refresh: statusDBInstanceSDKv1(ctx, conn, id), - Timeout: timeout, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*rds.DBInstance); ok { - return output, err - } - - return nil, err -} - -func waitReservedInstanceCreated(ctx context.Context, conn *rds.RDS, id string, timeout time.Duration) error { - stateConf := &retry.StateChangeConf{ - Pending: []string{ - ReservedInstanceStatePaymentPending, - }, - Target: []string{ReservedInstanceStateActive}, - Refresh: statusReservedInstance(ctx, conn, id), - NotFoundChecks: 5, - Timeout: timeout, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - _, err := stateConf.WaitForStateContext(ctx) - - return err -} - -func waitDBSnapshotCreated(ctx context.Context, conn *rds.RDS, id string, timeout time.Duration) error { - stateConf := &retry.StateChangeConf{ - Pending: []string{DBSnapshotCreating}, - Target: []string{DBSnapshotAvailable}, - Refresh: statusDBSnapshot(ctx, conn, id), - Timeout: timeout, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - _, err := stateConf.WaitForStateContext(ctx) - - return err -} diff --git a/internal/slices/predicates.go b/internal/slices/predicates.go index 962388c7a26..cfac1eac942 100644 --- a/internal/slices/predicates.go +++ b/internal/slices/predicates.go @@ -42,3 +42,9 @@ func PredicateTrue[T any]() Predicate[T] { return true } } + +func PredicateValue[T any](predicate Predicate[*T]) Predicate[T] { + return func(v T) bool { + return predicate(&v) + } +} diff --git a/website/docs/r/db_snapshot_copy.html.markdown b/website/docs/r/db_snapshot_copy.html.markdown index d42e57928f7..c0ee3efb5bc 100644 --- a/website/docs/r/db_snapshot_copy.html.markdown +++ b/website/docs/r/db_snapshot_copy.html.markdown @@ -67,6 +67,7 @@ This resource exports the following attributes in addition to the arguments abov * `kms_key_id` - The ARN for the KMS encryption key. * `license_model` - License model information for the restored DB instance. * `option_group_name` - Provides the option group name for the DB snapshot. +* `shared_accounts` - (Optional) List of AWS Account IDs to share the snapshot with. Use `all` to make the snapshot public. * `source_db_snapshot_identifier` - The DB snapshot Arn that the DB snapshot was copied from. It only has value in case of cross customer or cross region copy. * `source_region` - The region that the DB snapshot was created in or copied from. * `storage_type` - Specifies the storage type associated with DB snapshot.
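For context, a minimal Terraform sketch of the behavior documented above. It assumes an existing `aws_db_snapshot.example` resource; the target identifiers, the account ID, and the region are placeholders, and `["all"]` makes the copied snapshot public, mirroring the acceptance test configs in this diff:

```terraform
# Share a copied snapshot with another account (new shared_accounts argument).
resource "aws_db_snapshot_copy" "shared" {
  source_db_snapshot_identifier = aws_db_snapshot.example.db_snapshot_arn
  target_db_snapshot_identifier = "example-copy-shared"

  # Use ["all"] instead of an account ID to make the copy public.
  shared_accounts = ["123456789012"]
}

# Cross-region copy: with only destination_region set (no presigned_url),
# the provider now presigns the CopyDBSnapshot request for that region itself.
resource "aws_db_snapshot_copy" "cross_region" {
  source_db_snapshot_identifier = aws_db_snapshot.example.db_snapshot_arn
  target_db_snapshot_identifier = "example-copy-cross-region"
  destination_region            = "us-west-2"
}
```

On create, sharing is applied via `ModifyDBSnapshotAttribute` on the `restore` attribute once the copy is available; on update, the old and new `shared_accounts` sets are diffed into `ValuesToAdd`/`ValuesToRemove`, matching `resourceSnapshotCopyCreate` and `resourceSnapshotCopyUpdate` above.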