From 83dbd0f24c0445d2700cd863be6871da15bf4aa2 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Mon, 16 Oct 2023 10:18:18 -0400 Subject: [PATCH 01/46] feat: Update EKS to use aws-sdk-go-v2 --- go.mod | 1 + go.sum | 2 + internal/conns/awsclient_gen.go | 6 +- .../service/dms/event_subscription_test.go | 6 +- internal/service/eks/addon.go | 120 ++--- internal/service/eks/addon_data_source.go | 10 +- .../service/eks/addon_data_source_test.go | 11 +- internal/service/eks/addon_test.go | 143 +++--- .../service/eks/addon_version_data_source.go | 4 +- .../eks/addon_version_data_source_test.go | 19 +- internal/service/eks/arn.go | 2 +- internal/service/eks/cluster.go | 428 ++++++++++-------- .../eks/cluster_auth_data_source_test.go | 4 +- internal/service/eks/cluster_data_source.go | 9 +- .../service/eks/cluster_data_source_test.go | 6 +- internal/service/eks/cluster_test.go | 99 ++-- internal/service/eks/clusters_data_source.go | 26 +- .../service/eks/clusters_data_source_test.go | 4 +- internal/service/eks/consts.go | 10 - internal/service/eks/errors.go | 40 +- internal/service/eks/fargate_profile.go | 67 ++- internal/service/eks/fargate_profile_test.go | 35 +- internal/service/eks/find.go | 89 ++-- internal/service/eks/generate.go | 2 +- .../service/eks/identity_provider_config.go | 58 +-- .../eks/identity_provider_config_test.go | 29 +- internal/service/eks/node_group.go | 218 +++++---- .../service/eks/node_group_data_source.go | 7 +- .../eks/node_group_data_source_test.go | 7 +- internal/service/eks/node_group_test.go | 147 +++--- .../service/eks/node_groups_data_source.go | 29 +- .../eks/node_groups_data_source_test.go | 4 +- internal/service/eks/service_package_gen.go | 17 +- internal/service/eks/status.go | 39 +- internal/service/eks/sweep.go | 357 +++++++-------- internal/service/eks/tags_gen.go | 33 +- internal/service/eks/wait.go | 165 +------ names/names_data.csv | 2 +- 38 files changed, 1087 insertions(+), 1168 deletions(-) diff --git a/go.mod b/go.mod index
58e9c2753f0..c76b69c490d 100644 --- a/go.mod +++ b/go.mod @@ -26,6 +26,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/directoryservice v1.18.7 github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.3.2 github.com/aws/aws-sdk-go-v2/service/ec2 v1.126.0 + github.com/aws/aws-sdk-go-v2/service/eks v1.29.7 github.com/aws/aws-sdk-go-v2/service/emrserverless v1.11.2 github.com/aws/aws-sdk-go-v2/service/finspace v1.12.4 github.com/aws/aws-sdk-go-v2/service/fis v1.16.2 diff --git a/go.sum b/go.sum index 3daed8dcfc3..3af4e428798 100644 --- a/go.sum +++ b/go.sum @@ -81,6 +81,8 @@ github.com/aws/aws-sdk-go-v2/service/dynamodb v1.22.1 h1:TYq4EU2vEEluoaBG0RCPnbi github.com/aws/aws-sdk-go-v2/service/dynamodb v1.22.1/go.mod h1:1EJb9/tJwI7iqiStZBcmHijQxcgp7dlPuD2YgoZIrJQ= github.com/aws/aws-sdk-go-v2/service/ec2 v1.126.0 h1:EGYP4IDYHYe4IcpCUxEAIVKr9nZXvtql4HNhEPK1Y3w= github.com/aws/aws-sdk-go-v2/service/ec2 v1.126.0/go.mod h1:raUdIDoNuDPn9dMG3cCmIm8RoWOmZUqQPzuw8xpmB8Y= +github.com/aws/aws-sdk-go-v2/service/eks v1.29.7 h1:MRBXts9pc/3RdaeLXR4HwmVhqMP70sOEYHpkgmAWDbU= +github.com/aws/aws-sdk-go-v2/service/eks v1.29.7/go.mod h1:Nt5l6Vn68Hv0JWJ6dcQDKuBAKAfHUZSC9Ln8X/1fUMY= github.com/aws/aws-sdk-go-v2/service/emrserverless v1.11.2 h1:diyMrawOZ56CavFS//UFFjk2LY1ooXeTqAecXsjtXwI= github.com/aws/aws-sdk-go-v2/service/emrserverless v1.11.2/go.mod h1:ZrmnnT6zI3+0XsQIGCu/vXhIFk4Vwu4WKqeMDSzm4z4= github.com/aws/aws-sdk-go-v2/service/finspace v1.12.4 h1:xML1DGju5bsRtFejZfHuWtaut3WkYrGTHMf2G3T3de0= diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index 0a73c487c30..5a0a0c9490b 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -20,6 +20,7 @@ import ( directoryservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/directoryservice" docdbelastic_sdkv2 "github.com/aws/aws-sdk-go-v2/service/docdbelastic" ec2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ec2" + eks_sdkv2 "github.com/aws/aws-sdk-go-v2/service/eks" emrserverless_sdkv2 
"github.com/aws/aws-sdk-go-v2/service/emrserverless" finspace_sdkv2 "github.com/aws/aws-sdk-go-v2/service/finspace" fis_sdkv2 "github.com/aws/aws-sdk-go-v2/service/fis" @@ -131,7 +132,6 @@ import ( ecrpublic_sdkv1 "github.com/aws/aws-sdk-go/service/ecrpublic" ecs_sdkv1 "github.com/aws/aws-sdk-go/service/ecs" efs_sdkv1 "github.com/aws/aws-sdk-go/service/efs" - eks_sdkv1 "github.com/aws/aws-sdk-go/service/eks" elasticache_sdkv1 "github.com/aws/aws-sdk-go/service/elasticache" elasticbeanstalk_sdkv1 "github.com/aws/aws-sdk-go/service/elasticbeanstalk" elasticsearchservice_sdkv1 "github.com/aws/aws-sdk-go/service/elasticsearchservice" @@ -527,8 +527,8 @@ func (c *AWSClient) EFSConn(ctx context.Context) *efs_sdkv1.EFS { return errs.Must(conn[*efs_sdkv1.EFS](ctx, c, names.EFS)) } -func (c *AWSClient) EKSConn(ctx context.Context) *eks_sdkv1.EKS { - return errs.Must(conn[*eks_sdkv1.EKS](ctx, c, names.EKS)) +func (c *AWSClient) EKSClient(ctx context.Context) *eks_sdkv2.Client { + return errs.Must(client[*eks_sdkv2.Client](ctx, c, names.EKS)) } func (c *AWSClient) ELBConn(ctx context.Context) *elb_sdkv1.ELB { diff --git a/internal/service/dms/event_subscription_test.go b/internal/service/dms/event_subscription_test.go index c6665a4c870..0f961a8d5ce 100644 --- a/internal/service/dms/event_subscription_test.go +++ b/internal/service/dms/event_subscription_test.go @@ -8,9 +8,9 @@ import ( "fmt" "testing" + "github.com/aws/aws-sdk-go-v2/service/eks" "github.com/aws/aws-sdk-go/aws" dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/aws/aws-sdk-go/service/eks" "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -389,11 +389,11 @@ resource "aws_dms_event_subscription" "test" { } func testAccPreCheckEKS(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + 
client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) input := &eks.ListClustersInput{} - _, err := conn.ListClustersWithContext(ctx, input) + _, err := client.ListClusters(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) diff --git a/internal/service/eks/addon.go b/internal/service/eks/addon.go index ab089032a25..328b482baf0 100644 --- a/internal/service/eks/addon.go +++ b/internal/service/eks/addon.go @@ -6,17 +6,20 @@ package eks import ( "context" "log" + "strings" "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" sdkid "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -89,26 +92,23 @@ func ResourceAddon() *schema.Resource { Optional: true, }, "resolve_conflicts": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(eks.ResolveConflicts_Values(), false), - Deprecated: `The "resolve_conflicts" attribute can't be set to "PRESERVE" on initial resource creation. 
Use "resolve_conflicts_on_create" and/or "resolve_conflicts_on_update" instead`, + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.ResolveConflicts](), + Deprecated: `The "resolve_conflicts" attribute can't be set to "PRESERVE" on initial resource creation. Use "resolve_conflicts_on_create" and/or "resolve_conflicts_on_update" instead`, }, "resolve_conflicts_on_create": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{ - eks.ResolveConflictsNone, - eks.ResolveConflictsOverwrite, - }, false), - ConflictsWith: []string{"resolve_conflicts"}, - }, - "resolve_conflicts_on_update": { Type: schema.TypeString, Optional: true, - ValidateFunc: validation.StringInSlice(eks.ResolveConflicts_Values(), false), + ValidateFunc: validation.StringInSlice(enum.Slice(types.ResolveConflictsNone, types.ResolveConflictsOverwrite), false), ConflictsWith: []string{"resolve_conflicts"}, }, + "resolve_conflicts_on_update": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.ResolveConflicts](), + ConflictsWith: []string{"resolve_conflicts"}, + }, "service_account_role_arn": { Type: schema.TypeString, Optional: true, @@ -122,7 +122,7 @@ func ResourceAddon() *schema.Resource { func resourceAddonCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) addonName := d.Get("addon_name").(string) clusterName := d.Get("cluster_name").(string) @@ -143,9 +143,9 @@ func resourceAddonCreate(ctx context.Context, d *schema.ResourceData, meta inter } if v, ok := d.GetOk("resolve_conflicts"); ok { - input.ResolveConflicts = aws.String(v.(string)) + input.ResolveConflicts = types.ResolveConflicts(v.(string)) } else if v, ok := d.GetOk("resolve_conflicts_on_create"); ok { - input.ResolveConflicts = aws.String(v.(string)) + 
input.ResolveConflicts = types.ResolveConflicts(v.(string)) } if v, ok := d.GetOk("service_account_role_arn"); ok { @@ -154,15 +154,13 @@ func resourceAddonCreate(ctx context.Context, d *schema.ResourceData, meta inter _, err := tfresource.RetryWhen(ctx, propagationTimeout, func() (interface{}, error) { - return conn.CreateAddonWithContext(ctx, input) + return client.CreateAddon(ctx, input) }, func(err error) (bool, error) { - if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "CREATE_FAILED") { - return true, err - } - - if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "does not exist") { - return true, err + if errs.IsA[*types.InvalidParameterException](err) { + if strings.Contains(err.Error(), "CREATE_FAILED") || strings.Contains(err.Error(), "does not exist") { + return true, err + } } return false, err @@ -175,7 +173,14 @@ func resourceAddonCreate(ctx context.Context, d *schema.ResourceData, meta inter d.SetId(id) - if _, err := waitAddonCreated(ctx, conn, clusterName, addonName, d.Timeout(schema.TimeoutCreate)); err != nil { + waiter := eks.NewAddonActiveWaiter(client) + waiterParams := &eks.DescribeAddonInput{ + AddonName: aws.String(addonName), + ClusterName: aws.String(clusterName), + } + + err = waiter.Wait(ctx, waiterParams, d.Timeout(schema.TimeoutCreate)) + if err != nil { // Creating addon w/o setting resolve_conflicts to "OVERWRITE" // might result in a failed creation, if unmanaged version of addon is already deployed // and there are configuration conflicts: @@ -194,7 +199,7 @@ func resourceAddonCreate(ctx context.Context, d *schema.ResourceData, meta inter func resourceAddonRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, addonName, err := AddonParseResourceID(d.Id()) @@ -202,12 +207,14 @@ func resourceAddonRead(ctx 
context.Context, d *schema.ResourceData, meta interfa return sdkdiag.AppendFromErr(diags, err) } - addon, err := FindAddonByClusterNameAndAddonName(ctx, conn, clusterName, addonName) + addon, err := FindAddonByClusterNameAndAddonName(ctx, client, clusterName, addonName) - if !d.IsNewResource() && tfresource.NotFound(err) { - log.Printf("[WARN] EKS Add-On (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil + if !d.IsNewResource() && errs.IsA[*types.ResourceNotFoundException](err) { + if !d.IsNewResource() { + log.Printf("[WARN] EKS Add-On (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } } if err != nil { @@ -219,8 +226,8 @@ func resourceAddonRead(ctx context.Context, d *schema.ResourceData, meta interfa d.Set("arn", addon.AddonArn) d.Set("cluster_name", addon.ClusterName) d.Set("configuration_values", addon.ConfigurationValues) - d.Set("created_at", aws.TimeValue(addon.CreatedAt).Format(time.RFC3339)) - d.Set("modified_at", aws.TimeValue(addon.ModifiedAt).Format(time.RFC3339)) + d.Set("created_at", aws.ToTime(addon.CreatedAt).Format(time.RFC3339)) + d.Set("modified_at", aws.ToTime(addon.ModifiedAt).Format(time.RFC3339)) d.Set("service_account_role_arn", addon.ServiceAccountRoleArn) setTagsOut(ctx, addon.Tags) @@ -230,7 +237,7 @@ func resourceAddonRead(ctx context.Context, d *schema.ResourceData, meta interfa func resourceAddonUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, addonName, err := AddonParseResourceID(d.Id()) @@ -253,16 +260,17 @@ func resourceAddonUpdate(ctx context.Context, d *schema.ResourceData, meta inter input.ConfigurationValues = aws.String(d.Get("configuration_values").(string)) } - var conflictResolutionAttr, conflictResolution string + var conflictResolutionAttr string + var conflictResolution types.ResolveConflicts if v, 
ok := d.GetOk("resolve_conflicts"); ok { conflictResolutionAttr = "resolve_conflicts" - conflictResolution = v.(string) - input.ResolveConflicts = aws.String(v.(string)) + conflictResolution = types.ResolveConflicts(v.(string)) + input.ResolveConflicts = conflictResolution } else if v, ok := d.GetOk("resolve_conflicts_on_update"); ok { conflictResolutionAttr = "resolve_conflicts_on_update" - conflictResolution = v.(string) - input.ResolveConflicts = aws.String(v.(string)) + conflictResolution = types.ResolveConflicts(v.(string)) + input.ResolveConflicts = conflictResolution } // If service account role ARN is already provided, use it. Otherwise, the add-on uses @@ -271,19 +279,19 @@ func resourceAddonUpdate(ctx context.Context, d *schema.ResourceData, meta inter input.ServiceAccountRoleArn = aws.String(d.Get("service_account_role_arn").(string)) } - output, err := conn.UpdateAddonWithContext(ctx, input) + output, err := client.UpdateAddon(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating EKS Add-On (%s): %s", d.Id(), err) } - updateID := aws.StringValue(output.Update.Id) - if _, err := waitAddonUpdateSuccessful(ctx, conn, clusterName, addonName, updateID, d.Timeout(schema.TimeoutUpdate)); err != nil { - if conflictResolution != eks.ResolveConflictsOverwrite { + updateID := aws.ToString(output.Update.Id) + if _, err := waitAddonUpdateSuccessful(ctx, client, clusterName, addonName, updateID, d.Timeout(schema.TimeoutUpdate)); err != nil { + if conflictResolution != types.ResolveConflictsOverwrite { // Changing addon version w/o setting resolve_conflicts to "OVERWRITE" // might result in a failed update if there are conflicts: // ConfigurationConflict Apply failed with 1 conflict: conflict with "kubectl"... - return sdkdiag.AppendErrorf(diags, "waiting for EKS Add-On (%s) update (%s): %s. 
Consider setting attribute %q to %q", d.Id(), updateID, err, conflictResolutionAttr, eks.ResolveConflictsOverwrite) + return sdkdiag.AppendErrorf(diags, "waiting for EKS Add-On (%s) update (%s): %s. Consider setting attribute %q to %q", d.Id(), updateID, err, conflictResolutionAttr, string(types.ResolveConflictsOverwrite)) } return sdkdiag.AppendErrorf(diags, "waiting for EKS Add-On (%s) update (%s): %s", d.Id(), updateID, err) @@ -295,7 +303,7 @@ func resourceAddonUpdate(ctx context.Context, d *schema.ResourceData, meta inter func resourceAddonDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, addonName, err := AddonParseResourceID(d.Id()) @@ -307,20 +315,26 @@ func resourceAddonDelete(ctx context.Context, d *schema.ResourceData, meta inter AddonName: aws.String(addonName), ClusterName: aws.String(clusterName), } - if v, ok := d.GetOk("preserve"); ok { - input.Preserve = aws.Bool(v.(bool)) + input.Preserve = v.(bool) } log.Printf("[DEBUG] Deleting EKS Add-On: %s", d.Id()) - _, err = conn.DeleteAddonWithContext(ctx, input) + _, err = client.DeleteAddon(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "deleting EKS Add-On (%s): %s", d.Id(), err) } - if _, err := waitAddonDeleted(ctx, conn, clusterName, addonName, d.Timeout(schema.TimeoutDelete)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for EKS Add-On (%s) delete: %s", d.Id(), err) + waiter := eks.NewAddonDeletedWaiter(client) + waiterParams := &eks.DescribeAddonInput{ + AddonName: aws.String(addonName), + ClusterName: aws.String(clusterName), + } + + err = waiter.Wait(ctx, waiterParams, d.Timeout(schema.TimeoutDelete)) + if err != nil { + sdkdiag.AppendErrorf(diags, "waiting for EKS Add-On (%s) delete: %s", d.Id(), err) } return diags diff --git a/internal/service/eks/addon_data_source.go 
b/internal/service/eks/addon_data_source.go index 7779c12f741..2c6b2e4fd7e 100644 --- a/internal/service/eks/addon_data_source.go +++ b/internal/service/eks/addon_data_source.go @@ -7,7 +7,7 @@ import ( "context" "time" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -60,14 +60,14 @@ func DataSourceAddon() *schema.Resource { } func dataSourceAddonRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig addonName := d.Get("addon_name").(string) clusterName := d.Get("cluster_name").(string) id := AddonCreateResourceID(clusterName, addonName) - addon, err := FindAddonByClusterNameAndAddonName(ctx, conn, clusterName, addonName) + addon, err := FindAddonByClusterNameAndAddonName(ctx, client, clusterName, addonName) if err != nil { return diag.Errorf("reading EKS Add-On (%s): %s", id, err) @@ -77,8 +77,8 @@ func dataSourceAddonRead(ctx context.Context, d *schema.ResourceData, meta inter d.Set("addon_version", addon.AddonVersion) d.Set("arn", addon.AddonArn) d.Set("configuration_values", addon.ConfigurationValues) - d.Set("created_at", aws.TimeValue(addon.CreatedAt).Format(time.RFC3339)) - d.Set("modified_at", aws.TimeValue(addon.ModifiedAt).Format(time.RFC3339)) + d.Set("created_at", aws.ToTime(addon.CreatedAt).Format(time.RFC3339)) + d.Set("modified_at", aws.ToTime(addon.ModifiedAt).Format(time.RFC3339)) d.Set("service_account_role_arn", addon.ServiceAccountRoleArn) if err := d.Set("tags", KeyValueTags(ctx, addon.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { diff --git a/internal/service/eks/addon_data_source_test.go 
b/internal/service/eks/addon_data_source_test.go index 9091f9676b8..cd6444e6eb5 100644 --- a/internal/service/eks/addon_data_source_test.go +++ b/internal/service/eks/addon_data_source_test.go @@ -7,7 +7,8 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -22,7 +23,7 @@ func TestAccEKSAddonDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ @@ -48,17 +49,17 @@ func TestAccEKSAddonDataSource_configurationValues(t *testing.T) { dataSourceResourceName := "data.aws_eks_addon.test" resourceName := "aws_eks_addon.test" addonName := "vpc-cni" - addonVersion := "v1.10.4-eksbuild.1" + addonVersion := "v1.14.1-eksbuild.1" configurationValues := "{\"env\": {\"WARM_ENI_TARGET\":\"2\",\"ENABLE_POD_ENI\":\"true\"},\"resources\": {\"limits\":{\"cpu\":\"100m\",\"memory\":\"100Mi\"},\"requests\":{\"cpu\":\"100m\",\"memory\":\"100Mi\"}}}" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccAddonDataSourceConfig_configurationValues(rName, addonName, addonVersion, 
configurationValues, eks.ResolveConflictsOverwrite), + Config: testAccAddonDataSourceConfig_configurationValues(rName, addonName, addonVersion, configurationValues, string(types.ResolveConflictsOverwrite)), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "addon_version", dataSourceResourceName, "addon_version"), resource.TestCheckResourceAttrPair(resourceName, "arn", dataSourceResourceName, "arn"), diff --git a/internal/service/eks/addon_test.go b/internal/service/eks/addon_test.go index 47cd54ac3ae..573b1a76fb2 100644 --- a/internal/service/eks/addon_test.go +++ b/internal/service/eks/addon_test.go @@ -9,7 +9,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,7 +22,7 @@ import ( func TestAccEKSAddon_basic(t *testing.T) { ctx := acctest.Context(t) - var addon eks.Addon + var addon types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) clusterResourceName := "aws_eks_cluster.test" addonResourceName := "aws_eks_addon.test" @@ -29,14 +30,14 @@ func TestAccEKSAddon_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAddonConfig_basic(rName, addonName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAddonExists(ctx, addonResourceName, &addon), + testAccCheckAddonExists(ctx, addonResourceName, 
addon), resource.TestCheckResourceAttr(addonResourceName, "addon_name", addonName), resource.TestCheckResourceAttrSet(addonResourceName, "addon_version"), acctest.MatchResourceAttrRegionalARN(addonResourceName, "arn", "eks", regexache.MustCompile(fmt.Sprintf("addon/%s/%s/.+$", rName, addonName))), @@ -57,21 +58,21 @@ func TestAccEKSAddon_basic(t *testing.T) { func TestAccEKSAddon_disappears(t *testing.T) { ctx := acctest.Context(t) - var addon eks.Addon + var addon types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" addonName := "vpc-cni" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAddonConfig_basic(rName, addonName), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, &addon), + testAccCheckAddonExists(ctx, resourceName, addon), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfeks.ResourceAddon(), resourceName), ), ExpectNonEmptyPlan: true, @@ -82,7 +83,7 @@ func TestAccEKSAddon_disappears(t *testing.T) { func TestAccEKSAddon_Disappears_cluster(t *testing.T) { ctx := acctest.Context(t) - var addon eks.Addon + var addon types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" clusterResourceName := "aws_eks_cluster.test" @@ -90,14 +91,14 @@ func TestAccEKSAddon_Disappears_cluster(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAddonConfig_basic(rName, addonName), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, &addon), + testAccCheckAddonExists(ctx, resourceName, addon), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfeks.ResourceCluster(), clusterResourceName), ), ExpectNonEmptyPlan: true, @@ -108,23 +109,23 @@ func TestAccEKSAddon_Disappears_cluster(t *testing.T) { func TestAccEKSAddon_addonVersion(t *testing.T) { ctx := acctest.Context(t) - var addon1, addon2 eks.Addon + var addon1, addon2 types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" addonName := "vpc-cni" - addonVersion1 := "v1.12.5-eksbuild.2" - addonVersion2 := "v1.12.6-eksbuild.1" + addonVersion1 := "v1.14.1-eksbuild.1" + addonVersion2 := "v1.15.1-eksbuild.1" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAddonConfig_version(rName, addonName, addonVersion1), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, &addon1), + testAccCheckAddonExists(ctx, resourceName, addon1), resource.TestCheckResourceAttr(resourceName, "addon_version", addonVersion1), ), }, @@ -137,7 +138,7 @@ func TestAccEKSAddon_addonVersion(t *testing.T) { { Config: testAccAddonConfig_version(rName, addonName, addonVersion2), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, &addon2), + testAccCheckAddonExists(ctx, resourceName, addon2), resource.TestCheckResourceAttr(resourceName, "addon_version", 
addonVersion2), ), }, @@ -147,21 +148,21 @@ func TestAccEKSAddon_addonVersion(t *testing.T) { func TestAccEKSAddon_preserve(t *testing.T) { ctx := acctest.Context(t) - var addon eks.Addon + var addon types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" addonName := "vpc-cni" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAddonConfig_preserve(rName, addonName), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, &addon), + testAccCheckAddonExists(ctx, resourceName, addon), resource.TestCheckResourceAttr(resourceName, "preserve", "true"), ), }, @@ -177,22 +178,22 @@ func TestAccEKSAddon_preserve(t *testing.T) { func TestAccEKSAddon_deprecated(t *testing.T) { ctx := acctest.Context(t) - var addon1, addon2, addon3 eks.Addon + var addon1, addon2, addon3 types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" addonName := "vpc-cni" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccAddonConfig_deprecated(rName, addonName, eks.ResolveConflictsNone), + Config: testAccAddonConfig_deprecated(rName, addonName, string(types.ResolveConflictsNone)), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, &addon1), - 
resource.TestCheckResourceAttr(resourceName, "resolve_conflicts", eks.ResolveConflictsNone), + testAccCheckAddonExists(ctx, resourceName, addon1), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts", string(types.ResolveConflictsNone)), ), }, { @@ -202,17 +203,17 @@ func TestAccEKSAddon_deprecated(t *testing.T) { ImportStateVerifyIgnore: []string{"resolve_conflicts"}, }, { - Config: testAccAddonConfig_deprecated(rName, addonName, eks.ResolveConflictsOverwrite), + Config: testAccAddonConfig_deprecated(rName, addonName, string(types.ResolveConflictsOverwrite)), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, &addon2), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts", eks.ResolveConflictsOverwrite), + testAccCheckAddonExists(ctx, resourceName, addon2), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts", string(types.ResolveConflictsOverwrite)), ), }, { - Config: testAccAddonConfig_deprecated(rName, addonName, eks.ResolveConflictsPreserve), + Config: testAccAddonConfig_deprecated(rName, addonName, string(types.ResolveConflictsPreserve)), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, &addon3), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts", eks.ResolveConflictsPreserve), + testAccCheckAddonExists(ctx, resourceName, addon3), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts", string(types.ResolveConflictsPreserve)), ), }, }, @@ -221,23 +222,23 @@ func TestAccEKSAddon_deprecated(t *testing.T) { func TestAccEKSAddon_resolveConflicts(t *testing.T) { ctx := acctest.Context(t) - var addon1, addon2, addon3 eks.Addon + var addon1, addon2, addon3 types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" addonName := "vpc-cni" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - 
ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccAddonConfig_resolveConflicts(rName, addonName, eks.ResolveConflictsNone, eks.ResolveConflictsNone), + Config: testAccAddonConfig_resolveConflicts(rName, addonName, string(types.ResolveConflictsNone), string(types.ResolveConflictsNone)), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, &addon1), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_create", eks.ResolveConflictsNone), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_update", eks.ResolveConflictsNone), + testAccCheckAddonExists(ctx, resourceName, addon1), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_create", string(types.ResolveConflictsNone)), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_update", string(types.ResolveConflictsNone)), ), }, { @@ -247,19 +248,19 @@ func TestAccEKSAddon_resolveConflicts(t *testing.T) { ImportStateVerifyIgnore: []string{"resolve_conflicts_on_create", "resolve_conflicts_on_update"}, }, { - Config: testAccAddonConfig_resolveConflicts(rName, addonName, eks.ResolveConflictsOverwrite, eks.ResolveConflictsOverwrite), + Config: testAccAddonConfig_resolveConflicts(rName, addonName, string(types.ResolveConflictsOverwrite), string(types.ResolveConflictsOverwrite)), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, &addon2), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_create", eks.ResolveConflictsOverwrite), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_update", eks.ResolveConflictsOverwrite), + testAccCheckAddonExists(ctx, resourceName, addon2), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_create", 
string(types.ResolveConflictsOverwrite)), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_update", string(types.ResolveConflictsOverwrite)), ), }, { - Config: testAccAddonConfig_resolveConflicts(rName, addonName, eks.ResolveConflictsOverwrite, eks.ResolveConflictsPreserve), + Config: testAccAddonConfig_resolveConflicts(rName, addonName, string(types.ResolveConflictsOverwrite), string(types.ResolveConflictsPreserve)), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, &addon3), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_create", eks.ResolveConflictsOverwrite), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_update", eks.ResolveConflictsPreserve), + testAccCheckAddonExists(ctx, resourceName, addon3), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_create", string(types.ResolveConflictsOverwrite)), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_update", string(types.ResolveConflictsPreserve)), ), }, }, @@ -268,7 +269,7 @@ func TestAccEKSAddon_resolveConflicts(t *testing.T) { func TestAccEKSAddon_serviceAccountRoleARN(t *testing.T) { ctx := acctest.Context(t) - var addon eks.Addon + var addon types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" serviceRoleResourceName := "aws_iam_role.test-service-role" @@ -276,14 +277,14 @@ func TestAccEKSAddon_serviceAccountRoleARN(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAddonConfig_serviceAccountRoleARN(rName, addonName), Check: resource.ComposeTestCheckFunc( - 
testAccCheckAddonExists(ctx, resourceName, &addon), + testAccCheckAddonExists(ctx, resourceName, addon), resource.TestCheckResourceAttrPair(resourceName, "service_account_role_arn", serviceRoleResourceName, "arn"), ), }, @@ -298,7 +299,7 @@ func TestAccEKSAddon_serviceAccountRoleARN(t *testing.T) { func TestAccEKSAddon_configurationValues(t *testing.T) { ctx := acctest.Context(t) - var addon eks.Addon + var addon types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" configurationValues := "{\"env\": {\"WARM_ENI_TARGET\":\"2\",\"ENABLE_POD_ENI\":\"true\"},\"resources\": {\"limits\":{\"cpu\":\"100m\",\"memory\":\"100Mi\"},\"requests\":{\"cpu\":\"100m\",\"memory\":\"100Mi\"}}}" @@ -306,18 +307,18 @@ func TestAccEKSAddon_configurationValues(t *testing.T) { emptyConfigurationValues := "{}" invalidConfigurationValues := "{\"env\": {\"INVALID_FIELD\":\"2\"}}" addonName := "vpc-cni" - addonVersion := "v1.12.6-eksbuild.1" + addonVersion := "v1.14.1-eksbuild.1" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, configurationValues, eks.ResolveConflictsOverwrite), + Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, configurationValues, string(types.ResolveConflictsOverwrite)), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, &addon), + testAccCheckAddonExists(ctx, resourceName, addon), resource.TestCheckResourceAttr(resourceName, "configuration_values", configurationValues), ), }, @@ -328,21 +329,21 @@ func TestAccEKSAddon_configurationValues(t 
*testing.T) { ImportStateVerifyIgnore: []string{"resolve_conflicts"}, }, { - Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, updateConfigurationValues, eks.ResolveConflictsOverwrite), + Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, updateConfigurationValues, string(types.ResolveConflictsOverwrite)), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, &addon), + testAccCheckAddonExists(ctx, resourceName, addon), resource.TestCheckResourceAttr(resourceName, "configuration_values", updateConfigurationValues), ), }, { - Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, emptyConfigurationValues, eks.ResolveConflictsOverwrite), + Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, emptyConfigurationValues, string(types.ResolveConflictsOverwrite)), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, &addon), + testAccCheckAddonExists(ctx, resourceName, addon), resource.TestCheckResourceAttr(resourceName, "configuration_values", emptyConfigurationValues), ), }, { - Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, invalidConfigurationValues, eks.ResolveConflictsOverwrite), + Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, invalidConfigurationValues, string(types.ResolveConflictsOverwrite)), ExpectError: regexache.MustCompile(`InvalidParameterException: ConfigurationValue provided in request is not supported`), }, }, @@ -351,21 +352,21 @@ func TestAccEKSAddon_configurationValues(t *testing.T) { func TestAccEKSAddon_tags(t *testing.T) { ctx := acctest.Context(t) - var addon1, addon2, addon3 eks.Addon + var addon1, addon2, addon3 types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" addonName := "vpc-cni" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAddonConfig_tags1(rName, addonName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, &addon1), + testAccCheckAddonExists(ctx, resourceName, addon1), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), @@ -378,7 +379,7 @@ func TestAccEKSAddon_tags(t *testing.T) { { Config: testAccAddonConfig_tags2(rName, addonName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, &addon2), + testAccCheckAddonExists(ctx, resourceName, addon2), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), @@ -387,7 +388,7 @@ func TestAccEKSAddon_tags(t *testing.T) { { Config: testAccAddonConfig_tags1(rName, addonName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, &addon3), + testAccCheckAddonExists(ctx, resourceName, addon3), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), @@ -396,7 +397,7 @@ func TestAccEKSAddon_tags(t *testing.T) { }) } -func testAccCheckAddonExists(ctx context.Context, n string, v *eks.Addon) resource.TestCheckFunc { +func testAccCheckAddonExists(ctx context.Context, n string, v types.Addon) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -413,15 +414,15 @@ func 
testAccCheckAddonExists(ctx context.Context, n string, v *eks.Addon) resour return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) - output, err := tfeks.FindAddonByClusterNameAndAddonName(ctx, conn, clusterName, addonName) + output, err := tfeks.FindAddonByClusterNameAndAddonName(ctx, client, clusterName, addonName) if err != nil { return err } - *v = *output + v = *output return nil } @@ -429,7 +430,7 @@ func testAccCheckAddonExists(ctx context.Context, n string, v *eks.Addon) resour func testAccCheckAddonDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_eks_addon" { @@ -442,7 +443,7 @@ func testAccCheckAddonDestroy(ctx context.Context) resource.TestCheckFunc { return err } - _, err = tfeks.FindAddonByClusterNameAndAddonName(ctx, conn, clusterName, addonName) + _, err = tfeks.FindAddonByClusterNameAndAddonName(ctx, client, clusterName, addonName) if tfresource.NotFound(err) { continue @@ -460,11 +461,9 @@ func testAccCheckAddonDestroy(ctx context.Context) resource.TestCheckFunc { } func testAccPreCheckAddon(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) - + client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) input := &eks.DescribeAddonVersionsInput{} - - _, err := conn.DescribeAddonVersionsWithContext(ctx, input) + _, err := client.DescribeAddonVersions(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) diff --git a/internal/service/eks/addon_version_data_source.go b/internal/service/eks/addon_version_data_source.go index 47264e58ea5..066b732d501 100644 --- a/internal/service/eks/addon_version_data_source.go +++ 
b/internal/service/eks/addon_version_data_source.go @@ -40,14 +40,14 @@ func DataSourceAddonVersion() *schema.Resource { } func dataSourceAddonVersionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) addonName := d.Get("addon_name").(string) kubernetesVersion := d.Get("kubernetes_version").(string) mostRecent := d.Get("most_recent").(bool) id := addonName - versionInfo, err := FindAddonVersionByAddonNameAndKubernetesVersion(ctx, conn, id, kubernetesVersion, mostRecent) + versionInfo, err := FindAddonVersionByAddonNameAndKubernetesVersion(ctx, client, id, kubernetesVersion, mostRecent) if err != nil { return diag.Errorf("reading EKS Add-On version info (%s, %s): %s", id, kubernetesVersion, err) diff --git a/internal/service/eks/addon_version_data_source_test.go b/internal/service/eks/addon_version_data_source_test.go index 576fc022b33..d5b40dcd83f 100644 --- a/internal/service/eks/addon_version_data_source_test.go +++ b/internal/service/eks/addon_version_data_source_test.go @@ -7,7 +7,8 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -15,7 +16,7 @@ import ( func TestAccEKSAddonVersionDataSource_basic(t *testing.T) { ctx := acctest.Context(t) - var addon eks.Addon + var addon types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) versionDataSourceName := "data.aws_eks_addon_version.test" addonDataSourceName := "data.aws_eks_addon.test" @@ -23,26 +24,26 @@ func TestAccEKSAddonVersionDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); 
testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccAddonVersionDataSourceConfig_basic(rName, addonName, true), + Config: testAccAddonVersionDataSourceConfig_basic(rName, addonName, false), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, addonDataSourceName, &addon), + testAccCheckAddonExists(ctx, addonDataSourceName, addon), resource.TestCheckResourceAttrPair(versionDataSourceName, "version", addonDataSourceName, "addon_version"), resource.TestCheckResourceAttrPair(versionDataSourceName, "addon_name", addonDataSourceName, "addon_name"), - resource.TestCheckResourceAttr(versionDataSourceName, "most_recent", "true"), + resource.TestCheckResourceAttr(versionDataSourceName, "most_recent", "false"), ), }, { - Config: testAccAddonVersionDataSourceConfig_basic(rName, addonName, false), + Config: testAccAddonVersionDataSourceConfig_basic(rName, addonName, true), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, addonDataSourceName, &addon), + testAccCheckAddonExists(ctx, addonDataSourceName, addon), resource.TestCheckResourceAttrPair(versionDataSourceName, "version", addonDataSourceName, "addon_version"), resource.TestCheckResourceAttrPair(versionDataSourceName, "addon_name", addonDataSourceName, "addon_name"), - resource.TestCheckResourceAttr(versionDataSourceName, "most_recent", "false"), + resource.TestCheckResourceAttr(versionDataSourceName, "most_recent", "true"), ), }, }, diff --git a/internal/service/eks/arn.go b/internal/service/eks/arn.go index a8ece1dada7..59cfad8b72d 100644 --- a/internal/service/eks/arn.go +++ b/internal/service/eks/arn.go @@ -16,7 +16,7 @@ import ( "fmt" "strings" - awsarn "github.com/aws/aws-sdk-go/aws/arn" + awsarn 
"github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go/aws/endpoints" ) diff --git a/internal/service/eks/cluster.go b/internal/service/eks/cluster.go index 83bf69868ea..d12c3b29aac 100644 --- a/internal/service/eks/cluster.go +++ b/internal/service/eks/cluster.go @@ -6,18 +6,21 @@ package eks import ( "context" "log" + "strings" "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -81,8 +84,8 @@ func ResourceCluster() *schema.Resource { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(eks.LogType_Values(), true), + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[types.LogType](), }, Set: schema.HashString, }, @@ -111,7 +114,7 @@ func ResourceCluster() *schema.Resource { Required: true, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(Resources_Values(), false), + ValidateFunc: validation.StringInSlice([]string{"secrets"}, false), }, }, }, @@ -150,11 +153,11 @@ func ResourceCluster() *schema.Resource 
{ Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "ip_family": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(eks.IpFamily_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.IpFamily](), }, "service_ipv4_cidr": { Type: schema.TypeString, @@ -292,7 +295,7 @@ func ResourceCluster() *schema.Resource { } func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) name := d.Get("name").(string) input := &eks.CreateClusterInput{ @@ -318,31 +321,30 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, func() (interface{}, error) { - return conn.CreateClusterWithContext(ctx, input) + return client.CreateCluster(ctx, input) }, func(err error) (bool, error) { - // InvalidParameterException: roleArn, arn:aws:iam::123456789012:role/XXX, does not exist - if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "does not exist") { - return true, err - } - - // InvalidParameterException: Error in role params - if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "Error in role params") { - return true, err - } - - if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "Role could not be assumed because the trusted entity is not correct") { - return true, err - } - - // InvalidParameterException: The provided role doesn't have the Amazon EKS Managed Policies associated with it. 
Please ensure the following policy is attached: arn:aws:iam::aws:policy/AmazonEKSClusterPolicy - if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "The provided role doesn't have the Amazon EKS Managed Policies associated with it") { - return true, err - } - - // InvalidParameterException: IAM role's policy must include the `ec2:DescribeSubnets` action - if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "IAM role's policy must include") { - return true, err + if errs.IsA[*types.InvalidParameterException](err) { + // InvalidParameterException: roleArn, arn:aws:iam::123456789012:role/XXX, does not exist + if strings.Contains(err.Error(), "does not exist") { + return true, err + } + // InvalidParameterException: Error in role params + if strings.Contains(err.Error(), "Error in role params") { + return true, err + } + if strings.Contains(err.Error(), "Role could not be assumed because the trusted entity is not correct") { + return true, err + } + // InvalidParameterException: The provided role doesn't have the Amazon EKS Managed Policies associated with it. 
+ // Please ensure the following policy is attached: arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + if strings.Contains(err.Error(), "The provided role doesn't have the Amazon EKS Managed Policies associated with it") { + return true, err + } + // InvalidParameterException: IAM role's policy must include the `ec2:DescribeSubnets` action + if strings.Contains(err.Error(), "IAM role's policy must include") { + return true, err + } } return false, err @@ -353,9 +355,15 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int return diag.Errorf("creating EKS Cluster (%s): %s", name, err) } - d.SetId(aws.StringValue(outputRaw.(*eks.CreateClusterOutput).Cluster.Name)) + d.SetId(aws.ToString(outputRaw.(*eks.CreateClusterOutput).Cluster.Name)) + + waiter := eks.NewClusterActiveWaiter(client) + waiterParams := &eks.DescribeClusterInput{ + Name: aws.String(d.Id()), + } - if _, err := waitClusterCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + err = waiter.Wait(ctx, waiterParams, d.Timeout(schema.TimeoutCreate)) + if err != nil { return diag.Errorf("waiting for EKS Cluster (%s) create: %s", d.Id(), err) } @@ -363,9 +371,9 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int } func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) - cluster, err := FindClusterByName(ctx, conn, d.Id()) + cluster, err := FindClusterByName(ctx, client, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EKS Cluster (%s) not found, removing from state", d.Id()) @@ -385,7 +393,7 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter if cluster.OutpostConfig != nil { d.Set("cluster_id", cluster.Id) } - d.Set("created_at", aws.TimeValue(cluster.CreatedAt).String()) + d.Set("created_at", 
aws.ToTime(cluster.CreatedAt).Format(time.RFC3339)) if err := d.Set("enabled_cluster_log_types", flattenLogging(cluster.Logging)); err != nil { return diag.Errorf("setting enabled_cluster_log_types: %s", err) } @@ -417,7 +425,7 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter } func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) // Do any version update first. if d.HasChange("version") { @@ -426,15 +434,15 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int Version: aws.String(d.Get("version").(string)), } - output, err := conn.UpdateClusterVersionWithContext(ctx, input) + output, err := client.UpdateClusterVersion(ctx, input) if err != nil { return diag.Errorf("updating EKS Cluster (%s) version: %s", d.Id(), err) } - updateID := aws.StringValue(output.Update.Id) + updateID := aws.ToString(output.Update.Id) - _, err = waitClusterUpdateSuccessful(ctx, conn, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)) + _, err = waitClusterUpdateSuccessful(ctx, client, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)) if err != nil { return diag.Errorf("waiting for EKS Cluster (%s) version update (%s): %s", d.Id(), updateID, err) @@ -450,15 +458,15 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int EncryptionConfig: expandEncryptionConfig(d.Get("encryption_config").([]interface{})), } - output, err := conn.AssociateEncryptionConfigWithContext(ctx, input) + output, err := client.AssociateEncryptionConfig(ctx, input) if err != nil { return diag.Errorf("associating EKS Cluster (%s) encryption config: %s", d.Id(), err) } - updateID := aws.StringValue(output.Update.Id) + updateID := aws.ToString(output.Update.Id) - _, err = waitClusterUpdateSuccessful(ctx, conn, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)) + _, err = 
waitClusterUpdateSuccessful(ctx, client, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)) if err != nil { return diag.Errorf("waiting for EKS Cluster (%s) encryption config association (%s): %s", d.Id(), updateID, err) @@ -472,15 +480,15 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int Name: aws.String(d.Id()), } - output, err := conn.UpdateClusterConfigWithContext(ctx, input) + output, err := client.UpdateClusterConfig(ctx, input) if err != nil { return diag.Errorf("updating EKS Cluster (%s) logging: %s", d.Id(), err) } - updateID := aws.StringValue(output.Update.Id) + updateID := aws.ToString(output.Update.Id) - _, err = waitClusterUpdateSuccessful(ctx, conn, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)) + _, err = waitClusterUpdateSuccessful(ctx, client, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)) if err != nil { return diag.Errorf("waiting for EKS Cluster (%s) logging update (%s): %s", d.Id(), updateID, err) @@ -493,15 +501,15 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int ResourcesVpcConfig: expandVPCConfigRequestForUpdate(d.Get("vpc_config").([]interface{})), } - output, err := conn.UpdateClusterConfigWithContext(ctx, input) + output, err := client.UpdateClusterConfig(ctx, input) if err != nil { return diag.Errorf("updating EKS Cluster (%s) VPC config: %s", d.Id(), err) } - updateID := aws.StringValue(output.Update.Id) + updateID := aws.ToString(output.Update.Id) - _, err = waitClusterUpdateSuccessful(ctx, conn, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)) + _, err = waitClusterUpdateSuccessful(ctx, client, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)) if err != nil { return diag.Errorf("waiting for EKS Cluster (%s) VPC config update (%s): %s", d.Id(), updateID, err) @@ -512,7 +520,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int } func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) 
diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) log.Printf("[DEBUG] Deleting EKS Cluster: %s", d.Id()) @@ -525,9 +533,9 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta int err := tfresource.Retry(ctx, clusterDeleteRetryTimeout, func() *retry.RetryError { var err error - _, err = conn.DeleteClusterWithContext(ctx, input) + _, err = client.DeleteCluster(ctx, input) - if tfawserr.ErrMessageContains(err, eks.ErrCodeResourceInUseException, "in progress") { + if errs.IsA[*types.ResourceInUseException](err) { return retry.RetryableError(err) } @@ -539,45 +547,61 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta int }, tfresource.WithDelayRand(1*time.Minute), tfresource.WithPollInterval(30*time.Second)) if tfresource.TimedOut(err) { - _, err = conn.DeleteClusterWithContext(ctx, input) + _, err = client.DeleteCluster(ctx, input) } - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + if errs.IsA[*types.ResourceNotFoundException](err) { return nil } // Sometimes the EKS API returns the ResourceNotFound error in this form: // ClientException: No cluster found for name: tf-acc-test-0o1f8 - if tfawserr.ErrMessageContains(err, eks.ErrCodeClientException, "No cluster found for name:") { - return nil + if errs.IsA[*types.ClientException](err) { + if strings.Contains(err.Error(), "No cluster found for name:") { + return nil + } } if err != nil { return diag.Errorf("deleting EKS Cluster (%s): %s", d.Id(), err) } - if _, err = waitClusterDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + waiter := eks.NewClusterDeletedWaiter(client) + waiterParams := &eks.DescribeClusterInput{ + Name: aws.String(d.Id()), + } + + err = waiter.Wait(ctx, waiterParams, d.Timeout(schema.TimeoutDelete)) + if err != nil { return diag.Errorf("waiting for EKS Cluster (%s) delete: %s", d.Id(), err) } return nil } -func 
FindClusterByName(ctx context.Context, conn *eks.EKS, name string) (*eks.Cluster, error) { +func FindClusterByName(ctx context.Context, client *eks.Client, name string) (*types.Cluster, error) { input := &eks.DescribeClusterInput{ Name: aws.String(name), } - output, err := conn.DescribeClusterWithContext(ctx, input) + output, err := client.DescribeCluster(ctx, input) // Sometimes the EKS API returns the ResourceNotFound error in this form: // ClientException: No cluster found for name: tf-acc-test-0o1f8 - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) || tfawserr.ErrMessageContains(err, eks.ErrCodeClientException, "No cluster found for name:") { + if errs.IsA[*types.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, } } + if errs.IsA[*types.ClientException](err) { + if strings.Contains(err.Error(), "No cluster found for name:") { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + } if err != nil { return nil, err @@ -590,15 +614,15 @@ func FindClusterByName(ctx context.Context, conn *eks.EKS, name string) (*eks.Cl return output.Cluster, nil } -func findClusterUpdateByTwoPartKey(ctx context.Context, conn *eks.EKS, name, id string) (*eks.Update, error) { +func findClusterUpdateByTwoPartKey(ctx context.Context, client *eks.Client, name, id string) (*types.Update, error) { input := &eks.DescribeUpdateInput{ Name: aws.String(name), UpdateId: aws.String(id), } - output, err := conn.DescribeUpdateWithContext(ctx, input) + output, err := client.DescribeUpdate(ctx, input) - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + if errs.IsA[*types.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -616,9 +640,9 @@ func findClusterUpdateByTwoPartKey(ctx context.Context, conn *eks.EKS, name, id return output.Update, nil } -func statusCluster(ctx context.Context, conn *eks.EKS, name string) 
retry.StateRefreshFunc { +func statusCluster(ctx context.Context, client *eks.Client, name string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindClusterByName(ctx, conn, name) + output, err := FindClusterByName(ctx, client, name) if tfresource.NotFound(err) { return nil, "", nil @@ -628,13 +652,13 @@ func statusCluster(ctx context.Context, conn *eks.EKS, name string) retry.StateR return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, string(output.Status), nil } } -func statusClusterUpdate(ctx context.Context, conn *eks.EKS, name, id string) retry.StateRefreshFunc { +func statusClusterUpdate(ctx context.Context, client *eks.Client, name, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := findClusterUpdateByTwoPartKey(ctx, conn, name, id) + output, err := findClusterUpdateByTwoPartKey(ctx, client, name, id) if tfresource.NotFound(err) { return nil, "", nil @@ -644,56 +668,22 @@ func statusClusterUpdate(ctx context.Context, conn *eks.EKS, name, id string) re return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, string(output.Status), nil } } -func waitClusterCreated(ctx context.Context, conn *eks.EKS, name string, timeout time.Duration) (*eks.Cluster, error) { +func waitClusterUpdateSuccessful(ctx context.Context, client *eks.Client, name, id string, timeout time.Duration) (*types.Update, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{eks.ClusterStatusPending, eks.ClusterStatusCreating}, - Target: []string{eks.ClusterStatusActive}, - Refresh: statusCluster(ctx, conn, name), + Pending: enum.Slice(types.UpdateStatusInProgress), + Target: enum.Slice(types.UpdateStatusSuccessful), + Refresh: statusClusterUpdate(ctx, client, name, id), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*eks.Cluster); ok { - return output, err - } - - 
return nil, err -} - -func waitClusterDeleted(ctx context.Context, conn *eks.EKS, name string, timeout time.Duration) (*eks.Cluster, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{eks.ClusterStatusActive, eks.ClusterStatusDeleting}, - Target: []string{}, - Refresh: statusCluster(ctx, conn, name), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*eks.Cluster); ok { - return output, err - } - - return nil, err -} - -func waitClusterUpdateSuccessful(ctx context.Context, conn *eks.EKS, name, id string, timeout time.Duration) (*eks.Update, error) { //nolint:unparam - stateConf := &retry.StateChangeConf{ - Pending: []string{eks.UpdateStatusInProgress}, - Target: []string{eks.UpdateStatusSuccessful}, - Refresh: statusClusterUpdate(ctx, conn, name, id), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*eks.Update); ok { - if status := aws.StringValue(output.Status); status == eks.UpdateStatusCancelled || status == eks.UpdateStatusFailed { + if output, ok := outputRaw.(*types.Update); ok { + if status := output.Status; status == types.UpdateStatusCancelled || status == types.UpdateStatusFailed { tfresource.SetLastError(err, ErrorDetailsError(output.Errors)) } @@ -703,12 +693,12 @@ func waitClusterUpdateSuccessful(ctx context.Context, conn *eks.EKS, name, id st return nil, err } -func expandEncryptionConfig(tfList []interface{}) []*eks.EncryptionConfig { +func expandEncryptionConfig(tfList []interface{}) []types.EncryptionConfig { if len(tfList) == 0 { return nil } - var apiObjects []*eks.EncryptionConfig + var apiObjects []types.EncryptionConfig for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -717,28 +707,35 @@ func expandEncryptionConfig(tfList []interface{}) []*eks.EncryptionConfig { continue } - apiObject := &eks.EncryptionConfig{ + apiObject := &types.EncryptionConfig{ Provider: 
expandProvider(tfMap["provider"].([]interface{})), } if v, ok := tfMap["resources"].(*schema.Set); ok && v.Len() > 0 { - apiObject.Resources = flex.ExpandStringSet(v) + apiObject.Resources = make([]string, v.Len()) + for i, r := range v.List() { + apiObject.Resources[i] = r.(string) + } } - apiObjects = append(apiObjects, apiObject) + apiObjects = append(apiObjects, *apiObject) } return apiObjects } -func expandProvider(tfList []interface{}) *eks.Provider { +func expandProvider(tfList []interface{}) *types.Provider { + if len(tfList) == 0 { + return nil + } + tfMap, ok := tfList[0].(map[string]interface{}) if !ok { return nil } - apiObject := &eks.Provider{} + apiObject := &types.Provider{} if v, ok := tfMap["key_arn"].(string); ok && v != "" { apiObject.KeyArn = aws.String(v) @@ -747,14 +744,18 @@ func expandProvider(tfList []interface{}) *eks.Provider { return apiObject } -func expandOutpostConfigRequest(l []interface{}) *eks.OutpostConfigRequest { - tfMap, ok := l[0].(map[string]interface{}) +func expandOutpostConfigRequest(tfList []interface{}) *types.OutpostConfigRequest { + if len(tfList) == 0 { + return nil + } + + tfMap, ok := tfList[0].(map[string]interface{}) if !ok { return nil } - outpostConfigRequest := &eks.OutpostConfigRequest{} + outpostConfigRequest := &types.OutpostConfigRequest{} if v, ok := tfMap["control_plane_instance_type"].(string); ok && v != "" { outpostConfigRequest.ControlPlaneInstanceType = aws.String(v) @@ -765,13 +766,17 @@ func expandOutpostConfigRequest(l []interface{}) *eks.OutpostConfigRequest { } if v, ok := tfMap["outpost_arns"].(*schema.Set); ok && v.Len() > 0 { - outpostConfigRequest.OutpostArns = flex.ExpandStringSet(v) + outpostArns := make([]string, 0, v.Len()) + for _, outpostArn := range flex.ExpandStringSet(v) { + outpostArns = append(outpostArns, *outpostArn) + } + outpostConfigRequest.OutpostArns = outpostArns } return outpostConfigRequest } -func expandControlPlanePlacement(tfList []interface{}) 
*eks.ControlPlanePlacementRequest { +func expandControlPlanePlacement(tfList []interface{}) *types.ControlPlanePlacementRequest { if len(tfList) == 0 { return nil } @@ -782,7 +787,7 @@ func expandControlPlanePlacement(tfList []interface{}) *eks.ControlPlanePlacemen return nil } - apiObject := &eks.ControlPlanePlacementRequest{} + apiObject := &types.ControlPlanePlacementRequest{} if v, ok := tfMap["group_name"].(string); ok && v != "" { apiObject.GroupName = aws.String(v) @@ -791,100 +796,133 @@ func expandControlPlanePlacement(tfList []interface{}) *eks.ControlPlanePlacemen return apiObject } -func expandVPCConfigRequestForCreate(l []interface{}) *eks.VpcConfigRequest { - if len(l) == 0 { +func expandVPCConfigRequestForCreate(tfList []interface{}) *types.VpcConfigRequest { + if len(tfList) == 0 { return nil } - m := l[0].(map[string]interface{}) + m := tfList[0].(map[string]interface{}) + + securityGroupIds := flex.ExpandStringSet(m["security_group_ids"].(*schema.Set)) + securityGroupIdsSlice := make([]string, len(securityGroupIds)) + for i, id := range securityGroupIds { + securityGroupIdsSlice[i] = *id + } + + subnetIds := flex.ExpandStringSet(m["subnet_ids"].(*schema.Set)) + subnetIdsSlice := make([]string, len(subnetIds)) + for i, id := range subnetIds { + subnetIdsSlice[i] = *id + } - vpcConfigRequest := &eks.VpcConfigRequest{ + vpcConfigRequest := &types.VpcConfigRequest{ EndpointPrivateAccess: aws.Bool(m["endpoint_private_access"].(bool)), EndpointPublicAccess: aws.Bool(m["endpoint_public_access"].(bool)), - SecurityGroupIds: flex.ExpandStringSet(m["security_group_ids"].(*schema.Set)), - SubnetIds: flex.ExpandStringSet(m["subnet_ids"].(*schema.Set)), + SecurityGroupIds: securityGroupIdsSlice, + SubnetIds: subnetIdsSlice, } if v, ok := m["public_access_cidrs"].(*schema.Set); ok && v.Len() > 0 { - vpcConfigRequest.PublicAccessCidrs = flex.ExpandStringSet(v) + publicAccessCidrs := flex.ExpandStringSet(v) + vpcConfigRequest.PublicAccessCidrs = make([]string, 
len(publicAccessCidrs)) + for i, cidr := range publicAccessCidrs { + vpcConfigRequest.PublicAccessCidrs[i] = *cidr + } } return vpcConfigRequest } -func expandVPCConfigRequestForUpdate(l []interface{}) *eks.VpcConfigRequest { - if len(l) == 0 { +func expandVPCConfigRequestForUpdate(tfList []interface{}) *types.VpcConfigRequest { + if len(tfList) == 0 { return nil } - m := l[0].(map[string]interface{}) + m := tfList[0].(map[string]interface{}) - vpcConfigRequest := &eks.VpcConfigRequest{ + vpcConfigRequest := &types.VpcConfigRequest{ EndpointPrivateAccess: aws.Bool(m["endpoint_private_access"].(bool)), EndpointPublicAccess: aws.Bool(m["endpoint_public_access"].(bool)), } if v, ok := m["public_access_cidrs"].(*schema.Set); ok && v.Len() > 0 { - vpcConfigRequest.PublicAccessCidrs = flex.ExpandStringSet(v) + publicAccessCidrs := flex.ExpandStringSet(v) + vpcConfigRequest.PublicAccessCidrs = make([]string, len(publicAccessCidrs)) + for i, cidr := range publicAccessCidrs { + vpcConfigRequest.PublicAccessCidrs[i] = *cidr + } } return vpcConfigRequest } -func expandKubernetesNetworkConfigRequest(tfList []interface{}) *eks.KubernetesNetworkConfigRequest { - tfMap, ok := tfList[0].(map[string]interface{}) - - if !ok { +func expandKubernetesNetworkConfigRequest(tfList []interface{}) *types.KubernetesNetworkConfigRequest { + if len(tfList) == 0 { return nil } - apiObject := &eks.KubernetesNetworkConfigRequest{} + m := tfList[0].(map[string]interface{}) - if v, ok := tfMap["service_ipv4_cidr"].(string); ok && v != "" { + apiObject := &types.KubernetesNetworkConfigRequest{} + + if v, ok := m["service_ipv4_cidr"].(string); ok && v != "" { apiObject.ServiceIpv4Cidr = aws.String(v) } - if v, ok := tfMap["ip_family"].(string); ok && v != "" { - apiObject.IpFamily = aws.String(v) + if v, ok := m["ip_family"]; ok && v != "" { + apiObject.IpFamily = v.(types.IpFamily) } return apiObject } -func expandLogging(vEnabledLogTypes *schema.Set) *eks.Logging { - vEksLogTypes := []interface{}{} 
- for _, eksLogType := range eks.LogType_Values() { - vEksLogTypes = append(vEksLogTypes, eksLogType) +func expandLogging(vEnabledLogTypes *schema.Set) *types.Logging { + logTypes := []interface{}{} + + for _, logType := range enum.Values[types.LogType]() { + logTypes = append(logTypes, logType) + } + aLogTypes := schema.NewSet(schema.HashString, logTypes) + + enabledLogTypes := make([]types.LogType, len(vEnabledLogTypes.List())) + for i, s := range vEnabledLogTypes.List() { + enabledLogTypes[i] = types.LogType(s.(string)) + } + + diff := aLogTypes.Difference(vEnabledLogTypes) + + disabledLogTypes := make([]types.LogType, len(diff.List())) + for i, s := range diff.List() { + disabledLogTypes[i] = types.LogType(s.(string)) } - vAllLogTypes := schema.NewSet(schema.HashString, vEksLogTypes) - return &eks.Logging{ - ClusterLogging: []*eks.LogSetup{ + return &types.Logging{ + ClusterLogging: []types.LogSetup{ { Enabled: aws.Bool(true), - Types: flex.ExpandStringSet(vEnabledLogTypes), + Types: enabledLogTypes, }, { Enabled: aws.Bool(false), - Types: flex.ExpandStringSet(vAllLogTypes.Difference(vEnabledLogTypes)), + Types: disabledLogTypes, }, }, } } -func flattenCertificate(certificate *eks.Certificate) []map[string]interface{} { +func flattenCertificate(certificate *types.Certificate) []map[string]interface{} { if certificate == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "data": aws.StringValue(certificate.Data), + "data": certificate.Data, } return []map[string]interface{}{m} } -func flattenIdentity(identity *eks.Identity) []map[string]interface{} { +func flattenIdentity(identity *types.Identity) []map[string]interface{} { if identity == nil { return []map[string]interface{}{} } @@ -896,19 +934,19 @@ func flattenIdentity(identity *eks.Identity) []map[string]interface{} { return []map[string]interface{}{m} } -func flattenOIDC(oidc *eks.OIDC) []map[string]interface{} { +func flattenOIDC(oidc *types.OIDC) []map[string]interface{} { if oidc == 
nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "issuer": aws.StringValue(oidc.Issuer), + "issuer": oidc.Issuer, } return []map[string]interface{}{m} } -func flattenEncryptionConfig(apiObjects []*eks.EncryptionConfig) []interface{} { +func flattenEncryptionConfig(apiObjects []types.EncryptionConfig) []interface{} { if len(apiObjects) == 0 { return nil } @@ -918,7 +956,7 @@ func flattenEncryptionConfig(apiObjects []*eks.EncryptionConfig) []interface{} { for _, apiObject := range apiObjects { tfMap := map[string]interface{}{ "provider": flattenProvider(apiObject.Provider), - "resources": aws.StringValueSlice(apiObject.Resources), + "resources": apiObject.Resources, } tfList = append(tfList, tfMap) @@ -927,43 +965,58 @@ func flattenEncryptionConfig(apiObjects []*eks.EncryptionConfig) []interface{} { return tfList } -func flattenProvider(apiObject *eks.Provider) []interface{} { +func flattenProvider(apiObject *types.Provider) []interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{ - "key_arn": aws.StringValue(apiObject.KeyArn), + "key_arn": apiObject.KeyArn, } return []interface{}{tfMap} } -func flattenVPCConfigResponse(vpcConfig *eks.VpcConfigResponse) []map[string]interface{} { +func flattenVPCConfigResponse(vpcConfig *types.VpcConfigResponse) []map[string]interface{} { if vpcConfig == nil { return []map[string]interface{}{} } + securityGroupIds := make([]*string, len(vpcConfig.SecurityGroupIds)) + for i, id := range vpcConfig.SecurityGroupIds { + securityGroupIds[i] = aws.String(id) + } + + subnetIds := make([]*string, len(vpcConfig.SubnetIds)) + for i, id := range vpcConfig.SubnetIds { + subnetIds[i] = aws.String(id) + } + + publicAccessCidrs := make([]*string, len(vpcConfig.PublicAccessCidrs)) + for i, cidr := range vpcConfig.PublicAccessCidrs { + publicAccessCidrs[i] = aws.String(cidr) + } + m := map[string]interface{}{ - "cluster_security_group_id": aws.StringValue(vpcConfig.ClusterSecurityGroupId), - 
"endpoint_private_access": aws.BoolValue(vpcConfig.EndpointPrivateAccess), - "endpoint_public_access": aws.BoolValue(vpcConfig.EndpointPublicAccess), - "security_group_ids": flex.FlattenStringSet(vpcConfig.SecurityGroupIds), - "subnet_ids": flex.FlattenStringSet(vpcConfig.SubnetIds), - "public_access_cidrs": flex.FlattenStringSet(vpcConfig.PublicAccessCidrs), - "vpc_id": aws.StringValue(vpcConfig.VpcId), + "cluster_security_group_id": vpcConfig.ClusterSecurityGroupId, + "endpoint_private_access": vpcConfig.EndpointPrivateAccess, + "endpoint_public_access": vpcConfig.EndpointPublicAccess, + "security_group_ids": flex.FlattenStringSet(securityGroupIds), + "subnet_ids": flex.FlattenStringSet(subnetIds), + "public_access_cidrs": flex.FlattenStringSet(publicAccessCidrs), + "vpc_id": vpcConfig.VpcId, } return []map[string]interface{}{m} } -func flattenLogging(logging *eks.Logging) *schema.Set { - enabledLogTypes := []*string{} +func flattenLogging(logging *types.Logging) *schema.Set { + enabledLogTypes := []types.LogType{} if logging != nil { logSetups := logging.ClusterLogging for _, logSetup := range logSetups { - if logSetup == nil || !aws.BoolValue(logSetup.Enabled) { + if !aws.ToBool(logSetup.Enabled) { continue } @@ -971,44 +1024,49 @@ func flattenLogging(logging *eks.Logging) *schema.Set { } } - return flex.FlattenStringSet(enabledLogTypes) + enabledLogTypePointers := make([]*string, len(enabledLogTypes)) + for i, logType := range enabledLogTypes { + enabledLogTypePointers[i] = aws.String(string(logType)) + } + + return flex.FlattenStringSet(enabledLogTypePointers) } -func flattenKubernetesNetworkConfigResponse(apiObject *eks.KubernetesNetworkConfigResponse) []interface{} { +func flattenKubernetesNetworkConfigResponse(apiObject *types.KubernetesNetworkConfigResponse) []interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{ - "service_ipv4_cidr": aws.StringValue(apiObject.ServiceIpv4Cidr), - "service_ipv6_cidr": 
aws.StringValue(apiObject.ServiceIpv6Cidr), - "ip_family": aws.StringValue(apiObject.IpFamily), + "service_ipv4_cidr": apiObject.ServiceIpv4Cidr, + "service_ipv6_cidr": apiObject.ServiceIpv6Cidr, + "ip_family": apiObject.IpFamily, } return []interface{}{tfMap} } -func flattenOutpostConfigResponse(apiObject *eks.OutpostConfigResponse) []interface{} { +func flattenOutpostConfigResponse(apiObject *types.OutpostConfigResponse) []interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{ - "control_plane_instance_type": aws.StringValue(apiObject.ControlPlaneInstanceType), + "control_plane_instance_type": apiObject.ControlPlaneInstanceType, "control_plane_placement": flattenControlPlanePlacementResponse(apiObject.ControlPlanePlacement), - "outpost_arns": aws.StringValueSlice(apiObject.OutpostArns), + "outpost_arns": apiObject.OutpostArns, } return []interface{}{tfMap} } -func flattenControlPlanePlacementResponse(apiObject *eks.ControlPlanePlacementResponse) []interface{} { +func flattenControlPlanePlacementResponse(apiObject *types.ControlPlanePlacementResponse) []interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{ - "group_name": aws.StringValue(apiObject.GroupName), + "group_name": apiObject.GroupName, } return []interface{}{tfMap} diff --git a/internal/service/eks/cluster_auth_data_source_test.go b/internal/service/eks/cluster_auth_data_source_test.go index 2e58cd74b3f..f4e4feda9a0 100644 --- a/internal/service/eks/cluster_auth_data_source_test.go +++ b/internal/service/eks/cluster_auth_data_source_test.go @@ -7,7 +7,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -20,7 +20,7 @@ func TestAccEKSClusterAuthDataSource_basic(t *testing.T) { resource.ParallelTest(t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { diff --git a/internal/service/eks/cluster_data_source.go b/internal/service/eks/cluster_data_source.go index 18cc7eeeb9f..c5895beabd6 100644 --- a/internal/service/eks/cluster_data_source.go +++ b/internal/service/eks/cluster_data_source.go @@ -5,8 +5,9 @@ package eks import ( "context" + "time" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -190,11 +191,11 @@ func DataSourceCluster() *schema.Resource { } func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig name := d.Get("name").(string) - cluster, err := FindClusterByName(ctx, conn, name) + cluster, err := FindClusterByName(ctx, client, name) if err != nil { return diag.Errorf("reading EKS Cluster (%s): %s", name, err) @@ -209,7 +210,7 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta int if cluster.OutpostConfig != nil { d.Set("cluster_id", cluster.Id) } - d.Set("created_at", aws.TimeValue(cluster.CreatedAt).String()) + d.Set("created_at", aws.ToTime(cluster.CreatedAt).Format(time.RFC3339)) if err := d.Set("enabled_cluster_log_types", flattenLogging(cluster.Logging)); err != nil { return diag.Errorf("setting enabled_cluster_log_types: %s", err) } diff --git a/internal/service/eks/cluster_data_source_test.go b/internal/service/eks/cluster_data_source_test.go index 4cab4dd9196..1bc5331e486 100644 --- 
a/internal/service/eks/cluster_data_source_test.go +++ b/internal/service/eks/cluster_data_source_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -21,7 +21,7 @@ func TestAccEKSClusterDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -70,7 +70,7 @@ func TestAccEKSClusterDataSource_outpost(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOutpostsOutposts(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ diff --git a/internal/service/eks/cluster_test.go b/internal/service/eks/cluster_test.go index f4a8e94dae8..52bed0cd2e6 100644 --- a/internal/service/eks/cluster_test.go +++ b/internal/service/eks/cluster_test.go @@ -11,8 +11,9 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" 
"github.com/hashicorp/terraform-plugin-testing/terraform" @@ -23,19 +24,19 @@ import ( ) const ( - clusterVersionUpgradeInitial = "1.21" - clusterVersionUpgradeUpdated = "1.22" + clusterVersionUpgradeInitial = "1.27" + clusterVersionUpgradeUpdated = "1.28" ) func TestAccEKSCluster_basic(t *testing.T) { ctx := acctest.Context(t) - var cluster eks.Cluster + var cluster types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -57,7 +58,7 @@ func TestAccEKSCluster_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.ip_family", "ipv4"), resource.TestMatchResourceAttr(resourceName, "platform_version", regexache.MustCompile(`^eks\.\d+$`)), resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), - resource.TestCheckResourceAttr(resourceName, "status", eks.ClusterStatusActive), + resource.TestCheckResourceAttr(resourceName, "status", string(types.ClusterStatusActive)), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestMatchResourceAttr(resourceName, "version", regexache.MustCompile(`^\d+\.\d+$`)), resource.TestCheckResourceAttr(resourceName, "vpc_config.#", "1"), @@ -79,13 +80,13 @@ func TestAccEKSCluster_basic(t *testing.T) { func TestAccEKSCluster_disappears(t *testing.T) { ctx := acctest.Context(t) - var cluster eks.Cluster + var cluster types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) 
}, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -103,14 +104,14 @@ func TestAccEKSCluster_disappears(t *testing.T) { func TestAccEKSCluster_Encryption_create(t *testing.T) { ctx := acctest.Context(t) - var cluster eks.Cluster + var cluster types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" kmsKeyResourceName := "aws_kms_key.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -135,14 +136,14 @@ func TestAccEKSCluster_Encryption_create(t *testing.T) { func TestAccEKSCluster_Encryption_update(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 eks.Cluster + var cluster1, cluster2 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" kmsKeyResourceName := "aws_kms_key.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -176,14 +177,14 @@ func TestAccEKSCluster_Encryption_update(t *testing.T) { // https://github.com/hashicorp/terraform-provider-aws/issues/19968. 
func TestAccEKSCluster_Encryption_versionUpdate(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 eks.Cluster + var cluster1, cluster2 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" kmsKeyResourceName := "aws_kms_key.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -221,13 +222,13 @@ func TestAccEKSCluster_Encryption_versionUpdate(t *testing.T) { func TestAccEKSCluster_version(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 eks.Cluster + var cluster1, cluster2 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -257,13 +258,13 @@ func TestAccEKSCluster_version(t *testing.T) { func TestAccEKSCluster_logging(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 eks.Cluster + var cluster1, cluster2 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: 
testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -305,13 +306,13 @@ func TestAccEKSCluster_logging(t *testing.T) { func TestAccEKSCluster_tags(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2, cluster3 eks.Cluster + var cluster1, cluster2, cluster3 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -351,13 +352,13 @@ func TestAccEKSCluster_tags(t *testing.T) { func TestAccEKSCluster_VPC_securityGroupIDs(t *testing.T) { ctx := acctest.Context(t) - var cluster eks.Cluster + var cluster types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -380,13 +381,13 @@ func TestAccEKSCluster_VPC_securityGroupIDs(t *testing.T) { func TestAccEKSCluster_VPC_endpointPrivateAccess(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2, cluster3 eks.Cluster + var cluster1, cluster2, cluster3 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, 
eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -427,13 +428,13 @@ func TestAccEKSCluster_VPC_endpointPrivateAccess(t *testing.T) { func TestAccEKSCluster_VPC_endpointPublicAccess(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2, cluster3 eks.Cluster + var cluster1, cluster2, cluster3 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -474,13 +475,13 @@ func TestAccEKSCluster_VPC_endpointPublicAccess(t *testing.T) { func TestAccEKSCluster_VPC_publicAccessCIDRs(t *testing.T) { ctx := acctest.Context(t) - var cluster eks.Cluster + var cluster types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -511,13 +512,13 @@ func TestAccEKSCluster_VPC_publicAccessCIDRs(t *testing.T) { func TestAccEKSCluster_Network_serviceIPv4CIDR(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 eks.Cluster + var cluster1, cluster2 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, 
t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -574,13 +575,13 @@ func TestAccEKSCluster_Network_serviceIPv4CIDR(t *testing.T) { func TestAccEKSCluster_Network_ipFamily(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 eks.Cluster + var cluster1, cluster2 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -625,14 +626,14 @@ func TestAccEKSCluster_Network_ipFamily(t *testing.T) { func TestAccEKSCluster_Outpost_create(t *testing.T) { ctx := acctest.Context(t) - var cluster eks.Cluster + var cluster types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" controlPlaneInstanceType := "m5d.large" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOutpostsOutposts(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -657,14 +658,14 @@ func TestAccEKSCluster_Outpost_create(t *testing.T) { func TestAccEKSCluster_Outpost_placement(t *testing.T) { ctx := acctest.Context(t) - var cluster eks.Cluster + var cluster types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := 
"aws_eks_cluster.test" controlPlaneInstanceType := "m5d.large" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOutpostsOutposts(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -688,7 +689,7 @@ func TestAccEKSCluster_Outpost_placement(t *testing.T) { }) } -func testAccCheckClusterExists(ctx context.Context, resourceName string, cluster *eks.Cluster) resource.TestCheckFunc { +func testAccCheckClusterExists(ctx context.Context, resourceName string, cluster *types.Cluster) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] if !ok { @@ -698,15 +699,15 @@ func testAccCheckClusterExists(ctx context.Context, resourceName string, cluster return fmt.Errorf("No EKS Cluster ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) - output, err := tfeks.FindClusterByName(ctx, conn, rs.Primary.ID) + output, err := tfeks.FindClusterByName(ctx, client, rs.Primary.ID) if err != nil { return err } - *cluster = *output + cluster = output return nil } @@ -719,9 +720,9 @@ func testAccCheckClusterDestroy(ctx context.Context) resource.TestCheckFunc { continue } - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) - _, err := tfeks.FindClusterByName(ctx, conn, rs.Primary.ID) + _, err := tfeks.FindClusterByName(ctx, client, rs.Primary.ID) if tfresource.NotFound(err) { continue @@ -738,9 +739,9 @@ func testAccCheckClusterDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckClusterRecreated(i, j *eks.Cluster) resource.TestCheckFunc { +func testAccCheckClusterRecreated(i, j 
*types.Cluster) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.TimeValue(i.CreatedAt).Equal(aws.TimeValue(j.CreatedAt)) { + if aws.ToTime(i.CreatedAt).Equal(aws.ToTime(j.CreatedAt)) { return errors.New("EKS Cluster was not recreated") } @@ -748,9 +749,9 @@ func testAccCheckClusterRecreated(i, j *eks.Cluster) resource.TestCheckFunc { } } -func testAccCheckClusterNotRecreated(i, j *eks.Cluster) resource.TestCheckFunc { +func testAccCheckClusterNotRecreated(i, j *types.Cluster) resource.TestCheckFunc { return func(s *terraform.State) error { - if !aws.TimeValue(i.CreatedAt).Equal(aws.TimeValue(j.CreatedAt)) { + if !aws.ToTime(i.CreatedAt).Equal(aws.ToTime(j.CreatedAt)) { return errors.New("EKS Cluster was recreated") } @@ -759,11 +760,11 @@ func testAccCheckClusterNotRecreated(i, j *eks.Cluster) resource.TestCheckFunc { } func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) input := &eks.ListClustersInput{} - _, err := conn.ListClustersWithContext(ctx, input) + _, err := conn.ListClusters(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) diff --git a/internal/service/eks/clusters_data_source.go b/internal/service/eks/clusters_data_source.go index ffdd46b1a90..e47c9001214 100644 --- a/internal/service/eks/clusters_data_source.go +++ b/internal/service/eks/clusters_data_source.go @@ -6,8 +6,7 @@ package eks import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -31,27 +30,22 @@ func DataSourceClusters() *schema.Resource { func dataSourceClustersRead(ctx context.Context, d *schema.ResourceData, meta 
interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) - var clusters []*string + var clusters []string - err := conn.ListClustersPagesWithContext(ctx, &eks.ListClustersInput{}, func(page *eks.ListClustersOutput, lastPage bool) bool { - if page == nil { - return !lastPage + paginator := eks.NewListClustersPaginator(client, &eks.ListClustersInput{}) + for paginator.HasMorePages() { + output, err := paginator.NextPage(ctx) + if err != nil { + return sdkdiag.AppendErrorf(diags, "listing EKS Clusters: %s", err) } - clusters = append(clusters, page.Clusters...) - - return !lastPage - }) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing EKS Clusters: %s", err) + clusters = append(clusters, output.Clusters...) } d.SetId(meta.(*conns.AWSClient).Region) - - d.Set("names", aws.StringValueSlice(clusters)) + d.Set("names", clusters) return diags } diff --git a/internal/service/eks/clusters_data_source_test.go b/internal/service/eks/clusters_data_source_test.go index a35a5c0f752..c7da75e874b 100644 --- a/internal/service/eks/clusters_data_source_test.go +++ b/internal/service/eks/clusters_data_source_test.go @@ -6,7 +6,7 @@ package eks_test import ( "testing" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -19,7 +19,7 @@ func TestAccEKSClustersDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: 
[]resource.TestStep{ diff --git a/internal/service/eks/consts.go b/internal/service/eks/consts.go index 4bf92304f5c..cb821b1bc14 100644 --- a/internal/service/eks/consts.go +++ b/internal/service/eks/consts.go @@ -11,16 +11,6 @@ const ( IdentityProviderConfigTypeOIDC = "oidc" ) -const ( - ResourcesSecrets = "secrets" -) - -func Resources_Values() []string { - return []string{ - ResourcesSecrets, - } -} - const ( propagationTimeout = 2 * time.Minute ) diff --git a/internal/service/eks/errors.go b/internal/service/eks/errors.go index ee8ac774ab2..809e42933d2 100644 --- a/internal/service/eks/errors.go +++ b/internal/service/eks/errors.go @@ -7,84 +7,84 @@ import ( "fmt" "strings" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/eks" multierror "github.com/hashicorp/go-multierror" ) -func AddonIssueError(apiObject *eks.AddonIssue) error { +func AddonIssueError(apiObject *types.AddonIssue) error { if apiObject == nil { return nil } - return awserr.New(aws.StringValue(apiObject.Code), aws.StringValue(apiObject.Message), nil) + return awserr.New(string(apiObject.Code), aws.ToString(apiObject.Message), nil) } -func AddonIssuesError(apiObjects []*eks.AddonIssue) error { +func AddonIssuesError(apiObjects []types.AddonIssue) error { var errors *multierror.Error for _, apiObject := range apiObjects { - if apiObject == nil { + if &apiObject == nil { continue } - err := AddonIssueError(apiObject) + err := AddonIssueError(&apiObject) if err != nil { - errors = multierror.Append(errors, fmt.Errorf("%s: %w", strings.Join(aws.StringValueSlice(apiObject.ResourceIds), ", "), err)) + errors = multierror.Append(errors, fmt.Errorf("%s: %w", strings.Join(apiObject.ResourceIds, ", "), err)) } } return errors.ErrorOrNil() } -func ErrorDetailError(apiObject *eks.ErrorDetail) error { - if apiObject == nil { +func ErrorDetailError(apiObject 
types.ErrorDetail) error { + if &apiObject == nil { return nil } - return awserr.New(aws.StringValue(apiObject.ErrorCode), aws.StringValue(apiObject.ErrorMessage), nil) + return awserr.New(string(apiObject.ErrorCode), aws.ToString(apiObject.ErrorMessage), nil) } -func ErrorDetailsError(apiObjects []*eks.ErrorDetail) error { +func ErrorDetailsError(apiObjects []types.ErrorDetail) error { var errors *multierror.Error for _, apiObject := range apiObjects { - if apiObject == nil { + if &apiObject == nil { continue } err := ErrorDetailError(apiObject) if err != nil { - errors = multierror.Append(errors, fmt.Errorf("%s: %w", strings.Join(aws.StringValueSlice(apiObject.ResourceIds), ", "), err)) + errors = multierror.Append(errors, fmt.Errorf("%s: %w", strings.Join(apiObject.ResourceIds, ", "), err)) } } return errors.ErrorOrNil() } -func IssueError(apiObject *eks.Issue) error { +func IssueError(apiObject *types.Issue) error { if apiObject == nil { return nil } - return awserr.New(aws.StringValue(apiObject.Code), aws.StringValue(apiObject.Message), nil) + return awserr.New(string(apiObject.Code), aws.ToString(apiObject.Message), nil) } -func IssuesError(apiObjects []*eks.Issue) error { +func IssuesError(apiObjects []types.Issue) error { var errors *multierror.Error for _, apiObject := range apiObjects { - if apiObject == nil { + if &apiObject == nil { continue } - err := IssueError(apiObject) + err := IssueError(&apiObject) if err != nil { - errors = multierror.Append(errors, fmt.Errorf("%s: %w", strings.Join(aws.StringValueSlice(apiObject.ResourceIds), ", "), err)) + errors = multierror.Append(errors, fmt.Errorf("%s: %w", strings.Join(apiObject.ResourceIds, ", "), err)) } } diff --git a/internal/service/eks/fargate_profile.go b/internal/service/eks/fargate_profile.go index 55a4954086d..a4addb0176e 100644 --- a/internal/service/eks/fargate_profile.go +++ b/internal/service/eks/fargate_profile.go @@ -7,17 +7,19 @@ import ( "context" "fmt" "log" + "strings" "time" - 
"github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -109,7 +111,7 @@ func ResourceFargateProfile() *schema.Resource { func resourceFargateProfileCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) clusterName := d.Get("cluster_name").(string) fargateProfileName := d.Get("fargate_profile_name").(string) @@ -120,7 +122,7 @@ func resourceFargateProfileCreate(ctx context.Context, d *schema.ResourceData, m FargateProfileName: aws.String(fargateProfileName), PodExecutionRoleArn: aws.String(d.Get("pod_execution_role_arn").(string)), Selectors: expandFargateProfileSelectors(d.Get("selector").(*schema.Set).List()), - Subnets: flex.ExpandStringSet(d.Get("subnet_ids").(*schema.Set)), + Subnets: flex.ExpandStringValueSet(d.Get("subnet_ids").(*schema.Set)), Tags: getTagsIn(ctx), } @@ -130,12 +132,14 @@ func resourceFargateProfileCreate(ctx context.Context, d *schema.ResourceData, m defer conns.GlobalMutexKV.Unlock(mutexKey) err := retry.RetryContext(ctx, propagationTimeout, func() *retry.RetryError 
{ - _, err := conn.CreateFargateProfileWithContext(ctx, input) + _, err := client.CreateFargateProfile(ctx, input) // Retry for IAM eventual consistency on error: // InvalidParameterException: Misconfigured PodExecutionRole Trust Policy; Please add the eks-fargate-pods.amazonaws.com Service Principal - if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "Misconfigured PodExecutionRole Trust Policy") { - return retry.RetryableError(err) + if errs.IsA[*types.InvalidParameterException](err) { + if strings.Contains(err.Error(), "Misconfigured PodExecutionRole Trust Policy") { + return retry.RetryableError(err) + } } if err != nil { @@ -146,7 +150,7 @@ func resourceFargateProfileCreate(ctx context.Context, d *schema.ResourceData, m }) if tfresource.TimedOut(err) { - _, err = conn.CreateFargateProfileWithContext(ctx, input) + _, err = client.CreateFargateProfile(ctx, input) } if err != nil { @@ -155,8 +159,13 @@ func resourceFargateProfileCreate(ctx context.Context, d *schema.ResourceData, m d.SetId(profileID) - _, err = waitFargateProfileCreated(ctx, conn, clusterName, fargateProfileName, d.Timeout(schema.TimeoutCreate)) + waiter := eks.NewFargateProfileActiveWaiter(client) + waiterParams := &eks.DescribeFargateProfileInput{ + ClusterName: aws.String(clusterName), + FargateProfileName: aws.String(fargateProfileName), + } + err = waiter.Wait(ctx, waiterParams, d.Timeout(schema.TimeoutCreate)) if err != nil { return sdkdiag.AppendErrorf(diags, "waiting for EKS Fargate Profile (%s) to create: %s", d.Id(), err) } @@ -166,7 +175,7 @@ func resourceFargateProfileCreate(ctx context.Context, d *schema.ResourceData, m func resourceFargateProfileRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, fargateProfileName, err := FargateProfileParseResourceID(d.Id()) @@ -174,7 +183,7 @@ func 
resourceFargateProfileRead(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendErrorf(diags, "reading EKS Fargate Profile (%s): %s", d.Id(), err) } - fargateProfile, err := FindFargateProfileByClusterNameAndFargateProfileName(ctx, conn, clusterName, fargateProfileName) + fargateProfile, err := FindFargateProfileByClusterNameAndFargateProfileName(ctx, client, clusterName, fargateProfileName) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EKS Fargate Profile (%s) not found, removing from state", d.Id()) @@ -197,7 +206,7 @@ func resourceFargateProfileRead(ctx context.Context, d *schema.ResourceData, met d.Set("status", fargateProfile.Status) - if err := d.Set("subnet_ids", aws.StringValueSlice(fargateProfile.Subnets)); err != nil { + if err := d.Set("subnet_ids", fargateProfile.Subnets); err != nil { return sdkdiag.AppendErrorf(diags, "setting subnet_ids: %s", err) } @@ -216,7 +225,7 @@ func resourceFargateProfileUpdate(ctx context.Context, d *schema.ResourceData, m func resourceFargateProfileDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, fargateProfileName, err := FargateProfileParseResourceID(d.Id()) @@ -230,12 +239,12 @@ func resourceFargateProfileDelete(ctx context.Context, d *schema.ResourceData, m defer conns.GlobalMutexKV.Unlock(mutexKey) log.Printf("[DEBUG] Deleting EKS Fargate Profile: %s", d.Id()) - _, err = conn.DeleteFargateProfileWithContext(ctx, &eks.DeleteFargateProfileInput{ + _, err = client.DeleteFargateProfile(ctx, &eks.DeleteFargateProfileInput{ ClusterName: aws.String(clusterName), FargateProfileName: aws.String(fargateProfileName), }) - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + if errs.IsA[*types.ResourceNotFoundException](err) { return diags } @@ -243,8 +252,13 @@ func 
resourceFargateProfileDelete(ctx context.Context, d *schema.ResourceData, m return sdkdiag.AppendErrorf(diags, "deleting EKS Fargate Profile (%s): %s", d.Id(), err) } - _, err = waitFargateProfileDeleted(ctx, conn, clusterName, fargateProfileName, d.Timeout(schema.TimeoutDelete)) + waiter := eks.NewFargateProfileDeletedWaiter(client) + waiterParams := &eks.DescribeFargateProfileInput{ + ClusterName: aws.String(clusterName), + FargateProfileName: aws.String(fargateProfileName), + } + err = waiter.Wait(ctx, waiterParams, d.Timeout(schema.TimeoutDelete)) if err != nil { return sdkdiag.AppendErrorf(diags, "deleting EKS Fargate Profile (%s): waiting for completion: %s", d.Id(), err) } @@ -252,24 +266,27 @@ func resourceFargateProfileDelete(ctx context.Context, d *schema.ResourceData, m return diags } -func expandFargateProfileSelectors(l []interface{}) []*eks.FargateProfileSelector { +func expandFargateProfileSelectors(l []interface{}) []types.FargateProfileSelector { if len(l) == 0 { return nil } - fargateProfileSelectors := make([]*eks.FargateProfileSelector, 0, len(l)) + fargateProfileSelectors := make([]types.FargateProfileSelector, 0, len(l)) for _, mRaw := range l { m, ok := mRaw.(map[string]interface{}) - if !ok { continue } - fargateProfileSelector := &eks.FargateProfileSelector{} + fargateProfileSelector := types.FargateProfileSelector{} if v, ok := m["labels"].(map[string]interface{}); ok && len(v) > 0 { - fargateProfileSelector.Labels = flex.ExpandStringMap(v) + fargateProfileSelector.Labels = make(map[string]string) + for key, value := range flex.ExpandStringMap(v) { + val := value + fargateProfileSelector.Labels[key] = *val + } } if v, ok := m["namespace"].(string); ok && v != "" { @@ -282,7 +299,7 @@ func expandFargateProfileSelectors(l []interface{}) []*eks.FargateProfileSelecto return fargateProfileSelectors } -func flattenFargateProfileSelectors(fargateProfileSelectors []*eks.FargateProfileSelector) []map[string]interface{} { +func 
flattenFargateProfileSelectors(fargateProfileSelectors []types.FargateProfileSelector) []map[string]interface{} { if len(fargateProfileSelectors) == 0 { return []map[string]interface{}{} } @@ -291,8 +308,8 @@ func flattenFargateProfileSelectors(fargateProfileSelectors []*eks.FargateProfil for _, fargateProfileSelector := range fargateProfileSelectors { m := map[string]interface{}{ - "labels": aws.StringValueMap(fargateProfileSelector.Labels), - "namespace": aws.StringValue(fargateProfileSelector.Namespace), + "labels": fargateProfileSelector.Labels, + "namespace": fargateProfileSelector.Namespace, } l = append(l, m) diff --git a/internal/service/eks/fargate_profile_test.go b/internal/service/eks/fargate_profile_test.go index 7bfca00372f..99ba4507c20 100644 --- a/internal/service/eks/fargate_profile_test.go +++ b/internal/service/eks/fargate_profile_test.go @@ -9,8 +9,9 @@ import ( "testing" "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,7 +23,7 @@ import ( func TestAccEKSFargateProfile_basic(t *testing.T) { ctx := acctest.Context(t) - var fargateProfile eks.FargateProfile + var fargateProfile types.FargateProfile rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) eksClusterResourceName := "aws_eks_cluster.test" iamRoleResourceName := "aws_iam_role.pod" @@ -30,7 +31,7 @@ func TestAccEKSFargateProfile_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckFargateProfile(t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFargateProfileDestroy(ctx), Steps: []resource.TestStep{ @@ -43,7 +44,7 @@ func TestAccEKSFargateProfile_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "fargate_profile_name", rName), resource.TestCheckResourceAttrPair(resourceName, "pod_execution_role_arn", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "selector.#", "1"), - resource.TestCheckResourceAttr(resourceName, "status", eks.FargateProfileStatusActive), + resource.TestCheckResourceAttr(resourceName, "status", string(types.FargateProfileStatusActive)), resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "2"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), @@ -59,13 +60,13 @@ func TestAccEKSFargateProfile_basic(t *testing.T) { func TestAccEKSFargateProfile_disappears(t *testing.T) { ctx := acctest.Context(t) - var fargateProfile eks.FargateProfile + var fargateProfile types.FargateProfile rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_fargate_profile.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckFargateProfile(t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFargateProfileDestroy(ctx), Steps: []resource.TestStep{ @@ -83,14 +84,14 @@ func TestAccEKSFargateProfile_disappears(t *testing.T) { func TestAccEKSFargateProfile_Multi_profile(t *testing.T) { ctx := acctest.Context(t) - var fargateProfile eks.FargateProfile + var fargateProfile types.FargateProfile rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName1 := "aws_eks_fargate_profile.test.0" resourceName2 := "aws_eks_fargate_profile.test.1" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckFargateProfile(t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFargateProfileDestroy(ctx), Steps: []resource.TestStep{ @@ -107,13 +108,13 @@ func TestAccEKSFargateProfile_Multi_profile(t *testing.T) { func TestAccEKSFargateProfile_Selector_labels(t *testing.T) { ctx := acctest.Context(t) - var fargateProfile1 eks.FargateProfile + var fargateProfile1 types.FargateProfile rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_fargate_profile.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckFargateProfile(t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFargateProfileDestroy(ctx), Steps: []resource.TestStep{ @@ -134,13 +135,13 @@ func TestAccEKSFargateProfile_Selector_labels(t *testing.T) { func TestAccEKSFargateProfile_tags(t *testing.T) { ctx := acctest.Context(t) - var fargateProfile1, fargateProfile2, fargateProfile3 eks.FargateProfile + var fargateProfile1, fargateProfile2, fargateProfile3 types.FargateProfile rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_fargate_profile.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckFargateProfile(t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFargateProfileDestroy(ctx), Steps: []resource.TestStep{ @@ -178,7 +179,7 @@ func TestAccEKSFargateProfile_tags(t *testing.T) { }) } -func 
testAccCheckFargateProfileExists(ctx context.Context, n string, v *eks.FargateProfile) resource.TestCheckFunc { +func testAccCheckFargateProfileExists(ctx context.Context, n string, v *types.FargateProfile) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -195,9 +196,9 @@ func testAccCheckFargateProfileExists(ctx context.Context, n string, v *eks.Farg return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) - output, err := tfeks.FindFargateProfileByClusterNameAndFargateProfileName(ctx, conn, clusterName, fargateProfileName) + output, err := tfeks.FindFargateProfileByClusterNameAndFargateProfileName(ctx, client, clusterName, fargateProfileName) if err != nil { return err @@ -211,7 +212,7 @@ func testAccCheckFargateProfileExists(ctx context.Context, n string, v *eks.Farg func testAccCheckFargateProfileDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_eks_fargate_profile" { @@ -224,7 +225,7 @@ func testAccCheckFargateProfileDestroy(ctx context.Context) resource.TestCheckFu return err } - _, err = tfeks.FindFargateProfileByClusterNameAndFargateProfileName(ctx, conn, clusterName, fargateProfileName) + _, err = tfeks.FindFargateProfileByClusterNameAndFargateProfileName(ctx, client, clusterName, fargateProfileName) if tfresource.NotFound(err) { continue diff --git a/internal/service/eks/find.go b/internal/service/eks/find.go index 2d5b5e6e11e..48fc403bc76 100644 --- a/internal/service/eks/find.go +++ b/internal/service/eks/find.go @@ -6,21 +6,22 @@ package eks import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - 
"github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs" ) -func FindAddonByClusterNameAndAddonName(ctx context.Context, conn *eks.EKS, clusterName, addonName string) (*eks.Addon, error) { +func FindAddonByClusterNameAndAddonName(ctx context.Context, client *eks.Client, clusterName, addonName string) (*types.Addon, error) { input := &eks.DescribeAddonInput{ AddonName: aws.String(addonName), ClusterName: aws.String(clusterName), } - output, err := conn.DescribeAddonWithContext(ctx, input) + output, err := client.DescribeAddon(ctx, input) - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + if errs.IsA[*types.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -41,16 +42,16 @@ func FindAddonByClusterNameAndAddonName(ctx context.Context, conn *eks.EKS, clus return output.Addon, nil } -func FindAddonUpdateByClusterNameAddonNameAndID(ctx context.Context, conn *eks.EKS, clusterName, addonName, id string) (*eks.Update, error) { +func FindAddonUpdateByClusterNameAddonNameAndID(ctx context.Context, client *eks.Client, clusterName, addonName, id string) (*types.Update, error) { input := &eks.DescribeUpdateInput{ AddonName: aws.String(addonName), Name: aws.String(clusterName), UpdateId: aws.String(id), } - output, err := conn.DescribeUpdateWithContext(ctx, input) + output, err := client.DescribeUpdate(ctx, input) - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + if errs.IsA[*types.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -71,47 +72,45 @@ func FindAddonUpdateByClusterNameAddonNameAndID(ctx context.Context, conn *eks.E return output.Update, nil } -func 
FindAddonVersionByAddonNameAndKubernetesVersion(ctx, conn *eks.EKS, addonName, kubernetesVersion string, mostRecent bool) (*eks.AddonVersionInfo, error) { +func FindAddonVersionByAddonNameAndKubernetesVersion(ctx context.Context, client *eks.Client, addonName, kubernetesVersion string, mostRecent bool) (*types.AddonVersionInfo, error) { input := &eks.DescribeAddonVersionsInput{ AddonName: aws.String(addonName), KubernetesVersion: aws.String(kubernetesVersion), } - var version *eks.AddonVersionInfo + var version *types.AddonVersionInfo - err := conn.DescribeAddonVersionsPagesWithContext(ctx, input, func(page *eks.DescribeAddonVersionsOutput, lastPage bool) bool { - if page == nil || len(page.Addons) == 0 { - return !lastPage + paginator := eks.NewDescribeAddonVersionsPaginator(client, input) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err } for _, addon := range page.Addons { for i, addonVersion := range addon.AddonVersions { if mostRecent && i == 0 { - version = addonVersion - return !lastPage + version = &addon.AddonVersions[i] + break } for _, versionCompatibility := range addonVersion.Compatibilities { - if aws.BoolValue(versionCompatibility.DefaultVersion) { - version = addonVersion - return !lastPage + if versionCompatibility.DefaultVersion { + version = &addon.AddonVersions[i] + break } } } } - return lastPage - }) - - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err } - if version == nil || version.AddonVersion == nil { + if version == nil || version.AddonVersion == nil { return nil, &retry.NotFoundError{ Message: "Empty result", LastRequest: input, @@ -121,15 +120,15 @@ func FindAddonVersionByAddonNameAndKubernetesVersion(ctx 
context.Context, conn * return version, nil } -func FindFargateProfileByClusterNameAndFargateProfileName(ctx context.Context, conn *eks.EKS, clusterName, fargateProfileName string) (*eks.FargateProfile, error) { +func FindFargateProfileByClusterNameAndFargateProfileName(ctx context.Context, client *eks.Client, clusterName, fargateProfileName string) (*types.FargateProfile, error) { input := &eks.DescribeFargateProfileInput{ ClusterName: aws.String(clusterName), FargateProfileName: aws.String(fargateProfileName), } - output, err := conn.DescribeFargateProfileWithContext(ctx, input) + output, err := client.DescribeFargateProfile(ctx, input) - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + if errs.IsA[*types.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -150,15 +149,15 @@ func FindFargateProfileByClusterNameAndFargateProfileName(ctx context.Context, c return output.FargateProfile, nil } -func FindNodegroupByClusterNameAndNodegroupName(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName string) (*eks.Nodegroup, error) { +func FindNodegroupByClusterNameAndNodegroupName(ctx context.Context, client *eks.Client, clusterName, nodeGroupName string) (*types.Nodegroup, error) { input := &eks.DescribeNodegroupInput{ ClusterName: aws.String(clusterName), NodegroupName: aws.String(nodeGroupName), } - output, err := conn.DescribeNodegroupWithContext(ctx, input) + output, err := client.DescribeNodegroup(ctx, input) - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + if errs.IsA[*types.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -179,16 +178,16 @@ func FindNodegroupByClusterNameAndNodegroupName(ctx context.Context, conn *eks.E return output.Nodegroup, nil } -func FindNodegroupUpdateByClusterNameNodegroupNameAndID(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName, id string) (*eks.Update, 
error) { +func FindNodegroupUpdateByClusterNameNodegroupNameAndID(ctx context.Context, client *eks.Client, clusterName, nodeGroupName, id string) (*types.Update, error) { input := &eks.DescribeUpdateInput{ Name: aws.String(clusterName), NodegroupName: aws.String(nodeGroupName), UpdateId: aws.String(id), } - output, err := conn.DescribeUpdateWithContext(ctx, input) + output, err := client.DescribeUpdate(ctx, input) - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + if errs.IsA[*types.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -209,18 +208,18 @@ func FindNodegroupUpdateByClusterNameNodegroupNameAndID(ctx context.Context, con return output.Update, nil } -func FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx context.Context, conn *eks.EKS, clusterName, configName string) (*eks.OidcIdentityProviderConfig, error) { +func FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx context.Context, client *eks.Client, clusterName, configName string) (*types.OidcIdentityProviderConfig, error) { input := &eks.DescribeIdentityProviderConfigInput{ ClusterName: aws.String(clusterName), - IdentityProviderConfig: &eks.IdentityProviderConfig{ + IdentityProviderConfig: &types.IdentityProviderConfig{ Name: aws.String(configName), Type: aws.String(IdentityProviderConfigTypeOIDC), }, } - output, err := conn.DescribeIdentityProviderConfigWithContext(ctx, input) + output, err := client.DescribeIdentityProviderConfig(ctx, input) - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + if errs.IsA[*types.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, diff --git a/internal/service/eks/generate.go b/internal/service/eks/generate.go index d9cf18f5965..91be6cd1d41 100644 --- a/internal/service/eks/generate.go +++ b/internal/service/eks/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsMap -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ServiceTagsMap -KVTValues -SkipTypesImp -ListTags -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. diff --git a/internal/service/eks/identity_provider_config.go b/internal/service/eks/identity_provider_config.go index 7bd35298854..5f6e418e5a6 100644 --- a/internal/service/eks/identity_provider_config.go +++ b/internal/service/eks/identity_provider_config.go @@ -6,16 +6,18 @@ package eks import ( "context" "log" + "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -131,7 +133,7 @@ func ResourceIdentityProviderConfig() *schema.Resource { } func resourceIdentityProviderConfigCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) clusterName := d.Get("cluster_name").(string) configName, oidc := 
expandOIDCIdentityProviderConfigRequest(d.Get("oidc").([]interface{})[0].(map[string]interface{})) @@ -143,7 +145,7 @@ func resourceIdentityProviderConfigCreate(ctx context.Context, d *schema.Resourc Tags: getTagsIn(ctx), } - _, err := conn.AssociateIdentityProviderConfigWithContext(ctx, input) + _, err := client.AssociateIdentityProviderConfig(ctx, input) if err != nil { return diag.Errorf("associating EKS Identity Provider Config (%s): %s", idpID, err) @@ -151,7 +153,7 @@ func resourceIdentityProviderConfigCreate(ctx context.Context, d *schema.Resourc d.SetId(idpID) - _, err = waitOIDCIdentityProviderConfigCreated(ctx, conn, clusterName, configName, d.Timeout(schema.TimeoutCreate)) + _, err = waitOIDCIdentityProviderConfigCreated(ctx, client, clusterName, configName, d.Timeout(schema.TimeoutCreate)) if err != nil { return diag.Errorf("waiting for EKS Identity Provider Config (%s) association: %s", d.Id(), err) @@ -161,7 +163,7 @@ func resourceIdentityProviderConfigCreate(ctx context.Context, d *schema.Resourc } func resourceIdentityProviderConfigRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, configName, err := IdentityProviderConfigParseResourceID(d.Id()) @@ -169,7 +171,7 @@ func resourceIdentityProviderConfigRead(ctx context.Context, d *schema.ResourceD return diag.FromErr(err) } - oidc, err := FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, conn, clusterName, configName) + oidc, err := FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, client, clusterName, configName) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EKS Identity Provider Config (%s) not found, removing from state", d.Id()) @@ -201,7 +203,7 @@ func resourceIdentityProviderConfigUpdate(ctx context.Context, d *schema.Resourc } func resourceIdentityProviderConfigDelete(ctx context.Context, d 
*schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, configName, err := IdentityProviderConfigParseResourceID(d.Id()) @@ -210,27 +212,29 @@ func resourceIdentityProviderConfigDelete(ctx context.Context, d *schema.Resourc } log.Printf("[DEBUG] Disassociating EKS Identity Provider Config: %s", d.Id()) - _, err = conn.DisassociateIdentityProviderConfigWithContext(ctx, &eks.DisassociateIdentityProviderConfigInput{ + _, err = client.DisassociateIdentityProviderConfig(ctx, &eks.DisassociateIdentityProviderConfigInput{ ClusterName: aws.String(clusterName), - IdentityProviderConfig: &eks.IdentityProviderConfig{ + IdentityProviderConfig: &types.IdentityProviderConfig{ Name: aws.String(configName), Type: aws.String(IdentityProviderConfigTypeOIDC), }, }) - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + if errs.IsA[*types.ResourceNotFoundException](err) { return nil } - if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidRequestException, "Identity provider config is not associated with cluster") { - return nil + if errs.IsA[*types.InvalidRequestException](err) { + if strings.Contains(err.Error(), "Identity provider config is not associated with cluster") { + return nil + } } if err != nil { return diag.Errorf("disassociating EKS Identity Provider Config (%s): %s", d.Id(), err) } - _, err = waitOIDCIdentityProviderConfigDeleted(ctx, conn, clusterName, configName, d.Timeout(schema.TimeoutDelete)) + _, err = waitOIDCIdentityProviderConfigDeleted(ctx, client, clusterName, configName, d.Timeout(schema.TimeoutDelete)) if err != nil { return diag.Errorf("waiting for EKS Identity Provider Config (%s) disassociation: %s", d.Id(), err) @@ -239,12 +243,12 @@ func resourceIdentityProviderConfigDelete(ctx context.Context, d *schema.Resourc return nil } -func expandOIDCIdentityProviderConfigRequest(tfMap map[string]interface{}) (string, 
*eks.OidcIdentityProviderConfigRequest) { +func expandOIDCIdentityProviderConfigRequest(tfMap map[string]interface{}) (string, *types.OidcIdentityProviderConfigRequest) { if tfMap == nil { return "", nil } - apiObject := &eks.OidcIdentityProviderConfigRequest{} + apiObject := &types.OidcIdentityProviderConfigRequest{} if v, ok := tfMap["client_id"].(string); ok && v != "" { apiObject.ClientId = aws.String(v) @@ -269,7 +273,7 @@ func expandOIDCIdentityProviderConfigRequest(tfMap map[string]interface{}) (stri } if v, ok := tfMap["required_claims"].(map[string]interface{}); ok && len(v) > 0 { - apiObject.RequiredClaims = flex.ExpandStringMap(v) + apiObject.RequiredClaims = flex.ExpandStringValueMap(v) } if v, ok := tfMap["username_claim"].(string); ok && v != "" { @@ -283,7 +287,7 @@ func expandOIDCIdentityProviderConfigRequest(tfMap map[string]interface{}) (stri return identityProviderConfigName, apiObject } -func flattenOIDCIdentityProviderConfig(apiObject *eks.OidcIdentityProviderConfig) map[string]interface{} { +func flattenOIDCIdentityProviderConfig(apiObject *types.OidcIdentityProviderConfig) map[string]interface{} { if apiObject == nil { return nil } @@ -291,35 +295,35 @@ func flattenOIDCIdentityProviderConfig(apiObject *eks.OidcIdentityProviderConfig tfMap := map[string]interface{}{} if v := apiObject.ClientId; v != nil { - tfMap["client_id"] = aws.StringValue(v) + tfMap["client_id"] = v } if v := apiObject.GroupsClaim; v != nil { - tfMap["groups_claim"] = aws.StringValue(v) + tfMap["groups_claim"] = v } if v := apiObject.GroupsPrefix; v != nil { - tfMap["groups_prefix"] = aws.StringValue(v) + tfMap["groups_prefix"] = v } if v := apiObject.IdentityProviderConfigName; v != nil { - tfMap["identity_provider_config_name"] = aws.StringValue(v) + tfMap["identity_provider_config_name"] = v } if v := apiObject.IssuerUrl; v != nil { - tfMap["issuer_url"] = aws.StringValue(v) + tfMap["issuer_url"] = v } if v := apiObject.RequiredClaims; v != nil { - 
tfMap["required_claims"] = aws.StringValueMap(v) + tfMap["required_claims"] = v } if v := apiObject.UsernameClaim; v != nil { - tfMap["username_claim"] = aws.StringValue(v) + tfMap["username_claim"] = v } if v := apiObject.UsernamePrefix; v != nil { - tfMap["username_prefix"] = aws.StringValue(v) + tfMap["username_prefix"] = v } return tfMap diff --git a/internal/service/eks/identity_provider_config_test.go b/internal/service/eks/identity_provider_config_test.go index e0fd7f1fa95..6294a41a77b 100644 --- a/internal/service/eks/identity_provider_config_test.go +++ b/internal/service/eks/identity_provider_config_test.go @@ -9,7 +9,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,14 +22,14 @@ import ( func TestAccEKSIdentityProviderConfig_basic(t *testing.T) { ctx := acctest.Context(t) - var config eks.OidcIdentityProviderConfig + var config types.OidcIdentityProviderConfig rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) eksClusterResourceName := "aws_eks_cluster.test" resourceName := "aws_eks_identity_provider_config.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckIdentityProviderConfigDestroy(ctx), Steps: []resource.TestStep{ @@ -65,13 +66,13 @@ func TestAccEKSIdentityProviderConfig_basic(t *testing.T) { func TestAccEKSIdentityProviderConfig_disappears(t *testing.T) { ctx := acctest.Context(t) - var config eks.OidcIdentityProviderConfig + var config 
types.OidcIdentityProviderConfig rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_identity_provider_config.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckIdentityProviderConfigDestroy(ctx), Steps: []resource.TestStep{ @@ -89,13 +90,13 @@ func TestAccEKSIdentityProviderConfig_disappears(t *testing.T) { func TestAccEKSIdentityProviderConfig_allOIDCOptions(t *testing.T) { ctx := acctest.Context(t) - var config eks.OidcIdentityProviderConfig + var config types.OidcIdentityProviderConfig rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_identity_provider_config.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckIdentityProviderConfigDestroy(ctx), Steps: []resource.TestStep{ @@ -127,13 +128,13 @@ func TestAccEKSIdentityProviderConfig_allOIDCOptions(t *testing.T) { func TestAccEKSIdentityProviderConfig_tags(t *testing.T) { ctx := acctest.Context(t) - var config eks.OidcIdentityProviderConfig + var config types.OidcIdentityProviderConfig rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_identity_provider_config.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: 
testAccCheckIdentityProviderConfigDestroy(ctx), Steps: []resource.TestStep{ @@ -171,7 +172,7 @@ func TestAccEKSIdentityProviderConfig_tags(t *testing.T) { }) } -func testAccCheckIdentityProviderExistsConfig(ctx context.Context, resourceName string, config *eks.OidcIdentityProviderConfig) resource.TestCheckFunc { +func testAccCheckIdentityProviderExistsConfig(ctx context.Context, resourceName string, config *types.OidcIdentityProviderConfig) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] if !ok { @@ -188,9 +189,9 @@ func testAccCheckIdentityProviderExistsConfig(ctx context.Context, resourceName return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) - output, err := tfeks.FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, conn, clusterName, configName) + output, err := tfeks.FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, client, clusterName, configName) if err != nil { return err @@ -204,7 +205,7 @@ func testAccCheckIdentityProviderExistsConfig(ctx context.Context, resourceName func testAccCheckIdentityProviderConfigDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_eks_identity_provider_config" { @@ -217,7 +218,7 @@ func testAccCheckIdentityProviderConfigDestroy(ctx context.Context) resource.Tes return err } - _, err = tfeks.FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, conn, clusterName, configName) + _, err = tfeks.FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, client, clusterName, configName) if tfresource.NotFound(err) { continue diff --git a/internal/service/eks/node_group.go 
b/internal/service/eks/node_group.go index ad1a8af448b..586e20529d2 100644 --- a/internal/service/eks/node_group.go +++ b/internal/service/eks/node_group.go @@ -9,15 +9,17 @@ import ( "reflect" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -47,22 +49,22 @@ func ResourceNodeGroup() *schema.Resource { Schema: map[string]*schema.Schema{ "ami_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(eks.AMITypes_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.AMITypes](), }, "arn": { Type: schema.TypeString, Computed: true, }, "capacity_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(eks.CapacityTypes_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.CapacityTypes](), }, "cluster_name": { Type: schema.TypeString, @@ -248,9 +250,9 @@ func 
ResourceNodeGroup() *schema.Resource { ValidateFunc: validation.StringLenBetween(0, 63), }, "effect": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(eks.TaintEffect_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.TaintEffect](), }, }, }, @@ -293,7 +295,7 @@ func ResourceNodeGroup() *schema.Resource { } func resourceNodeGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) clusterName := d.Get("cluster_name").(string) nodeGroupName := create.Name(d.Get("node_group_name").(string), d.Get("node_group_name_prefix").(string)) @@ -303,28 +305,28 @@ func resourceNodeGroupCreate(ctx context.Context, d *schema.ResourceData, meta i ClusterName: aws.String(clusterName), NodegroupName: aws.String(nodeGroupName), NodeRole: aws.String(d.Get("node_role_arn").(string)), - Subnets: flex.ExpandStringSet(d.Get("subnet_ids").(*schema.Set)), + Subnets: flex.ExpandStringValueSet(d.Get("subnet_ids").(*schema.Set)), Tags: getTagsIn(ctx), } if v, ok := d.GetOk("ami_type"); ok { - input.AmiType = aws.String(v.(string)) + input.AmiType = types.AMITypes(v.(string)) } if v, ok := d.GetOk("capacity_type"); ok { - input.CapacityType = aws.String(v.(string)) + input.CapacityType = types.CapacityTypes(v.(string)) } if v, ok := d.GetOk("disk_size"); ok { - input.DiskSize = aws.Int64(int64(v.(int))) + input.DiskSize = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("instance_types"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - input.InstanceTypes = flex.ExpandStringList(v.([]interface{})) + input.InstanceTypes = flex.ExpandStringValueList(v.([]interface{})) } if v := d.Get("labels").(map[string]interface{}); len(v) > 0 { - input.Labels = flex.ExpandStringMap(v) + input.Labels = flex.ExpandStringValueMap(v) } if v := 
d.Get("launch_template").([]interface{}); len(v) > 0 { @@ -355,7 +357,7 @@ func resourceNodeGroupCreate(ctx context.Context, d *schema.ResourceData, meta i input.Version = aws.String(v.(string)) } - _, err := conn.CreateNodegroupWithContext(ctx, input) + _, err := client.CreateNodegroup(ctx, input) if err != nil { return diag.Errorf("creating EKS Node Group (%s): %s", groupID, err) @@ -363,8 +365,13 @@ func resourceNodeGroupCreate(ctx context.Context, d *schema.ResourceData, meta i d.SetId(groupID) - _, err = waitNodegroupCreated(ctx, conn, clusterName, nodeGroupName, d.Timeout(schema.TimeoutCreate)) + waiter := eks.NewNodegroupActiveWaiter(client) + waiterParams := &eks.DescribeNodegroupInput{ + ClusterName: aws.String(clusterName), + NodegroupName: aws.String(nodeGroupName), + } + err = waiter.Wait(ctx, waiterParams, d.Timeout(schema.TimeoutCreate)) if err != nil { return diag.Errorf("waiting for EKS Node Group (%s) to create: %s", d.Id(), err) } @@ -373,7 +380,7 @@ func resourceNodeGroupCreate(ctx context.Context, d *schema.ResourceData, meta i } func resourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, nodeGroupName, err := NodeGroupParseResourceID(d.Id()) @@ -381,7 +388,7 @@ func resourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta int return diag.FromErr(err) } - nodeGroup, err := FindNodegroupByClusterNameAndNodegroupName(ctx, conn, clusterName, nodeGroupName) + nodeGroup, err := FindNodegroupByClusterNameAndNodegroupName(ctx, client, clusterName, nodeGroupName) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EKS Node Group (%s) not found, removing from state", d.Id()) @@ -399,11 +406,11 @@ func resourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta int d.Set("cluster_name", nodeGroup.ClusterName) d.Set("disk_size", nodeGroup.DiskSize) - if 
err := d.Set("instance_types", aws.StringValueSlice(nodeGroup.InstanceTypes)); err != nil { + if err := d.Set("instance_types", nodeGroup.InstanceTypes); err != nil { return diag.Errorf("setting instance_types: %s", err) } - if err := d.Set("labels", aws.StringValueMap(nodeGroup.Labels)); err != nil { + if err := d.Set("labels", nodeGroup.Labels); err != nil { return diag.Errorf("setting labels: %s", err) } @@ -412,7 +419,7 @@ func resourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta int } d.Set("node_group_name", nodeGroup.NodegroupName) - d.Set("node_group_name_prefix", create.NamePrefixFromName(aws.StringValue(nodeGroup.NodegroupName))) + d.Set("node_group_name_prefix", create.NamePrefixFromName(aws.ToString(nodeGroup.NodegroupName))) d.Set("node_role_arn", nodeGroup.NodeRole) d.Set("release_version", nodeGroup.ReleaseVersion) @@ -434,7 +441,7 @@ func resourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta int d.Set("status", nodeGroup.Status) - if err := d.Set("subnet_ids", aws.StringValueSlice(nodeGroup.Subnets)); err != nil { + if err := d.Set("subnet_ids", nodeGroup.Subnets); err != nil { return diag.Errorf("setting subnets: %s", err) } @@ -458,7 +465,7 @@ func resourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta int } func resourceNodeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, nodeGroupName, err := NodeGroupParseResourceID(d.Id()) @@ -471,7 +478,7 @@ func resourceNodeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i input := &eks.UpdateNodegroupVersionInput{ ClientRequestToken: aws.String(id.UniqueId()), ClusterName: aws.String(clusterName), - Force: aws.Bool(d.Get("force_update_version").(bool)), + Force: *aws.Bool(d.Get("force_update_version").(bool)), NodegroupName: aws.String(nodeGroupName), } @@ -503,15 +510,15 @@ func 
resourceNodeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i input.Version = aws.String(v.(string)) } - output, err := conn.UpdateNodegroupVersionWithContext(ctx, input) + output, err := client.UpdateNodegroupVersion(ctx, input) if err != nil { return diag.Errorf("updating EKS Node Group (%s) version: %s", d.Id(), err) } - updateID := aws.StringValue(output.Update.Id) + updateID := aws.ToString(output.Update.Id) - _, err = waitNodegroupUpdateSuccessful(ctx, conn, clusterName, nodeGroupName, updateID, d.Timeout(schema.TimeoutUpdate)) + _, err = waitNodegroupUpdateSuccessful(ctx, client, clusterName, nodeGroupName, updateID, d.Timeout(schema.TimeoutUpdate)) if err != nil { return diag.Errorf("waiting for EKS Node Group (%s) version update (%s): %s", d.Id(), updateID, err) @@ -542,15 +549,15 @@ func resourceNodeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i } } - output, err := conn.UpdateNodegroupConfigWithContext(ctx, input) + output, err := client.UpdateNodegroupConfig(ctx, input) if err != nil { return diag.Errorf("updating EKS Node Group (%s) config: %s", d.Id(), err) } - updateID := aws.StringValue(output.Update.Id) + updateID := aws.ToString(output.Update.Id) - _, err = waitNodegroupUpdateSuccessful(ctx, conn, clusterName, nodeGroupName, updateID, d.Timeout(schema.TimeoutUpdate)) + _, err = waitNodegroupUpdateSuccessful(ctx, client, clusterName, nodeGroupName, updateID, d.Timeout(schema.TimeoutUpdate)) if err != nil { return diag.Errorf("waiting for EKS Node Group (%s) config update (%s): %s", d.Id(), updateID, err) @@ -561,7 +568,7 @@ func resourceNodeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i } func resourceNodeGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, nodeGroupName, err := NodeGroupParseResourceID(d.Id()) @@ -570,12 +577,12 @@ func 
resourceNodeGroupDelete(ctx context.Context, d *schema.ResourceData, meta i } log.Printf("[DEBUG] Deleting EKS Node Group: %s", d.Id()) - _, err = conn.DeleteNodegroupWithContext(ctx, &eks.DeleteNodegroupInput{ + _, err = client.DeleteNodegroup(ctx, &eks.DeleteNodegroupInput{ ClusterName: aws.String(clusterName), NodegroupName: aws.String(nodeGroupName), }) - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + if errs.IsA[*types.ResourceNotFoundException](err) { return nil } @@ -583,8 +590,13 @@ func resourceNodeGroupDelete(ctx context.Context, d *schema.ResourceData, meta i return diag.Errorf("deleting EKS Node Group (%s): %s", d.Id(), err) } - _, err = waitNodegroupDeleted(ctx, conn, clusterName, nodeGroupName, d.Timeout(schema.TimeoutDelete)) + waiter := eks.NewNodegroupDeletedWaiter(client) + waiterParams := &eks.DescribeNodegroupInput{ + ClusterName: aws.String(clusterName), + NodegroupName: aws.String(nodeGroupName), + } + err = waiter.Wait(ctx, waiterParams, d.Timeout(schema.TimeoutDelete)) if err != nil { return diag.Errorf("waiting for EKS Node Group (%s) to delete: %s", d.Id(), err) } @@ -592,14 +604,14 @@ func resourceNodeGroupDelete(ctx context.Context, d *schema.ResourceData, meta i return nil } -func expandLaunchTemplateSpecification(l []interface{}) *eks.LaunchTemplateSpecification { +func expandLaunchTemplateSpecification(l []interface{}) *types.LaunchTemplateSpecification { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - config := &eks.LaunchTemplateSpecification{} + config := &types.LaunchTemplateSpecification{} if v, ok := m["id"].(string); ok && v != "" { config.Id = aws.String(v) @@ -616,34 +628,34 @@ func expandLaunchTemplateSpecification(l []interface{}) *eks.LaunchTemplateSpeci return config } -func expandNodegroupScalingConfig(tfMap map[string]interface{}) *eks.NodegroupScalingConfig { +func expandNodegroupScalingConfig(tfMap map[string]interface{}) *types.NodegroupScalingConfig { if 
tfMap == nil { return nil } - apiObject := &eks.NodegroupScalingConfig{} + apiObject := &types.NodegroupScalingConfig{} if v, ok := tfMap["desired_size"].(int); ok { - apiObject.DesiredSize = aws.Int64(int64(v)) + apiObject.DesiredSize = aws.Int32(int32(v)) } if v, ok := tfMap["max_size"].(int); ok && v != 0 { - apiObject.MaxSize = aws.Int64(int64(v)) + apiObject.MaxSize = aws.Int32(int32(v)) } if v, ok := tfMap["min_size"].(int); ok { - apiObject.MinSize = aws.Int64(int64(v)) + apiObject.MinSize = aws.Int32(int32(v)) } return apiObject } -func expandTaints(l []interface{}) []*eks.Taint { +func expandTaints(l []interface{}) []types.Taint { if len(l) == 0 { return nil } - var taints []*eks.Taint + var taints []types.Taint for _, raw := range l { t, ok := raw.(map[string]interface{}) @@ -652,7 +664,7 @@ func expandTaints(l []interface{}) []*eks.Taint { continue } - taint := &eks.Taint{} + taint := types.Taint{} if k, ok := t["key"].(string); ok { taint.Key = aws.String(k) @@ -663,7 +675,7 @@ func expandTaints(l []interface{}) []*eks.Taint { } if e, ok := t["effect"].(string); ok { - taint.Effect = aws.String(e) + taint.Effect = types.TaintEffect(e) } taints = append(taints, taint) @@ -672,25 +684,17 @@ func expandTaints(l []interface{}) []*eks.Taint { return taints } -func expandUpdateTaintsPayload(oldTaintsRaw, newTaintsRaw []interface{}) *eks.UpdateTaintsPayload { +func expandUpdateTaintsPayload(oldTaintsRaw, newTaintsRaw []interface{}) *types.UpdateTaintsPayload { oldTaints := expandTaints(oldTaintsRaw) newTaints := expandTaints(newTaintsRaw) - var removedTaints []*eks.Taint + var removedTaints []types.Taint for _, ot := range oldTaints { - if ot == nil { - continue - } - removed := true for _, nt := range newTaints { - if nt == nil { - continue - } - // if both taint.key and taint.effect are the same, we don't need to remove it. 
- if aws.StringValue(nt.Key) == aws.StringValue(ot.Key) && - aws.StringValue(nt.Effect) == aws.StringValue(ot.Effect) { + if aws.ToString(nt.Key) == aws.ToString(ot.Key) && + string(nt.Effect) == string(ot.Effect) { removed = false break } @@ -701,18 +705,10 @@ func expandUpdateTaintsPayload(oldTaintsRaw, newTaintsRaw []interface{}) *eks.Up } } - var updatedTaints []*eks.Taint + var updatedTaints []types.Taint for _, nt := range newTaints { - if nt == nil { - continue - } - updated := true for _, ot := range oldTaints { - if nt == nil { - continue - } - if reflect.DeepEqual(nt, ot) { updated = false break @@ -727,7 +723,7 @@ func expandUpdateTaintsPayload(oldTaintsRaw, newTaintsRaw []interface{}) *eks.Up return nil } - updateTaintsPayload := &eks.UpdateTaintsPayload{} + updateTaintsPayload := &types.UpdateTaintsPayload{} if len(removedTaints) > 0 { updateTaintsPayload.RemoveTaints = removedTaints @@ -740,45 +736,45 @@ func expandUpdateTaintsPayload(oldTaintsRaw, newTaintsRaw []interface{}) *eks.Up return updateTaintsPayload } -func expandRemoteAccessConfig(l []interface{}) *eks.RemoteAccessConfig { +func expandRemoteAccessConfig(l []interface{}) *types.RemoteAccessConfig { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - config := &eks.RemoteAccessConfig{} + config := &types.RemoteAccessConfig{} if v, ok := m["ec2_ssh_key"].(string); ok && v != "" { config.Ec2SshKey = aws.String(v) } if v, ok := m["source_security_group_ids"].(*schema.Set); ok && v.Len() > 0 { - config.SourceSecurityGroups = flex.ExpandStringSet(v) + config.SourceSecurityGroups = flex.ExpandStringValueSet(v) } return config } -func expandNodegroupUpdateConfig(tfMap map[string]interface{}) *eks.NodegroupUpdateConfig { +func expandNodegroupUpdateConfig(tfMap map[string]interface{}) *types.NodegroupUpdateConfig { if tfMap == nil { return nil } - apiObject := &eks.NodegroupUpdateConfig{} + apiObject := &types.NodegroupUpdateConfig{} if v, ok := 
tfMap["max_unavailable"].(int); ok && v != 0 { - apiObject.MaxUnavailable = aws.Int64(int64(v)) + apiObject.MaxUnavailable = aws.Int32(int32(v)) } if v, ok := tfMap["max_unavailable_percentage"].(int); ok && v != 0 { - apiObject.MaxUnavailablePercentage = aws.Int64(int64(v)) + apiObject.MaxUnavailablePercentage = aws.Int32(int32(v)) } return apiObject } -func expandUpdateLabelsPayload(ctx context.Context, oldLabelsMap, newLabelsMap interface{}) *eks.UpdateLabelsPayload { +func expandUpdateLabelsPayload(ctx context.Context, oldLabelsMap, newLabelsMap interface{}) *types.UpdateLabelsPayload { // EKS Labels operate similarly to keyvaluetags oldLabels := tftags.New(ctx, oldLabelsMap) newLabels := tftags.New(ctx, newLabelsMap) @@ -790,20 +786,20 @@ func expandUpdateLabelsPayload(ctx context.Context, oldLabelsMap, newLabelsMap i return nil } - updateLabelsPayload := &eks.UpdateLabelsPayload{} + updateLabelsPayload := &types.UpdateLabelsPayload{} if len(removedLabels) > 0 { - updateLabelsPayload.RemoveLabels = aws.StringSlice(removedLabels.Keys()) + updateLabelsPayload.RemoveLabels = removedLabels.Keys() } if len(updatedLabels) > 0 { - updateLabelsPayload.AddOrUpdateLabels = aws.StringMap(updatedLabels.Map()) + updateLabelsPayload.AddOrUpdateLabels = updatedLabels.Map() } return updateLabelsPayload } -func flattenAutoScalingGroups(autoScalingGroups []*eks.AutoScalingGroup) []map[string]interface{} { +func flattenAutoScalingGroups(autoScalingGroups []types.AutoScalingGroup) []map[string]interface{} { if len(autoScalingGroups) == 0 { return []map[string]interface{}{} } @@ -812,7 +808,7 @@ func flattenAutoScalingGroups(autoScalingGroups []*eks.AutoScalingGroup) []map[s for _, autoScalingGroup := range autoScalingGroups { m := map[string]interface{}{ - "name": aws.StringValue(autoScalingGroup.Name), + "name": aws.ToString(autoScalingGroup.Name), } l = append(l, m) @@ -821,7 +817,7 @@ func flattenAutoScalingGroups(autoScalingGroups []*eks.AutoScalingGroup) []map[s return l } 
-func flattenLaunchTemplateSpecification(config *eks.LaunchTemplateSpecification) []map[string]interface{} { +func flattenLaunchTemplateSpecification(config *types.LaunchTemplateSpecification) []map[string]interface{} { if config == nil { return nil } @@ -829,34 +825,34 @@ func flattenLaunchTemplateSpecification(config *eks.LaunchTemplateSpecification) m := map[string]interface{}{} if v := config.Id; v != nil { - m["id"] = aws.StringValue(v) + m["id"] = aws.ToString(v) } if v := config.Name; v != nil { - m["name"] = aws.StringValue(v) + m["name"] = aws.ToString(v) } if v := config.Version; v != nil { - m["version"] = aws.StringValue(v) + m["version"] = aws.ToString(v) } return []map[string]interface{}{m} } -func flattenNodeGroupResources(resources *eks.NodegroupResources) []map[string]interface{} { +func flattenNodeGroupResources(resources *types.NodegroupResources) []map[string]interface{} { if resources == nil { return []map[string]interface{}{} } m := map[string]interface{}{ "autoscaling_groups": flattenAutoScalingGroups(resources.AutoScalingGroups), - "remote_access_security_group_id": aws.StringValue(resources.RemoteAccessSecurityGroup), + "remote_access_security_group_id": aws.ToString(resources.RemoteAccessSecurityGroup), } return []map[string]interface{}{m} } -func flattenNodeGroupScalingConfig(apiObject *eks.NodegroupScalingConfig) map[string]interface{} { +func flattenNodeGroupScalingConfig(apiObject *types.NodegroupScalingConfig) map[string]interface{} { if apiObject == nil { return nil } @@ -864,21 +860,21 @@ func flattenNodeGroupScalingConfig(apiObject *eks.NodegroupScalingConfig) map[st tfMap := map[string]interface{}{} if v := apiObject.DesiredSize; v != nil { - tfMap["desired_size"] = aws.Int64Value(v) + tfMap["desired_size"] = v } if v := apiObject.MaxSize; v != nil { - tfMap["max_size"] = aws.Int64Value(v) + tfMap["max_size"] = v } if v := apiObject.MinSize; v != nil { - tfMap["min_size"] = aws.Int64Value(v) + tfMap["min_size"] = v } return tfMap 
} -func flattenNodeGroupUpdateConfig(apiObject *eks.NodegroupUpdateConfig) map[string]interface{} { +func flattenNodeGroupUpdateConfig(apiObject *types.NodegroupUpdateConfig) map[string]interface{} { if apiObject == nil { return nil } @@ -886,30 +882,30 @@ func flattenNodeGroupUpdateConfig(apiObject *eks.NodegroupUpdateConfig) map[stri tfMap := map[string]interface{}{} if v := apiObject.MaxUnavailable; v != nil { - tfMap["max_unavailable"] = aws.Int64Value(v) + tfMap["max_unavailable"] = v } if v := apiObject.MaxUnavailablePercentage; v != nil { - tfMap["max_unavailable_percentage"] = aws.Int64Value(v) + tfMap["max_unavailable_percentage"] = v } return tfMap } -func flattenRemoteAccessConfig(config *eks.RemoteAccessConfig) []map[string]interface{} { +func flattenRemoteAccessConfig(config *types.RemoteAccessConfig) []map[string]interface{} { if config == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "ec2_ssh_key": aws.StringValue(config.Ec2SshKey), - "source_security_group_ids": aws.StringValueSlice(config.SourceSecurityGroups), + "ec2_ssh_key": config.Ec2SshKey, + "source_security_group_ids": config.SourceSecurityGroups, } return []map[string]interface{}{m} } -func flattenTaints(taints []*eks.Taint) []interface{} { +func flattenTaints(taints []types.Taint) []interface{} { if len(taints) == 0 { return nil } @@ -917,14 +913,10 @@ func flattenTaints(taints []*eks.Taint) []interface{} { var results []interface{} for _, taint := range taints { - if taint == nil { - continue - } - t := make(map[string]interface{}) - t["key"] = aws.StringValue(taint.Key) - t["value"] = aws.StringValue(taint.Value) - t["effect"] = aws.StringValue(taint.Effect) + t["key"] = taint.Key + t["value"] = taint.Value + t["effect"] = taint.Effect results = append(results, t) } diff --git a/internal/service/eks/node_group_data_source.go b/internal/service/eks/node_group_data_source.go index 32cbdf4af4c..4ae390647b7 100644 --- 
a/internal/service/eks/node_group_data_source.go +++ b/internal/service/eks/node_group_data_source.go @@ -6,7 +6,6 @@ package eks import ( "context" - "github.com/aws/aws-sdk-go/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -184,13 +183,13 @@ func DataSourceNodeGroup() *schema.Resource { } func dataSourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + client := meta.(*conns.AWSClient).EKSClient(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig clusterName := d.Get("cluster_name").(string) nodeGroupName := d.Get("node_group_name").(string) id := NodeGroupCreateResourceID(clusterName, nodeGroupName) - nodeGroup, err := FindNodegroupByClusterNameAndNodegroupName(ctx, conn, clusterName, nodeGroupName) + nodeGroup, err := FindNodegroupByClusterNameAndNodegroupName(ctx, client, clusterName, nodeGroupName) if err != nil { return diag.Errorf("reading EKS Node Group (%s): %s", id, err) @@ -225,7 +224,7 @@ func dataSourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta i d.Set("scaling_config", nil) } d.Set("status", nodeGroup.Status) - d.Set("subnet_ids", aws.StringValueSlice(nodeGroup.Subnets)) + d.Set("subnet_ids", nodeGroup.Subnets) if err := d.Set("taints", flattenTaints(nodeGroup.Taints)); err != nil { return diag.Errorf("setting taints: %s", err) } diff --git a/internal/service/eks/node_group_data_source_test.go b/internal/service/eks/node_group_data_source_test.go index e44c6c34166..51c678bf8b5 100644 --- a/internal/service/eks/node_group_data_source_test.go +++ b/internal/service/eks/node_group_data_source_test.go @@ -7,7 +7,8 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" sdkacctest 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -15,14 +16,14 @@ import ( func TestAccEKSNodeGroupDataSource_basic(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup eks.Nodegroup + var nodeGroup types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) dataSourceResourceName := "data.aws_eks_node_group.test" resourceName := "aws_eks_node_group.test" resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ diff --git a/internal/service/eks/node_group_test.go b/internal/service/eks/node_group_test.go index 379a7ee0ab3..685bf45ae16 100644 --- a/internal/service/eks/node_group_test.go +++ b/internal/service/eks/node_group_test.go @@ -9,8 +9,9 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,12 +22,12 @@ import ( ) func init() { - acctest.RegisterServiceErrorCheckFunc(eks.EndpointsID, testAccErrorCheckSkip) + acctest.RegisterServiceErrorCheckFunc(eks.ServiceID, testAccErrorCheckSkip) } func TestAccEKSNodeGroup_basic(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup eks.Nodegroup + var nodeGroup types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) eksClusterResourceName := 
"aws_eks_cluster.test" iamRoleResourceName := "aws_iam_role.node" @@ -34,7 +35,7 @@ func TestAccEKSNodeGroup_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -42,10 +43,10 @@ func TestAccEKSNodeGroup_basic(t *testing.T) { Config: testAccNodeGroupConfig_dataSourceName(rName), Check: resource.ComposeTestCheckFunc( testAccCheckNodeGroupExists(ctx, resourceName, &nodeGroup), - resource.TestCheckResourceAttr(resourceName, "ami_type", eks.AMITypesAl2X8664), + resource.TestCheckResourceAttr(resourceName, "ami_type", string(types.AMITypesAl2X8664)), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "eks", regexache.MustCompile(fmt.Sprintf("nodegroup/%[1]s/%[1]s/.+", rName))), resource.TestCheckResourceAttrPair(resourceName, "cluster_name", eksClusterResourceName, "name"), - resource.TestCheckResourceAttr(resourceName, "capacity_type", eks.CapacityTypesOnDemand), + resource.TestCheckResourceAttr(resourceName, "capacity_type", string(types.CapacityTypesOnDemand)), resource.TestCheckResourceAttr(resourceName, "disk_size", "20"), resource.TestCheckResourceAttr(resourceName, "instance_types.#", "1"), resource.TestCheckResourceAttr(resourceName, "labels.%", "0"), @@ -60,7 +61,7 @@ func TestAccEKSNodeGroup_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "scaling_config.0.desired_size", "1"), resource.TestCheckResourceAttr(resourceName, "scaling_config.0.max_size", "1"), resource.TestCheckResourceAttr(resourceName, "scaling_config.0.min_size", "1"), - resource.TestCheckResourceAttr(resourceName, "status", eks.NodegroupStatusActive), + resource.TestCheckResourceAttr(resourceName, "status", string(types.NodegroupStatusActive)), 
resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "2"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestCheckResourceAttr(resourceName, "taint.#", "0"), @@ -79,13 +80,13 @@ func TestAccEKSNodeGroup_basic(t *testing.T) { func TestAccEKSNodeGroup_Name_generated(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup eks.Nodegroup + var nodeGroup types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -108,13 +109,13 @@ func TestAccEKSNodeGroup_Name_generated(t *testing.T) { func TestAccEKSNodeGroup_namePrefix(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup eks.Nodegroup + var nodeGroup types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -137,13 +138,13 @@ func TestAccEKSNodeGroup_namePrefix(t *testing.T) { func TestAccEKSNodeGroup_disappears(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup eks.Nodegroup + var nodeGroup types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: 
acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -161,21 +162,21 @@ func TestAccEKSNodeGroup_disappears(t *testing.T) { func TestAccEKSNodeGroup_amiType(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccNodeGroupConfig_amiType(rName, eks.AMITypesAl2X8664Gpu), + Config: testAccNodeGroupConfig_amiType(rName, string(types.AMITypesAl2X8664Gpu)), Check: resource.ComposeTestCheckFunc( testAccCheckNodeGroupExists(ctx, resourceName, &nodeGroup1), - resource.TestCheckResourceAttr(resourceName, "ami_type", eks.AMITypesAl2X8664Gpu), + resource.TestCheckResourceAttr(resourceName, "ami_type", string(types.AMITypesAl2X8664Gpu)), ), }, { @@ -184,10 +185,10 @@ func TestAccEKSNodeGroup_amiType(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccNodeGroupConfig_amiType(rName, eks.AMITypesAl2Arm64), + Config: testAccNodeGroupConfig_amiType(rName, string(types.AMITypesAl2Arm64)), Check: resource.ComposeTestCheckFunc( testAccCheckNodeGroupExists(ctx, resourceName, &nodeGroup2), - resource.TestCheckResourceAttr(resourceName, "ami_type", eks.AMITypesAl2Arm64), + resource.TestCheckResourceAttr(resourceName, "ami_type", string(types.AMITypesAl2Arm64)), ), }, }, @@ -196,21 +197,21 @@ func TestAccEKSNodeGroup_amiType(t *testing.T) { func 
TestAccEKSNodeGroup_CapacityType_spot(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 eks.Nodegroup + var nodeGroup1 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccNodeGroupConfig_capacityType(rName, eks.CapacityTypesSpot), + Config: testAccNodeGroupConfig_capacityType(rName, string(types.CapacityTypesSpot)), Check: resource.ComposeTestCheckFunc( testAccCheckNodeGroupExists(ctx, resourceName, &nodeGroup1), - resource.TestCheckResourceAttr(resourceName, "capacity_type", eks.CapacityTypesSpot), + resource.TestCheckResourceAttr(resourceName, "capacity_type", string(types.CapacityTypesSpot)), ), }, { @@ -224,13 +225,13 @@ func TestAccEKSNodeGroup_CapacityType_spot(t *testing.T) { func TestAccEKSNodeGroup_diskSize(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 eks.Nodegroup + var nodeGroup1 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -252,13 +253,13 @@ func TestAccEKSNodeGroup_diskSize(t *testing.T) { func TestAccEKSNodeGroup_forceUpdateVersion(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 eks.Nodegroup + var nodeGroup1 types.Nodegroup rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -288,14 +289,14 @@ func TestAccEKSNodeGroup_forceUpdateVersion(t *testing.T) { func TestAccEKSNodeGroup_InstanceTypes_multiple(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 eks.Nodegroup + var nodeGroup1 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" instanceTypes := fmt.Sprintf("%q, %q, %q, %q", "t2.medium", "t3.medium", "t2.large", "t3.large") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -321,13 +322,13 @@ func TestAccEKSNodeGroup_InstanceTypes_multiple(t *testing.T) { func TestAccEKSNodeGroup_InstanceTypes_single(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 eks.Nodegroup + var nodeGroup1 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -349,13 +350,13 @@ func 
TestAccEKSNodeGroup_InstanceTypes_single(t *testing.T) { func TestAccEKSNodeGroup_labels(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2, nodeGroup3 eks.Nodegroup + var nodeGroup1, nodeGroup2, nodeGroup3 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -395,7 +396,7 @@ func TestAccEKSNodeGroup_labels(t *testing.T) { func TestAccEKSNodeGroup_LaunchTemplate_id(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) launchTemplateResourceName1 := "aws_launch_template.test1" launchTemplateResourceName2 := "aws_launch_template.test2" @@ -403,7 +404,7 @@ func TestAccEKSNodeGroup_LaunchTemplate_id(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -435,7 +436,7 @@ func TestAccEKSNodeGroup_LaunchTemplate_id(t *testing.T) { func TestAccEKSNodeGroup_LaunchTemplate_name(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) launchTemplateResourceName1 := "aws_launch_template.test1" launchTemplateResourceName2 := "aws_launch_template.test2" @@ 
-443,7 +444,7 @@ func TestAccEKSNodeGroup_LaunchTemplate_name(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -475,14 +476,14 @@ func TestAccEKSNodeGroup_LaunchTemplate_name(t *testing.T) { func TestAccEKSNodeGroup_LaunchTemplate_version(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) launchTemplateResourceName := "aws_launch_template.test" resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -514,14 +515,14 @@ func TestAccEKSNodeGroup_LaunchTemplate_version(t *testing.T) { func TestAccEKSNodeGroup_releaseVersion(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) ssmParameterDataSourceName := "data.aws_ssm_parameter.test" resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ 
-551,7 +552,7 @@ func TestAccEKSNodeGroup_releaseVersion(t *testing.T) { func TestAccEKSNodeGroup_RemoteAccess_ec2SSHKey(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 eks.Nodegroup + var nodeGroup1 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" @@ -562,7 +563,7 @@ func TestAccEKSNodeGroup_RemoteAccess_ec2SSHKey(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -585,7 +586,7 @@ func TestAccEKSNodeGroup_RemoteAccess_ec2SSHKey(t *testing.T) { func TestAccEKSNodeGroup_RemoteAccess_sourceSecurityGroupIDs(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 eks.Nodegroup + var nodeGroup1 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" @@ -596,7 +597,7 @@ func TestAccEKSNodeGroup_RemoteAccess_sourceSecurityGroupIDs(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -619,13 +620,13 @@ func TestAccEKSNodeGroup_RemoteAccess_sourceSecurityGroupIDs(t *testing.T) { func TestAccEKSNodeGroup_Scaling_desiredSize(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -661,13 +662,13 @@ func TestAccEKSNodeGroup_Scaling_desiredSize(t *testing.T) { func TestAccEKSNodeGroup_Scaling_maxSize(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -703,13 +704,13 @@ func TestAccEKSNodeGroup_Scaling_maxSize(t *testing.T) { func TestAccEKSNodeGroup_Scaling_minSize(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -745,13 +746,13 @@ func TestAccEKSNodeGroup_Scaling_minSize(t *testing.T) { func TestAccEKSNodeGroup_ScalingZeroDesiredSize_minSize(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, 
nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -797,13 +798,13 @@ func TestAccEKSNodeGroup_ScalingZeroDesiredSize_minSize(t *testing.T) { func TestAccEKSNodeGroup_tags(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2, nodeGroup3 eks.Nodegroup + var nodeGroup1, nodeGroup2, nodeGroup3 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -845,13 +846,13 @@ func TestAccEKSNodeGroup_tags(t *testing.T) { func TestAccEKSNodeGroup_taints(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 eks.Nodegroup + var nodeGroup1 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -909,13 +910,13 @@ func TestAccEKSNodeGroup_taints(t *testing.T) { func 
TestAccEKSNodeGroup_update(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 eks.Nodegroup + var nodeGroup1 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -948,13 +949,13 @@ func TestAccEKSNodeGroup_update(t *testing.T) { func TestAccEKSNodeGroup_version(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -988,7 +989,7 @@ func testAccErrorCheckSkip(t *testing.T) resource.ErrorCheckFunc { ) } -func testAccCheckNodeGroupExists(ctx context.Context, resourceName string, nodeGroup *eks.Nodegroup) resource.TestCheckFunc { +func testAccCheckNodeGroupExists(ctx context.Context, resourceName string, nodeGroup *types.Nodegroup) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] if !ok { @@ -1005,9 +1006,9 @@ func testAccCheckNodeGroupExists(ctx context.Context, resourceName string, nodeG return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) - output, 
err := tfeks.FindNodegroupByClusterNameAndNodegroupName(ctx, conn, clusterName, nodeGroupName) + output, err := tfeks.FindNodegroupByClusterNameAndNodegroupName(ctx, client, clusterName, nodeGroupName) if err != nil { return err @@ -1021,7 +1022,7 @@ func testAccCheckNodeGroupExists(ctx context.Context, resourceName string, nodeG func testAccCheckNodeGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_eks_node_group" { @@ -1034,7 +1035,7 @@ func testAccCheckNodeGroupDestroy(ctx context.Context) resource.TestCheckFunc { return err } - _, err = tfeks.FindNodegroupByClusterNameAndNodegroupName(ctx, conn, clusterName, nodeGroupName) + _, err = tfeks.FindNodegroupByClusterNameAndNodegroupName(ctx, client, clusterName, nodeGroupName) if tfresource.NotFound(err) { continue @@ -1051,20 +1052,20 @@ func testAccCheckNodeGroupDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckNodeGroupNotRecreated(i, j *eks.Nodegroup) resource.TestCheckFunc { +func testAccCheckNodeGroupNotRecreated(i, j *types.Nodegroup) resource.TestCheckFunc { return func(s *terraform.State) error { - if !aws.TimeValue(i.CreatedAt).Equal(aws.TimeValue(j.CreatedAt)) { - return fmt.Errorf("EKS Node Group (%s) was recreated", aws.StringValue(j.NodegroupName)) + if !aws.ToTime(i.CreatedAt).Equal(aws.ToTime(j.CreatedAt)) { + return fmt.Errorf("EKS Node Group (%s) was recreated", aws.ToString(j.NodegroupName)) } return nil } } -func testAccCheckNodeGroupRecreated(i, j *eks.Nodegroup) resource.TestCheckFunc { +func testAccCheckNodeGroupRecreated(i, j *types.Nodegroup) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.TimeValue(i.CreatedAt).Equal(aws.TimeValue(j.CreatedAt)) { - return fmt.Errorf("EKS Node Group (%s) was not 
recreated", aws.StringValue(j.NodegroupName)) + if aws.ToTime(i.CreatedAt).Equal(aws.ToTime(j.CreatedAt)) { + return fmt.Errorf("EKS Node Group (%s) was not recreated", aws.ToString(j.NodegroupName)) } return nil diff --git a/internal/service/eks/node_groups_data_source.go b/internal/service/eks/node_groups_data_source.go index 4f3eb7bc7dc..7d321d02eef 100644 --- a/internal/service/eks/node_groups_data_source.go +++ b/internal/service/eks/node_groups_data_source.go @@ -6,8 +6,8 @@ package eks import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -37,34 +37,29 @@ func DataSourceNodeGroups() *schema.Resource { func dataSourceNodeGroupsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSConn(ctx) - + client := meta.(*conns.AWSClient).EKSClient(ctx) clusterName := d.Get("cluster_name").(string) input := &eks.ListNodegroupsInput{ ClusterName: aws.String(clusterName), } - var nodegroups []*string - - err := conn.ListNodegroupsPagesWithContext(ctx, input, func(page *eks.ListNodegroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + var nodegroups []string - nodegroups = append(nodegroups, page.Nodegroups...) + paginator := eks.NewListNodegroupsPaginator(client, input) + for paginator.HasMorePages() { + output, err := paginator.NextPage(ctx) - return !lastPage - }) + if err != nil { + return sdkdiag.AppendErrorf(diags, "listing EKS Node Groups: %s", err) + } - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing EKS Node Groups: %s", err) + nodegroups = append(nodegroups, output.Nodegroups...) 
} d.SetId(clusterName) - d.Set("cluster_name", clusterName) - d.Set("names", aws.StringValueSlice(nodegroups)) + d.Set("names", nodegroups) return diags } diff --git a/internal/service/eks/node_groups_data_source_test.go b/internal/service/eks/node_groups_data_source_test.go index 2dd1c1c8942..25bd6b1b52b 100644 --- a/internal/service/eks/node_groups_data_source_test.go +++ b/internal/service/eks/node_groups_data_source_test.go @@ -7,7 +7,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -20,7 +20,7 @@ func TestAccEKSNodeGroupsDataSource_basic(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index 9e6d4ad9270..ae3f5129a26 100644 --- a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -5,9 +5,8 @@ package eks import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - eks_sdkv1 "github.com/aws/aws-sdk-go/service/eks" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + eks_sdkv2 "github.com/aws/aws-sdk-go-v2/service/eks" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -105,11 +104,15 @@ func (p *servicePackage) ServicePackageName() string { return names.EKS } 
-// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*eks_sdkv1.EKS, error) { - sess := config["session"].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*eks_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return eks_sdkv1.New(sess.Copy(&aws_sdkv1.Config{Endpoint: aws_sdkv1.String(config["endpoint"].(string))})), nil + return eks_sdkv2.NewFromConfig(cfg, func(o *eks_sdkv2.Options) { + if endpoint := config["endpoint"].(string); endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + }), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/eks/status.go b/internal/service/eks/status.go index 1716b138070..bf237126344 100644 --- a/internal/service/eks/status.go +++ b/internal/service/eks/status.go @@ -6,15 +6,14 @@ package eks import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func statusAddon(ctx context.Context, conn *eks.EKS, clusterName, addonName string) retry.StateRefreshFunc { +func statusAddon(ctx context.Context, client *eks.Client, clusterName, addonName string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindAddonByClusterNameAndAddonName(ctx, conn, clusterName, addonName) + output, err := FindAddonByClusterNameAndAddonName(ctx, client, clusterName, addonName) if tfresource.NotFound(err) { return nil, "", nil @@ -24,13 +23,13 @@ func statusAddon(ctx context.Context, conn *eks.EKS, clusterName, addonName stri return nil, "", err } - return output, 
aws.StringValue(output.Status), nil + return output, string(output.Status), nil } } -func statusAddonUpdate(ctx context.Context, conn *eks.EKS, clusterName, addonName, id string) retry.StateRefreshFunc { +func statusAddonUpdate(ctx context.Context, client *eks.Client, clusterName, addonName, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindAddonUpdateByClusterNameAddonNameAndID(ctx, conn, clusterName, addonName, id) + output, err := FindAddonUpdateByClusterNameAddonNameAndID(ctx, client, clusterName, addonName, id) if tfresource.NotFound(err) { return nil, "", nil @@ -40,13 +39,13 @@ func statusAddonUpdate(ctx context.Context, conn *eks.EKS, clusterName, addonNam return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, string(output.Status), nil } } -func statusFargateProfile(ctx context.Context, conn *eks.EKS, clusterName, fargateProfileName string) retry.StateRefreshFunc { +func statusFargateProfile(ctx context.Context, client *eks.Client, clusterName, fargateProfileName string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindFargateProfileByClusterNameAndFargateProfileName(ctx, conn, clusterName, fargateProfileName) + output, err := FindFargateProfileByClusterNameAndFargateProfileName(ctx, client, clusterName, fargateProfileName) if tfresource.NotFound(err) { return nil, "", nil @@ -56,13 +55,13 @@ func statusFargateProfile(ctx context.Context, conn *eks.EKS, clusterName, farga return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, string(output.Status), nil } } -func statusNodegroup(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName string) retry.StateRefreshFunc { +func statusNodegroup(ctx context.Context, client *eks.Client, clusterName, nodeGroupName string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := 
FindNodegroupByClusterNameAndNodegroupName(ctx, conn, clusterName, nodeGroupName) + output, err := FindNodegroupByClusterNameAndNodegroupName(ctx, client, clusterName, nodeGroupName) if tfresource.NotFound(err) { return nil, "", nil @@ -72,13 +71,13 @@ func statusNodegroup(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupN return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, string(output.Status), nil } } -func statusNodegroupUpdate(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName, id string) retry.StateRefreshFunc { +func statusNodegroupUpdate(ctx context.Context, client *eks.Client, clusterName, nodeGroupName, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindNodegroupUpdateByClusterNameNodegroupNameAndID(ctx, conn, clusterName, nodeGroupName, id) + output, err := FindNodegroupUpdateByClusterNameNodegroupNameAndID(ctx, client, clusterName, nodeGroupName, id) if tfresource.NotFound(err) { return nil, "", nil @@ -88,13 +87,13 @@ func statusNodegroupUpdate(ctx context.Context, conn *eks.EKS, clusterName, node return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, string(output.Status), nil } } -func statusOIDCIdentityProviderConfig(ctx context.Context, conn *eks.EKS, clusterName, configName string) retry.StateRefreshFunc { +func statusOIDCIdentityProviderConfig(ctx context.Context, client *eks.Client, clusterName, configName string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, conn, clusterName, configName) + output, err := FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, client, clusterName, configName) if tfresource.NotFound(err) { return nil, "", nil @@ -104,6 +103,6 @@ func statusOIDCIdentityProviderConfig(ctx context.Context, conn *eks.EKS, cluste return nil, "", err } - return output, 
aws.StringValue(output.Status), nil + return output, string(output.Status), nil } } diff --git a/internal/service/eks/sweep.go b/internal/service/eks/sweep.go index b6338af1e91..23d99214f25 100644 --- a/internal/service/eks/sweep.go +++ b/internal/service/eks/sweep.go @@ -7,13 +7,14 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -51,73 +52,64 @@ func RegisterSweepers() { func sweepAddons(region string) error { ctx := sweep.Context(region) - client, err := sweep.SharedRegionalSweepClient(ctx, region) + sweepClient, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { return fmt.Errorf("error getting client: %w", err) } - - conn := client.EKSConn(ctx) - input := &eks.ListClustersInput{} var sweeperErrs *multierror.Error sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListClustersPagesWithContext(ctx, input, func(page *eks.ListClustersOutput, lastPage bool) bool { - if page == nil { - return !lastPage + client := sweepClient.EKSClient(ctx) + + paginator := eks.NewListClustersPaginator(client, &eks.ListClustersInput{}) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Print(fmt.Errorf("[WARN] Skipping EKS Add-Ons sweep for %s: %w", region, err)) + return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had
errors } - for _, v := range page.Clusters { - clusterName := aws.StringValue(v) + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) + } + + for _, cluster := range page.Clusters { input := &eks.ListAddonsInput{ - ClusterName: aws.String(clusterName), + ClusterName: &cluster, } - err := conn.ListAddonsPagesWithContext(ctx, input, func(page *eks.ListAddonsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.Addons { - r := ResourceAddon() - d := r.Data(nil) - d.SetId(AddonCreateResourceID(clusterName, aws.StringValue(v))) + paginator := eks.NewListAddonsPaginator(client, input) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) - sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + if awsv2.SkipSweepError(err) { + continue } - return !lastPage - }) + // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. + // ¯\_(ツ)_/¯ + if errs.IsA[*types.ResourceNotFoundException](err) { + log.Print(fmt.Errorf("[WARN] Skipping cluster %s not found: %w", cluster, err)) + continue + } - if awsv1.SkipSweepError(err) { - continue - } + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Add-Ons (%s): %w", region, err)) + } - // There are EKS clusters that are listed (and are in the AWS Console) but can't be found.
- // ¯\_(ツ)_/¯ - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { - continue - } + for _, addon := range page.Addons { + r := ResourceAddon() + d := r.Data(nil) + d.SetId(AddonCreateResourceID(cluster, addon)) - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Add-Ons (%s): %w", region, err)) + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, sweepClient)) + } } } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Print(fmt.Errorf("[WARN] Skipping EKS Add-Ons sweep for %s: %w", region, err)) - return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) } - err = sweep.SweepOrchestrator(ctx, sweepResources) - - if err != nil { + if err := sweep.SweepOrchestrator(ctx, sweepResources); err != nil { sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error sweeping EKS Add-Ons (%s): %w", region, err)) } @@ -126,42 +118,37 @@ func sweepAddons(region string) error { func sweepClusters(region string) error { ctx := sweep.Context(region) - client, err := sweep.SharedRegionalSweepClient(ctx, region) + sweepClient, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("error getting client: %w", err) } - conn := client.EKSConn(ctx) - input := &eks.ListClustersInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListClustersPagesWithContext(ctx, input, func(page *eks.ListClustersOutput, lastPage bool) bool { - if page == nil { - return !lastPage + client := sweepClient.EKSClient(ctx) + + paginator := eks.NewListClustersPaginator(client, &eks.ListClustersInput{}) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping EKS Clusters 
sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing EKS Clusters (%s): %w", region, err) } for _, cluster := range page.Clusters { r := ResourceCluster() d := r.Data(nil) - d.SetId(aws.StringValue(cluster)) + d.SetId(cluster) - sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, sweepClient)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping EKS Clusters sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing EKS Clusters (%s): %w", region, err) } - err = sweep.SweepOrchestrator(ctx, sweepResources) - - if err != nil { + if err := sweep.SweepOrchestrator(ctx, sweepResources); err != nil { return fmt.Errorf("error sweeping EKS Clusters (%s): %w", region, err) } @@ -170,66 +157,60 @@ func sweepClusters(region string) error { func sweepFargateProfiles(region string) error { ctx := sweep.Context(region) - client, err := sweep.SharedRegionalSweepClient(ctx, region) + sweepClient, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { return fmt.Errorf("error getting client: %w", err) } - conn := client.EKSConn(ctx) - input := &eks.ListClustersInput{} var sweeperErrs *multierror.Error sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListClustersPagesWithContext(ctx, input, func(page *eks.ListClustersOutput, lastPage bool) bool { - if page == nil { - return !lastPage + client := sweepClient.EKSClient(ctx) + + paginator := eks.NewListClustersPaginator(client, &eks.ListClustersInput{}) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping EKS Fargate Profiles sweep for %s: %s", region, err) + return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors + } + + if err != nil { + sweeperErrs = 
multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) } for _, cluster := range page.Clusters { input := &eks.ListFargateProfilesInput{ - ClusterName: cluster, + ClusterName: &cluster, } - err := conn.ListFargateProfilesPagesWithContext(ctx, input, func(page *eks.ListFargateProfilesOutput, lastPage bool) bool { - if page == nil { - return !lastPage + paginator := eks.NewListFargateProfilesPaginator(client, input) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + continue + } + + // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. + // ¯\_(ツ)_/¯ + if errs.IsA[*types.ResourceNotFoundException](err) { + continue + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Fargate Profiles (%s): %w", region, err)) } for _, profile := range page.FargateProfileNames { r := ResourceFargateProfile() d := r.Data(nil) - d.SetId(FargateProfileCreateResourceID(aws.StringValue(cluster), aws.StringValue(profile))) + d.SetId(FargateProfileCreateResourceID(cluster, profile)) - sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, sweepClient)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - continue - } - - // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. 
- // ¯\_(ツ)_/¯ - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { - continue - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Fargate Profiles (%s): %w", region, err)) - } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping EKS Fargate Profiles sweep for %s: %s", region, err) - return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) } } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -243,72 +224,63 @@ func sweepFargateProfiles(region string) error { func sweepIdentityProvidersConfig(region string) error { ctx := sweep.Context(region) - client, err := sweep.SharedRegionalSweepClient(ctx, region) + sweepClient, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { return fmt.Errorf("error getting client: %w", err) } - - conn := client.EKSConn(ctx) - input := &eks.ListClustersInput{} var sweeperErrs *multierror.Error sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListClustersPagesWithContext(ctx, input, func(page *eks.ListClustersOutput, lastPage bool) bool { - if page == nil { - return !lastPage + client := sweepClient.EKSClient(ctx) + + paginator := eks.NewListClustersPaginator(client, &eks.ListClustersInput{}) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Print(fmt.Errorf("[WARN] Skipping EKS Identity Provider Configs sweep for %s: %w", region, err)) + return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) } for _, cluster := range page.Clusters { input := &eks.ListIdentityProviderConfigsInput{ - ClusterName: cluster, + ClusterName: &cluster,
} - err := conn.ListIdentityProviderConfigsPagesWithContext(ctx, input, func(page *eks.ListIdentityProviderConfigsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + paginator := eks.NewListIdentityProviderConfigsPaginator(client, input) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + continue + } + + // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. + // ¯\_(ツ)_/¯ + if errs.IsA[*types.ResourceNotFoundException](err) { + continue + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Identity Provider Configs (%s): %w", region, err)) } for _, identityProviderConfig := range page.IdentityProviderConfigs { r := ResourceIdentityProviderConfig() d := r.Data(nil) - d.SetId(IdentityProviderConfigCreateResourceID(aws.StringValue(cluster), aws.StringValue(identityProviderConfig.Name))) + d.SetId(IdentityProviderConfigCreateResourceID(cluster, aws.ToString(identityProviderConfig.Name))) - sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, sweepClient)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - continue - } - - // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. 
- // ¯\_(ツ)_/¯ - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { - continue - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Identity Provider Configs (%s): %w", region, err)) } } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Print(fmt.Errorf("[WARN] Skipping EKS Identity Provider Configs sweep for %s: %w", region, err)) - return sweeperErrs // In case we have completed some pages, but had errors - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) } - err = sweep.SweepOrchestrator(ctx, sweepResources) - - if err != nil { + if err := sweep.SweepOrchestrator(ctx, sweepResources); err != nil { sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error sweeping EKS Identity Provider Configs (%s): %w", region, err)) } @@ -317,71 +289,62 @@ func sweepIdentityProvidersConfig(region string) error { func sweepNodeGroups(region string) error { ctx := sweep.Context(region) - client, err := sweep.SharedRegionalSweepClient(ctx, region) + sweepClient, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { return fmt.Errorf("error getting client: %w", err) } - conn := client.EKSConn(ctx) - input := &eks.ListClustersInput{} var sweeperErrs *multierror.Error sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListClustersPagesWithContext(ctx, input, func(page *eks.ListClustersOutput, lastPage bool) bool { - if page == nil { - return !lastPage + client := sweepClient.EKSClient(ctx) + + paginator := eks.NewListClustersPaginator(client, &eks.ListClustersInput{}) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping EKS Node Groups sweep for %s: %s", region, err) + return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors + } + + if err != nil { + sweeperErrs = 
multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) } for _, cluster := range page.Clusters { input := &eks.ListNodegroupsInput{ - ClusterName: cluster, + ClusterName: &cluster, } - err := conn.ListNodegroupsPagesWithContext(ctx, input, func(page *eks.ListNodegroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + paginator := eks.NewListNodegroupsPaginator(client, input) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + continue + } + // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. + // ¯\_(ツ)_/¯ + if errs.IsA[*types.ResourceNotFoundException](err) { + continue + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Node Groups (%s): %w", region, err)) } for _, nodeGroup := range page.Nodegroups { r := ResourceNodeGroup() d := r.Data(nil) - d.SetId(NodeGroupCreateResourceID(aws.StringValue(cluster), aws.StringValue(nodeGroup))) + d.SetId(NodeGroupCreateResourceID(cluster, nodeGroup)) - sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, sweepClient)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - continue - } - - // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. 
- // ¯\_(ツ)_/¯ - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { - continue - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Node Groups (%s): %w", region, err)) } } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping EKS Node Groups sweep for %s: %s", region, err) - return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors } - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) - } - - err = sweep.SweepOrchestrator(ctx, sweepResources) - - if err != nil { + if err := sweep.SweepOrchestrator(ctx, sweepResources); err != nil { sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error sweeping EKS Node Groups (%s): %w", region, err)) } diff --git a/internal/service/eks/tags_gen.go b/internal/service/eks/tags_gen.go index b83a214f802..3f9c737dcce 100644 --- a/internal/service/eks/tags_gen.go +++ b/internal/service/eks/tags_gen.go @@ -5,9 +5,8 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/aws/aws-sdk-go/service/eks/eksiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +18,12 @@ import ( // listTags lists eks service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func listTags(ctx context.Context, conn eksiface.EKSAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *eks.Client, identifier string) (tftags.KeyValueTags, error) { input := &eks.ListTagsForResourceInput{ ResourceArn: aws.String(identifier), } - output, err := conn.ListTagsForResourceWithContext(ctx, input) + output, err := conn.ListTagsForResource(ctx, input) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +35,7 @@ func listTags(ctx context.Context, conn eksiface.EKSAPI, identifier string) (tft // ListTags lists eks service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).EKSConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).EKSClient(ctx), identifier) if err != nil { return err @@ -49,21 +48,21 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri return nil } -// map[string]*string handling +// map[string]string handling // Tags returns eks service tags. -func Tags(tags tftags.KeyValueTags) map[string]*string { - return aws.StringMap(tags.Map()) +func Tags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() } // KeyValueTags creates tftags.KeyValueTags from eks service tags. -func KeyValueTags(ctx context.Context, tags map[string]*string) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { return tftags.New(ctx, tags) } // getTagsIn returns eks service tags from Context. // nil is returned if there are no input tags. 
-func getTagsIn(ctx context.Context) map[string]*string { +func getTagsIn(ctx context.Context) map[string]string { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -74,7 +73,7 @@ func getTagsIn(ctx context.Context) map[string]*string { } // setTagsOut sets eks service tags in Context. -func setTagsOut(ctx context.Context, tags map[string]*string) { +func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) } @@ -83,7 +82,7 @@ func setTagsOut(ctx context.Context, tags map[string]*string) { // updateTags updates eks service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func updateTags(ctx context.Context, conn eksiface.EKSAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *eks.Client, identifier string, oldTagsMap, newTagsMap any) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -94,10 +93,10 @@ func updateTags(ctx context.Context, conn eksiface.EKSAPI, identifier string, ol if len(removedTags) > 0 { input := &eks.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -112,7 +111,7 @@ func updateTags(ctx context.Context, conn eksiface.EKSAPI, identifier string, ol Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -125,5 +124,5 @@ func updateTags(ctx context.Context, conn 
eksiface.EKSAPI, identifier string, ol // UpdateTags updates eks service tags. // It is called from outside this package. func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).EKSConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).EKSClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/eks/wait.go b/internal/service/eks/wait.go index 57a94e414fd..fa70122180a 100644 --- a/internal/service/eks/wait.go +++ b/internal/service/eks/wait.go @@ -7,9 +7,10 @@ import ( "context" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) @@ -17,60 +18,18 @@ const ( clusterDeleteRetryTimeout = 60 * time.Minute ) -func waitAddonCreated(ctx context.Context, conn *eks.EKS, clusterName, addonName string, timeout time.Duration) (*eks.Addon, error) { +func waitAddonUpdateSuccessful(ctx context.Context, client *eks.Client, clusterName, addonName, id string, timeout time.Duration) (*types.Update, error) { stateConf := retry.StateChangeConf{ - Pending: []string{eks.AddonStatusCreating, eks.AddonStatusDegraded}, - Target: []string{eks.AddonStatusActive}, - Refresh: statusAddon(ctx, conn, clusterName, addonName), + Pending: enum.Slice(types.UpdateStatusInProgress), + Target: enum.Slice(types.UpdateStatusSuccessful), + Refresh: statusAddonUpdate(ctx, client, clusterName, addonName, id), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*eks.Addon); ok { - if status, health := aws.StringValue(output.Status), output.Health; status == eks.AddonStatusCreateFailed && 
health != nil { - tfresource.SetLastError(err, AddonIssuesError(health.Issues)) - } - - return output, err - } - - return nil, err -} - -func waitAddonDeleted(ctx context.Context, conn *eks.EKS, clusterName, addonName string, timeout time.Duration) (*eks.Addon, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{eks.AddonStatusActive, eks.AddonStatusDeleting}, - Target: []string{}, - Refresh: statusAddon(ctx, conn, clusterName, addonName), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*eks.Addon); ok { - if status, health := aws.StringValue(output.Status), output.Health; status == eks.AddonStatusDeleteFailed && health != nil { - tfresource.SetLastError(err, AddonIssuesError(health.Issues)) - } - - return output, err - } - - return nil, err -} - -func waitAddonUpdateSuccessful(ctx context.Context, conn *eks.EKS, clusterName, addonName, id string, timeout time.Duration) (*eks.Update, error) { - stateConf := retry.StateChangeConf{ - Pending: []string{eks.UpdateStatusInProgress}, - Target: []string{eks.UpdateStatusSuccessful}, - Refresh: statusAddonUpdate(ctx, conn, clusterName, addonName, id), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*eks.Update); ok { - if status := aws.StringValue(output.Status); status == eks.UpdateStatusCancelled || status == eks.UpdateStatusFailed { + if output, ok := outputRaw.(*types.Update); ok { + if status := output.Status; status == types.UpdateStatusCancelled || status == types.UpdateStatusFailed { tfresource.SetLastError(err, ErrorDetailsError(output.Errors)) } @@ -80,94 +39,18 @@ func waitAddonUpdateSuccessful(ctx context.Context, conn *eks.EKS, clusterName, return nil, err } -func waitFargateProfileCreated(ctx context.Context, conn *eks.EKS, clusterName, fargateProfileName string, timeout time.Duration) (*eks.FargateProfile, error) { - stateConf := &retry.StateChangeConf{ - Pending: 
[]string{eks.FargateProfileStatusCreating}, - Target: []string{eks.FargateProfileStatusActive}, - Refresh: statusFargateProfile(ctx, conn, clusterName, fargateProfileName), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*eks.FargateProfile); ok { - return output, err - } - - return nil, err -} - -func waitFargateProfileDeleted(ctx context.Context, conn *eks.EKS, clusterName, fargateProfileName string, timeout time.Duration) (*eks.FargateProfile, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{eks.FargateProfileStatusActive, eks.FargateProfileStatusDeleting}, - Target: []string{}, - Refresh: statusFargateProfile(ctx, conn, clusterName, fargateProfileName), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*eks.FargateProfile); ok { - return output, err - } - - return nil, err -} - -func waitNodegroupCreated(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName string, timeout time.Duration) (*eks.Nodegroup, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{eks.NodegroupStatusCreating}, - Target: []string{eks.NodegroupStatusActive}, - Refresh: statusNodegroup(ctx, conn, clusterName, nodeGroupName), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*eks.Nodegroup); ok { - if status, health := aws.StringValue(output.Status), output.Health; status == eks.NodegroupStatusCreateFailed && health != nil { - tfresource.SetLastError(err, IssuesError(health.Issues)) - } - - return output, err - } - - return nil, err -} - -func waitNodegroupDeleted(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName string, timeout time.Duration) (*eks.Nodegroup, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{eks.NodegroupStatusActive, eks.NodegroupStatusDeleting}, - Target: []string{}, - Refresh: statusNodegroup(ctx, conn, 
clusterName, nodeGroupName), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*eks.Nodegroup); ok { - if status, health := aws.StringValue(output.Status), output.Health; status == eks.NodegroupStatusDeleteFailed && health != nil { - tfresource.SetLastError(err, IssuesError(health.Issues)) - } - - return output, err - } - - return nil, err -} - -func waitNodegroupUpdateSuccessful(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName, id string, timeout time.Duration) (*eks.Update, error) { //nolint:unparam +func waitNodegroupUpdateSuccessful(ctx context.Context, client *eks.Client, clusterName, nodeGroupName, id string, timeout time.Duration) (*types.Update, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ - Pending: []string{eks.UpdateStatusInProgress}, - Target: []string{eks.UpdateStatusSuccessful}, - Refresh: statusNodegroupUpdate(ctx, conn, clusterName, nodeGroupName, id), + Pending: enum.Slice(types.UpdateStatusInProgress), + Target: enum.Slice(types.UpdateStatusSuccessful), + Refresh: statusNodegroupUpdate(ctx, client, clusterName, nodeGroupName, id), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*eks.Update); ok { - if status := aws.StringValue(output.Status); status == eks.UpdateStatusCancelled || status == eks.UpdateStatusFailed { + if output, ok := outputRaw.(*types.Update); ok { + if status := output.Status; status == types.UpdateStatusCancelled || status == types.UpdateStatusFailed { tfresource.SetLastError(err, ErrorDetailsError(output.Errors)) } @@ -177,34 +60,34 @@ func waitNodegroupUpdateSuccessful(ctx context.Context, conn *eks.EKS, clusterNa return nil, err } -func waitOIDCIdentityProviderConfigCreated(ctx context.Context, conn *eks.EKS, clusterName, configName string, timeout time.Duration) (*eks.OidcIdentityProviderConfig, error) { +func waitOIDCIdentityProviderConfigCreated(ctx context.Context, client 
*eks.Client, clusterName, configName string, timeout time.Duration) (*types.OidcIdentityProviderConfig, error) { stateConf := retry.StateChangeConf{ - Pending: []string{eks.ConfigStatusCreating}, - Target: []string{eks.ConfigStatusActive}, - Refresh: statusOIDCIdentityProviderConfig(ctx, conn, clusterName, configName), + Pending: enum.Slice(types.ConfigStatusCreating), + Target: enum.Slice(types.ConfigStatusActive), + Refresh: statusOIDCIdentityProviderConfig(ctx, client, clusterName, configName), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*eks.OidcIdentityProviderConfig); ok { + if output, ok := outputRaw.(*types.OidcIdentityProviderConfig); ok { return output, err } return nil, err } -func waitOIDCIdentityProviderConfigDeleted(ctx context.Context, conn *eks.EKS, clusterName, configName string, timeout time.Duration) (*eks.OidcIdentityProviderConfig, error) { +func waitOIDCIdentityProviderConfigDeleted(ctx context.Context, client *eks.Client, clusterName, configName string, timeout time.Duration) (*types.OidcIdentityProviderConfig, error) { stateConf := retry.StateChangeConf{ - Pending: []string{eks.ConfigStatusActive, eks.ConfigStatusDeleting}, + Pending: enum.Slice(types.ConfigStatusActive, types.ConfigStatusDeleting), Target: []string{}, - Refresh: statusOIDCIdentityProviderConfig(ctx, conn, clusterName, configName), + Refresh: statusOIDCIdentityProviderConfig(ctx, client, clusterName, configName), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*eks.OidcIdentityProviderConfig); ok { + if output, ok := outputRaw.(*types.OidcIdentityProviderConfig); ok { return output, err } diff --git a/names/names_data.csv b/names/names_data.csv index e0fbae874eb..43dced7e57f 100644 --- a/names/names_data.csv +++ b/names/names_data.csv @@ -131,7 +131,7 @@ ecr,ecr,ecr,ecr,,ecr,,,ECR,ECR,,1,,,aws_ecr_,,ecr_,ECR (Elastic Container Regist 
ecr-public,ecrpublic,ecrpublic,ecrpublic,,ecrpublic,,,ECRPublic,ECRPublic,,1,,,aws_ecrpublic_,,ecrpublic_,ECR Public,Amazon,,,,,, ecs,ecs,ecs,ecs,,ecs,,,ECS,ECS,,1,,,aws_ecs_,,ecs_,ECS (Elastic Container),Amazon,,,,,, efs,efs,efs,efs,,efs,,,EFS,EFS,,1,,,aws_efs_,,efs_,EFS (Elastic File System),Amazon,,,,,, -eks,eks,eks,eks,,eks,,,EKS,EKS,,1,,,aws_eks_,,eks_,EKS (Elastic Kubernetes),Amazon,,,,,, +eks,eks,eks,eks,,eks,,,EKS,EKS,,,2,,aws_eks_,,eks_,EKS (Elastic Kubernetes),Amazon,,,,,, elasticbeanstalk,elasticbeanstalk,elasticbeanstalk,elasticbeanstalk,,elasticbeanstalk,,beanstalk,ElasticBeanstalk,ElasticBeanstalk,,1,,aws_elastic_beanstalk_,aws_elasticbeanstalk_,,elastic_beanstalk_,Elastic Beanstalk,AWS,,,,,, elastic-inference,elasticinference,elasticinference,elasticinference,,elasticinference,,,ElasticInference,ElasticInference,,1,,,aws_elasticinference_,,elasticinference_,Elastic Inference,Amazon,,x,,,, elastictranscoder,elastictranscoder,elastictranscoder,elastictranscoder,,elastictranscoder,,,ElasticTranscoder,ElasticTranscoder,,1,,,aws_elastictranscoder_,,elastictranscoder_,Elastic Transcoder,Amazon,,,,,, From 58da0462cc21084b9b29b22deabc4ed60ab49366 Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Wed, 25 Oct 2023 15:18:42 -0400 Subject: [PATCH 02/46] chore: Lint corrections --- internal/service/eks/cluster.go | 78 ++++++++++--------------- internal/service/eks/fargate_profile.go | 2 +- internal/service/eks/node_group.go | 2 +- internal/service/eks/status.go | 48 --------------- 4 files changed, 33 insertions(+), 97 deletions(-) diff --git a/internal/service/eks/cluster.go b/internal/service/eks/cluster.go index d12c3b29aac..fed5a6800cf 100644 --- a/internal/service/eks/cluster.go +++ b/internal/service/eks/cluster.go @@ -535,12 +535,12 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta int _, err = client.DeleteCluster(ctx, input) - if errs.IsA[*types.ResourceInUseException](err) { - return retry.RetryableError(err) - } - if err 
!= nil { - return retry.NonRetryableError(err) + if errs.IsA[*types.ResourceInUseException](err) { + return retry.RetryableError(err) + } else { + return retry.NonRetryableError(err) + } } return nil @@ -550,19 +550,19 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta int _, err = client.DeleteCluster(ctx, input) } - if errs.IsA[*types.ResourceNotFoundException](err) { - return nil - } - - // Sometimes the EKS API returns the ResourceNotFound error in this form: - // ClientException: No cluster found for name: tf-acc-test-0o1f8 - if errs.IsA[*types.ClientException](err) { - if strings.Contains(err.Error(), "No cluster found for name:") { + if err != nil { + if errs.IsA[*types.ResourceNotFoundException](err) { return nil } - } - if err != nil { + // Sometimes the EKS API returns the ResourceNotFound error in this form: + // ClientException: No cluster found for name: tf-acc-test-0o1f8 + if errs.IsA[*types.ClientException](err) { + if strings.Contains(err.Error(), "No cluster found for name:") { + return nil + } + } + return diag.Errorf("deleting EKS Cluster (%s): %s", d.Id(), err) } @@ -586,24 +586,24 @@ func FindClusterByName(ctx context.Context, client *eks.Client, name string) (*t output, err := client.DescribeCluster(ctx, input) - // Sometimes the EKS API returns the ResourceNotFound error in this form: - // ClientException: No cluster found for name: tf-acc-test-0o1f8 - if errs.IsA[*types.ResourceNotFoundException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - if errs.IsA[*types.ClientException](err) { - if strings.Contains(err.Error(), "No cluster found for name:") { + if err != nil { + // Sometimes the EKS API returns the ResourceNotFound error in this form: + // ClientException: No cluster found for name: tf-acc-test-0o1f8 + if errs.IsA[*types.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, } } - } + if 
errs.IsA[*types.ClientException](err) { + if strings.Contains(err.Error(), "No cluster found for name:") { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + } - if err != nil { return nil, err } @@ -640,22 +640,6 @@ func findClusterUpdateByTwoPartKey(ctx context.Context, client *eks.Client, name return output.Update, nil } -func statusCluster(ctx context.Context, client *eks.Client, name string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindClusterByName(ctx, client, name) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, string(output.Status), nil - } -} - func statusClusterUpdate(ctx context.Context, client *eks.Client, name, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findClusterUpdateByTwoPartKey(ctx, client, name, id) @@ -806,13 +790,13 @@ func expandVPCConfigRequestForCreate(tfList []interface{}) *types.VpcConfigReque securityGroupIds := flex.ExpandStringSet(m["security_group_ids"].(*schema.Set)) securityGroupIdsSlice := make([]string, len(securityGroupIds)) for i, id := range securityGroupIds { - securityGroupIdsSlice[i] = *id + securityGroupIdsSlice[i] = aws.ToString(id) } subnetIds := flex.ExpandStringSet(m["subnet_ids"].(*schema.Set)) subnetIdsSlice := make([]string, len(subnetIds)) for i, id := range subnetIds { - subnetIdsSlice[i] = *id + subnetIdsSlice[i] = aws.ToString(id) } vpcConfigRequest := &types.VpcConfigRequest{ @@ -826,7 +810,7 @@ func expandVPCConfigRequestForCreate(tfList []interface{}) *types.VpcConfigReque publicAccessCidrs := flex.ExpandStringSet(v) vpcConfigRequest.PublicAccessCidrs = make([]string, len(publicAccessCidrs)) for i, cidr := range publicAccessCidrs { - vpcConfigRequest.PublicAccessCidrs[i] = *cidr + vpcConfigRequest.PublicAccessCidrs[i] = aws.ToString(cidr) } } @@ -849,7 +833,7 @@ func 
expandVPCConfigRequestForUpdate(tfList []interface{}) *types.VpcConfigReque publicAccessCidrs := flex.ExpandStringSet(v) vpcConfigRequest.PublicAccessCidrs = make([]string, len(publicAccessCidrs)) for i, cidr := range publicAccessCidrs { - vpcConfigRequest.PublicAccessCidrs[i] = *cidr + vpcConfigRequest.PublicAccessCidrs[i] = aws.ToString(cidr) } } diff --git a/internal/service/eks/fargate_profile.go b/internal/service/eks/fargate_profile.go index a4addb0176e..c1b91a325db 100644 --- a/internal/service/eks/fargate_profile.go +++ b/internal/service/eks/fargate_profile.go @@ -285,7 +285,7 @@ func expandFargateProfileSelectors(l []interface{}) []types.FargateProfileSelect fargateProfileSelector.Labels = make(map[string]string) for key, value := range flex.ExpandStringMap(v) { val := value - fargateProfileSelector.Labels[key] = *val + fargateProfileSelector.Labels[key] = aws.ToString(val) } } diff --git a/internal/service/eks/node_group.go b/internal/service/eks/node_group.go index 586e20529d2..e240a699151 100644 --- a/internal/service/eks/node_group.go +++ b/internal/service/eks/node_group.go @@ -478,7 +478,7 @@ func resourceNodeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i input := &eks.UpdateNodegroupVersionInput{ ClientRequestToken: aws.String(id.UniqueId()), ClusterName: aws.String(clusterName), - Force: *aws.Bool(d.Get("force_update_version").(bool)), + Force: d.Get("force_update_version").(bool), NodegroupName: aws.String(nodeGroupName), } diff --git a/internal/service/eks/status.go b/internal/service/eks/status.go index bf237126344..57103a5500b 100644 --- a/internal/service/eks/status.go +++ b/internal/service/eks/status.go @@ -11,22 +11,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func statusAddon(ctx context.Context, client *eks.Client, clusterName, addonName string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindAddonByClusterNameAndAddonName(ctx, client, 
clusterName, addonName) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, string(output.Status), nil - } -} - func statusAddonUpdate(ctx context.Context, client *eks.Client, clusterName, addonName, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := FindAddonUpdateByClusterNameAddonNameAndID(ctx, client, clusterName, addonName, id) @@ -43,38 +27,6 @@ func statusAddonUpdate(ctx context.Context, client *eks.Client, clusterName, add } } -func statusFargateProfile(ctx context.Context, client *eks.Client, clusterName, fargateProfileName string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindFargateProfileByClusterNameAndFargateProfileName(ctx, client, clusterName, fargateProfileName) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, string(output.Status), nil - } -} - -func statusNodegroup(ctx context.Context, client *eks.Client, clusterName, nodeGroupName string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindNodegroupByClusterNameAndNodegroupName(ctx, client, clusterName, nodeGroupName) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, string(output.Status), nil - } -} - func statusNodegroupUpdate(ctx context.Context, client *eks.Client, clusterName, nodeGroupName, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := FindNodegroupUpdateByClusterNameNodegroupNameAndID(ctx, client, clusterName, nodeGroupName, id) From ddb72d0bcf2ab37fe39339217988c6911365c2de Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 6 Nov 2023 16:24:08 -0500 Subject: [PATCH 03/46] Revert "chore: Lint corrections" This reverts commit 58da0462cc21084b9b29b22deabc4ed60ab49366. 
--- internal/service/eks/cluster.go | 78 +++++++++++++++---------- internal/service/eks/fargate_profile.go | 2 +- internal/service/eks/node_group.go | 2 +- internal/service/eks/status.go | 48 +++++++++++++++ 4 files changed, 97 insertions(+), 33 deletions(-) diff --git a/internal/service/eks/cluster.go b/internal/service/eks/cluster.go index fed5a6800cf..d12c3b29aac 100644 --- a/internal/service/eks/cluster.go +++ b/internal/service/eks/cluster.go @@ -535,12 +535,12 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta int _, err = client.DeleteCluster(ctx, input) + if errs.IsA[*types.ResourceInUseException](err) { + return retry.RetryableError(err) + } + if err != nil { - if errs.IsA[*types.ResourceInUseException](err) { - return retry.RetryableError(err) - } else { - return retry.NonRetryableError(err) - } + return retry.NonRetryableError(err) } return nil @@ -550,19 +550,19 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta int _, err = client.DeleteCluster(ctx, input) } - if err != nil { - if errs.IsA[*types.ResourceNotFoundException](err) { - return nil - } + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil + } - // Sometimes the EKS API returns the ResourceNotFound error in this form: - // ClientException: No cluster found for name: tf-acc-test-0o1f8 - if errs.IsA[*types.ClientException](err) { - if strings.Contains(err.Error(), "No cluster found for name:") { - return nil - } + // Sometimes the EKS API returns the ResourceNotFound error in this form: + // ClientException: No cluster found for name: tf-acc-test-0o1f8 + if errs.IsA[*types.ClientException](err) { + if strings.Contains(err.Error(), "No cluster found for name:") { + return nil } + } + if err != nil { return diag.Errorf("deleting EKS Cluster (%s): %s", d.Id(), err) } @@ -586,24 +586,24 @@ func FindClusterByName(ctx context.Context, client *eks.Client, name string) (*t output, err := client.DescribeCluster(ctx, input) - if err 
!= nil { - // Sometimes the EKS API returns the ResourceNotFound error in this form: - // ClientException: No cluster found for name: tf-acc-test-0o1f8 - if errs.IsA[*types.ResourceNotFoundException](err) { + // Sometimes the EKS API returns the ResourceNotFound error in this form: + // ClientException: No cluster found for name: tf-acc-test-0o1f8 + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + if errs.IsA[*types.ClientException](err) { + if strings.Contains(err.Error(), "No cluster found for name:") { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, } } - if errs.IsA[*types.ClientException](err) { - if strings.Contains(err.Error(), "No cluster found for name:") { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - } + } + if err != nil { return nil, err } @@ -640,6 +640,22 @@ func findClusterUpdateByTwoPartKey(ctx context.Context, client *eks.Client, name return output.Update, nil } +func statusCluster(ctx context.Context, client *eks.Client, name string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := FindClusterByName(ctx, client, name) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil + } +} + func statusClusterUpdate(ctx context.Context, client *eks.Client, name, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findClusterUpdateByTwoPartKey(ctx, client, name, id) @@ -790,13 +806,13 @@ func expandVPCConfigRequestForCreate(tfList []interface{}) *types.VpcConfigReque securityGroupIds := flex.ExpandStringSet(m["security_group_ids"].(*schema.Set)) securityGroupIdsSlice := make([]string, len(securityGroupIds)) for i, id := range securityGroupIds { - securityGroupIdsSlice[i] = aws.ToString(id) + securityGroupIdsSlice[i] = *id } 
subnetIds := flex.ExpandStringSet(m["subnet_ids"].(*schema.Set)) subnetIdsSlice := make([]string, len(subnetIds)) for i, id := range subnetIds { - subnetIdsSlice[i] = aws.ToString(id) + subnetIdsSlice[i] = *id } vpcConfigRequest := &types.VpcConfigRequest{ @@ -810,7 +826,7 @@ func expandVPCConfigRequestForCreate(tfList []interface{}) *types.VpcConfigReque publicAccessCidrs := flex.ExpandStringSet(v) vpcConfigRequest.PublicAccessCidrs = make([]string, len(publicAccessCidrs)) for i, cidr := range publicAccessCidrs { - vpcConfigRequest.PublicAccessCidrs[i] = aws.ToString(cidr) + vpcConfigRequest.PublicAccessCidrs[i] = *cidr } } @@ -833,7 +849,7 @@ func expandVPCConfigRequestForUpdate(tfList []interface{}) *types.VpcConfigReque publicAccessCidrs := flex.ExpandStringSet(v) vpcConfigRequest.PublicAccessCidrs = make([]string, len(publicAccessCidrs)) for i, cidr := range publicAccessCidrs { - vpcConfigRequest.PublicAccessCidrs[i] = aws.ToString(cidr) + vpcConfigRequest.PublicAccessCidrs[i] = *cidr } } diff --git a/internal/service/eks/fargate_profile.go b/internal/service/eks/fargate_profile.go index c1b91a325db..a4addb0176e 100644 --- a/internal/service/eks/fargate_profile.go +++ b/internal/service/eks/fargate_profile.go @@ -285,7 +285,7 @@ func expandFargateProfileSelectors(l []interface{}) []types.FargateProfileSelect fargateProfileSelector.Labels = make(map[string]string) for key, value := range flex.ExpandStringMap(v) { val := value - fargateProfileSelector.Labels[key] = aws.ToString(val) + fargateProfileSelector.Labels[key] = *val } } diff --git a/internal/service/eks/node_group.go b/internal/service/eks/node_group.go index e240a699151..586e20529d2 100644 --- a/internal/service/eks/node_group.go +++ b/internal/service/eks/node_group.go @@ -478,7 +478,7 @@ func resourceNodeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i input := &eks.UpdateNodegroupVersionInput{ ClientRequestToken: aws.String(id.UniqueId()), ClusterName: aws.String(clusterName), - 
Force: d.Get("force_update_version").(bool), + Force: *aws.Bool(d.Get("force_update_version").(bool)), NodegroupName: aws.String(nodeGroupName), } diff --git a/internal/service/eks/status.go b/internal/service/eks/status.go index 57103a5500b..bf237126344 100644 --- a/internal/service/eks/status.go +++ b/internal/service/eks/status.go @@ -11,6 +11,22 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) +func statusAddon(ctx context.Context, client *eks.Client, clusterName, addonName string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := FindAddonByClusterNameAndAddonName(ctx, client, clusterName, addonName) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil + } +} + func statusAddonUpdate(ctx context.Context, client *eks.Client, clusterName, addonName, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := FindAddonUpdateByClusterNameAddonNameAndID(ctx, client, clusterName, addonName, id) @@ -27,6 +43,38 @@ func statusAddonUpdate(ctx context.Context, client *eks.Client, clusterName, add } } +func statusFargateProfile(ctx context.Context, client *eks.Client, clusterName, fargateProfileName string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := FindFargateProfileByClusterNameAndFargateProfileName(ctx, client, clusterName, fargateProfileName) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil + } +} + +func statusNodegroup(ctx context.Context, client *eks.Client, clusterName, nodeGroupName string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := FindNodegroupByClusterNameAndNodegroupName(ctx, client, clusterName, nodeGroupName) + + if tfresource.NotFound(err) { + return nil, "", nil 
+ } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil + } +} + func statusNodegroupUpdate(ctx context.Context, client *eks.Client, clusterName, nodeGroupName, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := FindNodegroupUpdateByClusterNameNodegroupNameAndID(ctx, client, clusterName, nodeGroupName, id) From 9a04832428eadb0110858a6e15c9ec761f5b5f09 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 6 Nov 2023 16:24:21 -0500 Subject: [PATCH 04/46] Revert "feat: Udpate EKS to use aws-sdk-go-v2" This reverts commit 83dbd0f24c0445d2700cd863be6871da15bf4aa2. --- go.mod | 1 - go.sum | 2 - internal/conns/awsclient_gen.go | 6 +- .../service/dms/event_subscription_test.go | 6 +- internal/service/eks/addon.go | 120 +++-- internal/service/eks/addon_data_source.go | 10 +- .../service/eks/addon_data_source_test.go | 11 +- internal/service/eks/addon_test.go | 143 +++--- .../service/eks/addon_version_data_source.go | 4 +- .../eks/addon_version_data_source_test.go | 19 +- internal/service/eks/arn.go | 2 +- internal/service/eks/cluster.go | 428 ++++++++---------- .../eks/cluster_auth_data_source_test.go | 4 +- internal/service/eks/cluster_data_source.go | 9 +- .../service/eks/cluster_data_source_test.go | 6 +- internal/service/eks/cluster_test.go | 99 ++-- internal/service/eks/clusters_data_source.go | 26 +- .../service/eks/clusters_data_source_test.go | 4 +- internal/service/eks/consts.go | 10 + internal/service/eks/errors.go | 40 +- internal/service/eks/fargate_profile.go | 67 +-- internal/service/eks/fargate_profile_test.go | 35 +- internal/service/eks/find.go | 89 ++-- internal/service/eks/generate.go | 2 +- .../service/eks/identity_provider_config.go | 58 ++- .../eks/identity_provider_config_test.go | 29 +- internal/service/eks/node_group.go | 218 ++++----- .../service/eks/node_group_data_source.go | 7 +- .../eks/node_group_data_source_test.go | 7 +- 
internal/service/eks/node_group_test.go | 147 +++--- .../service/eks/node_groups_data_source.go | 29 +- .../eks/node_groups_data_source_test.go | 4 +- internal/service/eks/service_package_gen.go | 17 +- internal/service/eks/status.go | 39 +- internal/service/eks/sweep.go | 357 ++++++++------- internal/service/eks/tags_gen.go | 33 +- internal/service/eks/wait.go | 165 ++++++- names/names_data.csv | 2 +- 38 files changed, 1168 insertions(+), 1087 deletions(-) diff --git a/go.mod b/go.mod index c76b69c490d..58e9c2753f0 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,6 @@ require ( github.com/aws/aws-sdk-go-v2/service/directoryservice v1.18.7 github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.3.2 github.com/aws/aws-sdk-go-v2/service/ec2 v1.126.0 - github.com/aws/aws-sdk-go-v2/service/eks v1.29.7 github.com/aws/aws-sdk-go-v2/service/emrserverless v1.11.2 github.com/aws/aws-sdk-go-v2/service/finspace v1.12.4 github.com/aws/aws-sdk-go-v2/service/fis v1.16.2 diff --git a/go.sum b/go.sum index 3af4e428798..3daed8dcfc3 100644 --- a/go.sum +++ b/go.sum @@ -81,8 +81,6 @@ github.com/aws/aws-sdk-go-v2/service/dynamodb v1.22.1 h1:TYq4EU2vEEluoaBG0RCPnbi github.com/aws/aws-sdk-go-v2/service/dynamodb v1.22.1/go.mod h1:1EJb9/tJwI7iqiStZBcmHijQxcgp7dlPuD2YgoZIrJQ= github.com/aws/aws-sdk-go-v2/service/ec2 v1.126.0 h1:EGYP4IDYHYe4IcpCUxEAIVKr9nZXvtql4HNhEPK1Y3w= github.com/aws/aws-sdk-go-v2/service/ec2 v1.126.0/go.mod h1:raUdIDoNuDPn9dMG3cCmIm8RoWOmZUqQPzuw8xpmB8Y= -github.com/aws/aws-sdk-go-v2/service/eks v1.29.7 h1:MRBXts9pc/3RdaeLXR4HwmVhqMP70sOEYHpkgmAWDbU= -github.com/aws/aws-sdk-go-v2/service/eks v1.29.7/go.mod h1:Nt5l6Vn68Hv0JWJ6dcQDKuBAKAfHUZSC9Ln8X/1fUMY= github.com/aws/aws-sdk-go-v2/service/emrserverless v1.11.2 h1:diyMrawOZ56CavFS//UFFjk2LY1ooXeTqAecXsjtXwI= github.com/aws/aws-sdk-go-v2/service/emrserverless v1.11.2/go.mod h1:ZrmnnT6zI3+0XsQIGCu/vXhIFk4Vwu4WKqeMDSzm4z4= github.com/aws/aws-sdk-go-v2/service/finspace v1.12.4 h1:xML1DGju5bsRtFejZfHuWtaut3WkYrGTHMf2G3T3de0= diff 
--git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index 5a0a0c9490b..0a73c487c30 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -20,7 +20,6 @@ import ( directoryservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/directoryservice" docdbelastic_sdkv2 "github.com/aws/aws-sdk-go-v2/service/docdbelastic" ec2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ec2" - eks_sdkv2 "github.com/aws/aws-sdk-go-v2/service/eks" emrserverless_sdkv2 "github.com/aws/aws-sdk-go-v2/service/emrserverless" finspace_sdkv2 "github.com/aws/aws-sdk-go-v2/service/finspace" fis_sdkv2 "github.com/aws/aws-sdk-go-v2/service/fis" @@ -132,6 +131,7 @@ import ( ecrpublic_sdkv1 "github.com/aws/aws-sdk-go/service/ecrpublic" ecs_sdkv1 "github.com/aws/aws-sdk-go/service/ecs" efs_sdkv1 "github.com/aws/aws-sdk-go/service/efs" + eks_sdkv1 "github.com/aws/aws-sdk-go/service/eks" elasticache_sdkv1 "github.com/aws/aws-sdk-go/service/elasticache" elasticbeanstalk_sdkv1 "github.com/aws/aws-sdk-go/service/elasticbeanstalk" elasticsearchservice_sdkv1 "github.com/aws/aws-sdk-go/service/elasticsearchservice" @@ -527,8 +527,8 @@ func (c *AWSClient) EFSConn(ctx context.Context) *efs_sdkv1.EFS { return errs.Must(conn[*efs_sdkv1.EFS](ctx, c, names.EFS)) } -func (c *AWSClient) EKSClient(ctx context.Context) *eks_sdkv2.Client { - return errs.Must(client[*eks_sdkv2.Client](ctx, c, names.EKS)) +func (c *AWSClient) EKSConn(ctx context.Context) *eks_sdkv1.EKS { + return errs.Must(conn[*eks_sdkv1.EKS](ctx, c, names.EKS)) } func (c *AWSClient) ELBConn(ctx context.Context) *elb_sdkv1.ELB { diff --git a/internal/service/dms/event_subscription_test.go b/internal/service/dms/event_subscription_test.go index 0f961a8d5ce..c6665a4c870 100644 --- a/internal/service/dms/event_subscription_test.go +++ b/internal/service/dms/event_subscription_test.go @@ -8,9 +8,9 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/service/eks" "github.com/aws/aws-sdk-go/aws" dms 
"github.com/aws/aws-sdk-go/service/databasemigrationservice" + "github.com/aws/aws-sdk-go/service/eks" "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -389,11 +389,11 @@ resource "aws_dms_event_subscription" "test" { } func testAccPreCheckEKS(ctx context.Context, t *testing.T) { - client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) input := &eks.ListClustersInput{} - _, err := client.ListClusters(ctx, input) + _, err := conn.ListClustersWithContext(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) diff --git a/internal/service/eks/addon.go b/internal/service/eks/addon.go index 328b482baf0..ab089032a25 100644 --- a/internal/service/eks/addon.go +++ b/internal/service/eks/addon.go @@ -6,20 +6,17 @@ package eks import ( "context" "log" - "strings" "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/eks" + "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" sdkid "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/enum" - "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -92,22 +89,25 
@@ func ResourceAddon() *schema.Resource { Optional: true, }, "resolve_conflicts": { - Type: schema.TypeString, - Optional: true, - ValidateDiagFunc: enum.Validate[types.ResolveConflicts](), - Deprecated: `The "resolve_conflicts" attribute can't be set to "PRESERVE" on initial resource creation. Use "resolve_conflicts_on_create" and/or "resolve_conflicts_on_update" instead`, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(eks.ResolveConflicts_Values(), false), + Deprecated: `The "resolve_conflicts" attribute can't be set to "PRESERVE" on initial resource creation. Use "resolve_conflicts_on_create" and/or "resolve_conflicts_on_update" instead`, }, "resolve_conflicts_on_create": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(enum.Slice(types.ResolveConflictsNone, types.ResolveConflictsOverwrite), false), + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + eks.ResolveConflictsNone, + eks.ResolveConflictsOverwrite, + }, false), ConflictsWith: []string{"resolve_conflicts"}, }, "resolve_conflicts_on_update": { - Type: schema.TypeString, - Optional: true, - ValidateDiagFunc: enum.Validate[types.ResolveConflicts](), - ConflictsWith: []string{"resolve_conflicts"}, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(eks.ResolveConflicts_Values(), false), + ConflictsWith: []string{"resolve_conflicts"}, }, "service_account_role_arn": { Type: schema.TypeString, @@ -122,7 +122,7 @@ func ResourceAddon() *schema.Resource { func resourceAddonCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) addonName := d.Get("addon_name").(string) clusterName := d.Get("cluster_name").(string) @@ -143,9 +143,9 @@ func resourceAddonCreate(ctx context.Context, d *schema.ResourceData, 
meta inter } if v, ok := d.GetOk("resolve_conflicts"); ok { - input.ResolveConflicts = types.ResolveConflicts(v.(string)) + input.ResolveConflicts = aws.String(v.(string)) } else if v, ok := d.GetOk("resolve_conflicts_on_create"); ok { - input.ResolveConflicts = types.ResolveConflicts(v.(string)) + input.ResolveConflicts = aws.String(v.(string)) } if v, ok := d.GetOk("service_account_role_arn"); ok { @@ -154,13 +154,15 @@ func resourceAddonCreate(ctx context.Context, d *schema.ResourceData, meta inter _, err := tfresource.RetryWhen(ctx, propagationTimeout, func() (interface{}, error) { - return client.CreateAddon(ctx, input) + return conn.CreateAddonWithContext(ctx, input) }, func(err error) (bool, error) { - if errs.IsA[*types.InvalidParameterException](err) { - if strings.Contains(err.Error(), "CREATE_FAILED") || strings.Contains(err.Error(), "does not exist") { - return true, err - } + if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "CREATE_FAILED") { + return true, err + } + + if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "does not exist") { + return true, err } return false, err @@ -173,14 +175,7 @@ func resourceAddonCreate(ctx context.Context, d *schema.ResourceData, meta inter d.SetId(id) - waiter := eks.NewAddonActiveWaiter(client) - waiterParams := &eks.DescribeAddonInput{ - AddonName: aws.String(addonName), - ClusterName: aws.String(clusterName), - } - - err = waiter.Wait(ctx, waiterParams, d.Timeout(schema.TimeoutCreate)) - if err != nil { + if _, err := waitAddonCreated(ctx, conn, clusterName, addonName, d.Timeout(schema.TimeoutCreate)); err != nil { // Creating addon w/o setting resolve_conflicts to "OVERWRITE" // might result in a failed creation, if unmanaged version of addon is already deployed // and there are configuration conflicts: @@ -199,7 +194,7 @@ func resourceAddonCreate(ctx context.Context, d *schema.ResourceData, meta inter func resourceAddonRead(ctx context.Context, d 
*schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) clusterName, addonName, err := AddonParseResourceID(d.Id()) @@ -207,14 +202,12 @@ func resourceAddonRead(ctx context.Context, d *schema.ResourceData, meta interfa return sdkdiag.AppendFromErr(diags, err) } - addon, err := FindAddonByClusterNameAndAddonName(ctx, client, clusterName, addonName) + addon, err := FindAddonByClusterNameAndAddonName(ctx, conn, clusterName, addonName) - if !d.IsNewResource() && errs.IsA[*types.ResourceNotFoundException](err) { - if !d.IsNewResource() { - log.Printf("[WARN] EKS Add-On (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil - } + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] EKS Add-On (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil } if err != nil { @@ -226,8 +219,8 @@ func resourceAddonRead(ctx context.Context, d *schema.ResourceData, meta interfa d.Set("arn", addon.AddonArn) d.Set("cluster_name", addon.ClusterName) d.Set("configuration_values", addon.ConfigurationValues) - d.Set("created_at", aws.ToTime(addon.CreatedAt).Format(time.RFC3339)) - d.Set("modified_at", aws.ToTime(addon.ModifiedAt).Format(time.RFC3339)) + d.Set("created_at", aws.TimeValue(addon.CreatedAt).Format(time.RFC3339)) + d.Set("modified_at", aws.TimeValue(addon.ModifiedAt).Format(time.RFC3339)) d.Set("service_account_role_arn", addon.ServiceAccountRoleArn) setTagsOut(ctx, addon.Tags) @@ -237,7 +230,7 @@ func resourceAddonRead(ctx context.Context, d *schema.ResourceData, meta interfa func resourceAddonUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) clusterName, addonName, err := AddonParseResourceID(d.Id()) @@ -260,17 +253,16 @@ func 
resourceAddonUpdate(ctx context.Context, d *schema.ResourceData, meta inter input.ConfigurationValues = aws.String(d.Get("configuration_values").(string)) } - var conflictResolutionAttr string - var conflictResolution types.ResolveConflicts + var conflictResolutionAttr, conflictResolution string if v, ok := d.GetOk("resolve_conflicts"); ok { conflictResolutionAttr = "resolve_conflicts" - conflictResolution = types.ResolveConflicts(v.(string)) - input.ResolveConflicts = conflictResolution + conflictResolution = v.(string) + input.ResolveConflicts = aws.String(v.(string)) } else if v, ok := d.GetOk("resolve_conflicts_on_update"); ok { conflictResolutionAttr = "resolve_conflicts_on_update" - conflictResolution = types.ResolveConflicts(v.(string)) - input.ResolveConflicts = conflictResolution + conflictResolution = v.(string) + input.ResolveConflicts = aws.String(v.(string)) } // If service account role ARN is already provided, use it. Otherwise, the add-on uses @@ -279,19 +271,19 @@ func resourceAddonUpdate(ctx context.Context, d *schema.ResourceData, meta inter input.ServiceAccountRoleArn = aws.String(d.Get("service_account_role_arn").(string)) } - output, err := client.UpdateAddon(ctx, input) + output, err := conn.UpdateAddonWithContext(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating EKS Add-On (%s): %s", d.Id(), err) } - updateID := aws.ToString(output.Update.Id) - if _, err := waitAddonUpdateSuccessful(ctx, client, clusterName, addonName, updateID, d.Timeout(schema.TimeoutUpdate)); err != nil { - if conflictResolution != types.ResolveConflictsOverwrite { + updateID := aws.StringValue(output.Update.Id) + if _, err := waitAddonUpdateSuccessful(ctx, conn, clusterName, addonName, updateID, d.Timeout(schema.TimeoutUpdate)); err != nil { + if conflictResolution != eks.ResolveConflictsOverwrite { // Changing addon version w/o setting resolve_conflicts to "OVERWRITE" // might result in a failed update if there are conflicts: // 
ConfigurationConflict Apply failed with 1 conflict: conflict with "kubectl"... - return sdkdiag.AppendErrorf(diags, "waiting for EKS Add-On (%s) update (%s): %s. Consider setting attribute %q to %q", d.Id(), updateID, err, conflictResolutionAttr, string(types.ResolveConflictsOverwrite)) + return sdkdiag.AppendErrorf(diags, "waiting for EKS Add-On (%s) update (%s): %s. Consider setting attribute %q to %q", d.Id(), updateID, err, conflictResolutionAttr, eks.ResolveConflictsOverwrite) } return sdkdiag.AppendErrorf(diags, "waiting for EKS Add-On (%s) update (%s): %s", d.Id(), updateID, err) @@ -303,7 +295,7 @@ func resourceAddonUpdate(ctx context.Context, d *schema.ResourceData, meta inter func resourceAddonDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) clusterName, addonName, err := AddonParseResourceID(d.Id()) @@ -315,26 +307,20 @@ func resourceAddonDelete(ctx context.Context, d *schema.ResourceData, meta inter AddonName: aws.String(addonName), ClusterName: aws.String(clusterName), } + if v, ok := d.GetOk("preserve"); ok { - input.Preserve = v.(bool) + input.Preserve = aws.Bool(v.(bool)) } log.Printf("[DEBUG] Deleting EKS Add-On: %s", d.Id()) - _, err = client.DeleteAddon(ctx, input) + _, err = conn.DeleteAddonWithContext(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "deleting EKS Add-On (%s): %s", d.Id(), err) } - waiter := eks.NewAddonDeletedWaiter(client) - waiterParams := &eks.DescribeAddonInput{ - AddonName: aws.String(addonName), - ClusterName: aws.String(clusterName), - } - - err = waiter.Wait(ctx, waiterParams, d.Timeout(schema.TimeoutDelete)) - if err != nil { - sdkdiag.AppendErrorf(diags, "waiting for EKS Add-On (%s) delete: %s", d.Id(), err) + if _, err := waitAddonDeleted(ctx, conn, clusterName, addonName, d.Timeout(schema.TimeoutDelete)); err != nil { + return 
sdkdiag.AppendErrorf(diags, "waiting for EKS Add-On (%s) delete: %s", d.Id(), err) } return diags diff --git a/internal/service/eks/addon_data_source.go b/internal/service/eks/addon_data_source.go index 2c6b2e4fd7e..7779c12f741 100644 --- a/internal/service/eks/addon_data_source.go +++ b/internal/service/eks/addon_data_source.go @@ -7,7 +7,7 @@ import ( "context" "time" - "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -60,14 +60,14 @@ func DataSourceAddon() *schema.Resource { } func dataSourceAddonRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig addonName := d.Get("addon_name").(string) clusterName := d.Get("cluster_name").(string) id := AddonCreateResourceID(clusterName, addonName) - addon, err := FindAddonByClusterNameAndAddonName(ctx, client, clusterName, addonName) + addon, err := FindAddonByClusterNameAndAddonName(ctx, conn, clusterName, addonName) if err != nil { return diag.Errorf("reading EKS Add-On (%s): %s", id, err) @@ -77,8 +77,8 @@ func dataSourceAddonRead(ctx context.Context, d *schema.ResourceData, meta inter d.Set("addon_version", addon.AddonVersion) d.Set("arn", addon.AddonArn) d.Set("configuration_values", addon.ConfigurationValues) - d.Set("created_at", aws.ToTime(addon.CreatedAt).Format(time.RFC3339)) - d.Set("modified_at", aws.ToTime(addon.ModifiedAt).Format(time.RFC3339)) + d.Set("created_at", aws.TimeValue(addon.CreatedAt).Format(time.RFC3339)) + d.Set("modified_at", aws.TimeValue(addon.ModifiedAt).Format(time.RFC3339)) d.Set("service_account_role_arn", addon.ServiceAccountRoleArn) if err := d.Set("tags", KeyValueTags(ctx, 
addon.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { diff --git a/internal/service/eks/addon_data_source_test.go b/internal/service/eks/addon_data_source_test.go index cd6444e6eb5..9091f9676b8 100644 --- a/internal/service/eks/addon_data_source_test.go +++ b/internal/service/eks/addon_data_source_test.go @@ -7,8 +7,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -23,7 +22,7 @@ func TestAccEKSAddonDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ @@ -49,17 +48,17 @@ func TestAccEKSAddonDataSource_configurationValues(t *testing.T) { dataSourceResourceName := "data.aws_eks_addon.test" resourceName := "aws_eks_addon.test" addonName := "vpc-cni" - addonVersion := "v1.14.1-eksbuild.1" + addonVersion := "v1.10.4-eksbuild.1" configurationValues := "{\"env\": {\"WARM_ENI_TARGET\":\"2\",\"ENABLE_POD_ENI\":\"true\"},\"resources\": {\"limits\":{\"cpu\":\"100m\",\"memory\":\"100Mi\"},\"requests\":{\"cpu\":\"100m\",\"memory\":\"100Mi\"}}}" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: 
testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccAddonDataSourceConfig_configurationValues(rName, addonName, addonVersion, configurationValues, string(types.ResolveConflictsOverwrite)), + Config: testAccAddonDataSourceConfig_configurationValues(rName, addonName, addonVersion, configurationValues, eks.ResolveConflictsOverwrite), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "addon_version", dataSourceResourceName, "addon_version"), resource.TestCheckResourceAttrPair(resourceName, "arn", dataSourceResourceName, "arn"), diff --git a/internal/service/eks/addon_test.go b/internal/service/eks/addon_test.go index 573b1a76fb2..47cd54ac3ae 100644 --- a/internal/service/eks/addon_test.go +++ b/internal/service/eks/addon_test.go @@ -9,8 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,7 +21,7 @@ import ( func TestAccEKSAddon_basic(t *testing.T) { ctx := acctest.Context(t) - var addon types.Addon + var addon eks.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) clusterResourceName := "aws_eks_cluster.test" addonResourceName := "aws_eks_addon.test" @@ -30,14 +29,14 @@ func TestAccEKSAddon_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAddonConfig_basic(rName, addonName), Check: 
resource.ComposeAggregateTestCheckFunc( - testAccCheckAddonExists(ctx, addonResourceName, addon), + testAccCheckAddonExists(ctx, addonResourceName, &addon), resource.TestCheckResourceAttr(addonResourceName, "addon_name", addonName), resource.TestCheckResourceAttrSet(addonResourceName, "addon_version"), acctest.MatchResourceAttrRegionalARN(addonResourceName, "arn", "eks", regexache.MustCompile(fmt.Sprintf("addon/%s/%s/.+$", rName, addonName))), @@ -58,21 +57,21 @@ func TestAccEKSAddon_basic(t *testing.T) { func TestAccEKSAddon_disappears(t *testing.T) { ctx := acctest.Context(t) - var addon types.Addon + var addon eks.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" addonName := "vpc-cni" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAddonConfig_basic(rName, addonName), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, addon), + testAccCheckAddonExists(ctx, resourceName, &addon), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfeks.ResourceAddon(), resourceName), ), ExpectNonEmptyPlan: true, @@ -83,7 +82,7 @@ func TestAccEKSAddon_disappears(t *testing.T) { func TestAccEKSAddon_Disappears_cluster(t *testing.T) { ctx := acctest.Context(t) - var addon types.Addon + var addon eks.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" clusterResourceName := "aws_eks_cluster.test" @@ -91,14 +90,14 @@ func TestAccEKSAddon_Disappears_cluster(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); 
testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAddonConfig_basic(rName, addonName), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, addon), + testAccCheckAddonExists(ctx, resourceName, &addon), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfeks.ResourceCluster(), clusterResourceName), ), ExpectNonEmptyPlan: true, @@ -109,23 +108,23 @@ func TestAccEKSAddon_Disappears_cluster(t *testing.T) { func TestAccEKSAddon_addonVersion(t *testing.T) { ctx := acctest.Context(t) - var addon1, addon2 types.Addon + var addon1, addon2 eks.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" addonName := "vpc-cni" - addonVersion1 := "v1.14.1-eksbuild.1" - addonVersion2 := "v1.15.1-eksbuild.1" + addonVersion1 := "v1.12.5-eksbuild.2" + addonVersion2 := "v1.12.6-eksbuild.1" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAddonConfig_version(rName, addonName, addonVersion1), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, addon1), + testAccCheckAddonExists(ctx, resourceName, &addon1), resource.TestCheckResourceAttr(resourceName, "addon_version", addonVersion1), ), }, @@ -138,7 +137,7 @@ func TestAccEKSAddon_addonVersion(t *testing.T) { { Config: testAccAddonConfig_version(rName, addonName, addonVersion2), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, 
resourceName, addon2), + testAccCheckAddonExists(ctx, resourceName, &addon2), resource.TestCheckResourceAttr(resourceName, "addon_version", addonVersion2), ), }, @@ -148,21 +147,21 @@ func TestAccEKSAddon_addonVersion(t *testing.T) { func TestAccEKSAddon_preserve(t *testing.T) { ctx := acctest.Context(t) - var addon types.Addon + var addon eks.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" addonName := "vpc-cni" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAddonConfig_preserve(rName, addonName), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, addon), + testAccCheckAddonExists(ctx, resourceName, &addon), resource.TestCheckResourceAttr(resourceName, "preserve", "true"), ), }, @@ -178,22 +177,22 @@ func TestAccEKSAddon_preserve(t *testing.T) { func TestAccEKSAddon_deprecated(t *testing.T) { ctx := acctest.Context(t) - var addon1, addon2, addon3 types.Addon + var addon1, addon2, addon3 eks.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" addonName := "vpc-cni" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccAddonConfig_deprecated(rName, addonName, string(types.ResolveConflictsNone)), + Config: 
testAccAddonConfig_deprecated(rName, addonName, eks.ResolveConflictsNone), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, addon1), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts", string(types.ResolveConflictsNone)), + testAccCheckAddonExists(ctx, resourceName, &addon1), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts", eks.ResolveConflictsNone), ), }, { @@ -203,17 +202,17 @@ func TestAccEKSAddon_deprecated(t *testing.T) { ImportStateVerifyIgnore: []string{"resolve_conflicts"}, }, { - Config: testAccAddonConfig_deprecated(rName, addonName, string(types.ResolveConflictsOverwrite)), + Config: testAccAddonConfig_deprecated(rName, addonName, eks.ResolveConflictsOverwrite), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, addon2), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts", string(types.ResolveConflictsOverwrite)), + testAccCheckAddonExists(ctx, resourceName, &addon2), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts", eks.ResolveConflictsOverwrite), ), }, { - Config: testAccAddonConfig_deprecated(rName, addonName, string(types.ResolveConflictsPreserve)), + Config: testAccAddonConfig_deprecated(rName, addonName, eks.ResolveConflictsPreserve), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, addon3), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts", string(types.ResolveConflictsPreserve)), + testAccCheckAddonExists(ctx, resourceName, &addon3), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts", eks.ResolveConflictsPreserve), ), }, }, @@ -222,23 +221,23 @@ func TestAccEKSAddon_deprecated(t *testing.T) { func TestAccEKSAddon_resolveConflicts(t *testing.T) { ctx := acctest.Context(t) - var addon1, addon2, addon3 types.Addon + var addon1, addon2, addon3 eks.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" 
addonName := "vpc-cni" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccAddonConfig_resolveConflicts(rName, addonName, string(types.ResolveConflictsNone), string(types.ResolveConflictsNone)), + Config: testAccAddonConfig_resolveConflicts(rName, addonName, eks.ResolveConflictsNone, eks.ResolveConflictsNone), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, addon1), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_create", string(types.ResolveConflictsNone)), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_update", string(types.ResolveConflictsNone)), + testAccCheckAddonExists(ctx, resourceName, &addon1), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_create", eks.ResolveConflictsNone), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_update", eks.ResolveConflictsNone), ), }, { @@ -248,19 +247,19 @@ func TestAccEKSAddon_resolveConflicts(t *testing.T) { ImportStateVerifyIgnore: []string{"resolve_conflicts_on_create", "resolve_conflicts_on_update"}, }, { - Config: testAccAddonConfig_resolveConflicts(rName, addonName, string(types.ResolveConflictsOverwrite), string(types.ResolveConflictsOverwrite)), + Config: testAccAddonConfig_resolveConflicts(rName, addonName, eks.ResolveConflictsOverwrite, eks.ResolveConflictsOverwrite), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, addon2), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_create", string(types.ResolveConflictsOverwrite)), - resource.TestCheckResourceAttr(resourceName, 
"resolve_conflicts_on_update", string(types.ResolveConflictsOverwrite)), + testAccCheckAddonExists(ctx, resourceName, &addon2), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_create", eks.ResolveConflictsOverwrite), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_update", eks.ResolveConflictsOverwrite), ), }, { - Config: testAccAddonConfig_resolveConflicts(rName, addonName, string(types.ResolveConflictsOverwrite), string(types.ResolveConflictsPreserve)), + Config: testAccAddonConfig_resolveConflicts(rName, addonName, eks.ResolveConflictsOverwrite, eks.ResolveConflictsPreserve), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, addon3), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_create", string(types.ResolveConflictsOverwrite)), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_update", string(types.ResolveConflictsPreserve)), + testAccCheckAddonExists(ctx, resourceName, &addon3), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_create", eks.ResolveConflictsOverwrite), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_update", eks.ResolveConflictsPreserve), ), }, }, @@ -269,7 +268,7 @@ func TestAccEKSAddon_resolveConflicts(t *testing.T) { func TestAccEKSAddon_serviceAccountRoleARN(t *testing.T) { ctx := acctest.Context(t) - var addon types.Addon + var addon eks.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" serviceRoleResourceName := "aws_iam_role.test-service-role" @@ -277,14 +276,14 @@ func TestAccEKSAddon_serviceAccountRoleARN(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, 
CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAddonConfig_serviceAccountRoleARN(rName, addonName), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, addon), + testAccCheckAddonExists(ctx, resourceName, &addon), resource.TestCheckResourceAttrPair(resourceName, "service_account_role_arn", serviceRoleResourceName, "arn"), ), }, @@ -299,7 +298,7 @@ func TestAccEKSAddon_serviceAccountRoleARN(t *testing.T) { func TestAccEKSAddon_configurationValues(t *testing.T) { ctx := acctest.Context(t) - var addon types.Addon + var addon eks.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" configurationValues := "{\"env\": {\"WARM_ENI_TARGET\":\"2\",\"ENABLE_POD_ENI\":\"true\"},\"resources\": {\"limits\":{\"cpu\":\"100m\",\"memory\":\"100Mi\"},\"requests\":{\"cpu\":\"100m\",\"memory\":\"100Mi\"}}}" @@ -307,18 +306,18 @@ func TestAccEKSAddon_configurationValues(t *testing.T) { emptyConfigurationValues := "{}" invalidConfigurationValues := "{\"env\": {\"INVALID_FIELD\":\"2\"}}" addonName := "vpc-cni" - addonVersion := "v1.14.1-eksbuild.1" + addonVersion := "v1.12.6-eksbuild.1" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, configurationValues, string(types.ResolveConflictsOverwrite)), + Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, configurationValues, eks.ResolveConflictsOverwrite), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, addon), + testAccCheckAddonExists(ctx, 
resourceName, &addon), resource.TestCheckResourceAttr(resourceName, "configuration_values", configurationValues), ), }, @@ -329,21 +328,21 @@ func TestAccEKSAddon_configurationValues(t *testing.T) { ImportStateVerifyIgnore: []string{"resolve_conflicts"}, }, { - Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, updateConfigurationValues, string(types.ResolveConflictsOverwrite)), + Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, updateConfigurationValues, eks.ResolveConflictsOverwrite), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, addon), + testAccCheckAddonExists(ctx, resourceName, &addon), resource.TestCheckResourceAttr(resourceName, "configuration_values", updateConfigurationValues), ), }, { - Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, emptyConfigurationValues, string(types.ResolveConflictsOverwrite)), + Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, emptyConfigurationValues, eks.ResolveConflictsOverwrite), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, addon), + testAccCheckAddonExists(ctx, resourceName, &addon), resource.TestCheckResourceAttr(resourceName, "configuration_values", emptyConfigurationValues), ), }, { - Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, invalidConfigurationValues, string(types.ResolveConflictsOverwrite)), + Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, invalidConfigurationValues, eks.ResolveConflictsOverwrite), ExpectError: regexache.MustCompile(`InvalidParameterException: ConfigurationValue provided in request is not supported`), }, }, @@ -352,21 +351,21 @@ func TestAccEKSAddon_configurationValues(t *testing.T) { func TestAccEKSAddon_tags(t *testing.T) { ctx := acctest.Context(t) - var addon1, addon2, addon3 types.Addon + var addon1, addon2, addon3 eks.Addon rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" addonName := "vpc-cni" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAddonConfig_tags1(rName, addonName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, addon1), + testAccCheckAddonExists(ctx, resourceName, &addon1), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), @@ -379,7 +378,7 @@ func TestAccEKSAddon_tags(t *testing.T) { { Config: testAccAddonConfig_tags2(rName, addonName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, addon2), + testAccCheckAddonExists(ctx, resourceName, &addon2), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), @@ -388,7 +387,7 @@ func TestAccEKSAddon_tags(t *testing.T) { { Config: testAccAddonConfig_tags1(rName, addonName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, resourceName, addon3), + testAccCheckAddonExists(ctx, resourceName, &addon3), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), @@ -397,7 +396,7 @@ func TestAccEKSAddon_tags(t *testing.T) { }) } -func testAccCheckAddonExists(ctx context.Context, n string, v types.Addon) resource.TestCheckFunc { +func testAccCheckAddonExists(ctx context.Context, n 
string, v *eks.Addon) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -414,15 +413,15 @@ func testAccCheckAddonExists(ctx context.Context, n string, v types.Addon) resou return err } - client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) - output, err := tfeks.FindAddonByClusterNameAndAddonName(ctx, client, clusterName, addonName) + output, err := tfeks.FindAddonByClusterNameAndAddonName(ctx, conn, clusterName, addonName) if err != nil { return err } - v = *output + *v = *output return nil } @@ -430,7 +429,7 @@ func testAccCheckAddonExists(ctx context.Context, n string, v types.Addon) resou func testAccCheckAddonDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_eks_addon" { @@ -443,7 +442,7 @@ func testAccCheckAddonDestroy(ctx context.Context) resource.TestCheckFunc { return err } - _, err = tfeks.FindAddonByClusterNameAndAddonName(ctx, client, clusterName, addonName) + _, err = tfeks.FindAddonByClusterNameAndAddonName(ctx, conn, clusterName, addonName) if tfresource.NotFound(err) { continue @@ -461,9 +460,11 @@ func testAccCheckAddonDestroy(ctx context.Context) resource.TestCheckFunc { } func testAccPreCheckAddon(ctx context.Context, t *testing.T) { - client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + input := &eks.DescribeAddonVersionsInput{} - _, err := client.DescribeAddonVersions(ctx, input) + + _, err := conn.DescribeAddonVersionsWithContext(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) diff --git 
a/internal/service/eks/addon_version_data_source.go b/internal/service/eks/addon_version_data_source.go index 066b732d501..47264e58ea5 100644 --- a/internal/service/eks/addon_version_data_source.go +++ b/internal/service/eks/addon_version_data_source.go @@ -40,14 +40,14 @@ func DataSourceAddonVersion() *schema.Resource { } func dataSourceAddonVersionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) addonName := d.Get("addon_name").(string) kubernetesVersion := d.Get("kubernetes_version").(string) mostRecent := d.Get("most_recent").(bool) id := addonName - versionInfo, err := FindAddonVersionByAddonNameAndKubernetesVersion(ctx, client, id, kubernetesVersion, mostRecent) + versionInfo, err := FindAddonVersionByAddonNameAndKubernetesVersion(ctx, conn, id, kubernetesVersion, mostRecent) if err != nil { return diag.Errorf("reading EKS Add-On version info (%s, %s): %s", id, kubernetesVersion, err) diff --git a/internal/service/eks/addon_version_data_source_test.go b/internal/service/eks/addon_version_data_source_test.go index d5b40dcd83f..576fc022b33 100644 --- a/internal/service/eks/addon_version_data_source_test.go +++ b/internal/service/eks/addon_version_data_source_test.go @@ -7,8 +7,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -16,7 +15,7 @@ import ( func TestAccEKSAddonVersionDataSource_basic(t *testing.T) { ctx := acctest.Context(t) - var addon types.Addon + var addon eks.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) versionDataSourceName := "data.aws_eks_addon_version.test" 
addonDataSourceName := "data.aws_eks_addon.test" @@ -24,26 +23,26 @@ func TestAccEKSAddonVersionDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccAddonVersionDataSourceConfig_basic(rName, addonName, false), + Config: testAccAddonVersionDataSourceConfig_basic(rName, addonName, true), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, addonDataSourceName, addon), + testAccCheckAddonExists(ctx, addonDataSourceName, &addon), resource.TestCheckResourceAttrPair(versionDataSourceName, "version", addonDataSourceName, "addon_version"), resource.TestCheckResourceAttrPair(versionDataSourceName, "addon_name", addonDataSourceName, "addon_name"), - resource.TestCheckResourceAttr(versionDataSourceName, "most_recent", "false"), + resource.TestCheckResourceAttr(versionDataSourceName, "most_recent", "true"), ), }, { - Config: testAccAddonVersionDataSourceConfig_basic(rName, addonName, true), + Config: testAccAddonVersionDataSourceConfig_basic(rName, addonName, false), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, addonDataSourceName, addon), + testAccCheckAddonExists(ctx, addonDataSourceName, &addon), resource.TestCheckResourceAttrPair(versionDataSourceName, "version", addonDataSourceName, "addon_version"), resource.TestCheckResourceAttrPair(versionDataSourceName, "addon_name", addonDataSourceName, "addon_name"), - resource.TestCheckResourceAttr(versionDataSourceName, "most_recent", "true"), + resource.TestCheckResourceAttr(versionDataSourceName, "most_recent", "false"), ), }, }, diff --git a/internal/service/eks/arn.go b/internal/service/eks/arn.go index 
59cfad8b72d..a8ece1dada7 100644 --- a/internal/service/eks/arn.go +++ b/internal/service/eks/arn.go @@ -16,7 +16,7 @@ import ( "fmt" "strings" - awsarn "github.com/aws/aws-sdk-go-v2/aws/arn" + awsarn "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/endpoints" ) diff --git a/internal/service/eks/cluster.go b/internal/service/eks/cluster.go index d12c3b29aac..83bf69868ea 100644 --- a/internal/service/eks/cluster.go +++ b/internal/service/eks/cluster.go @@ -6,21 +6,18 @@ package eks import ( "context" "log" - "strings" "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/eks" + "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/enum" - "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -84,8 +81,8 @@ func ResourceCluster() *schema.Resource { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateDiagFunc: enum.Validate[types.LogType](), + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice(eks.LogType_Values(), true), }, Set: schema.HashString, }, @@ -114,7 +111,7 @@ func ResourceCluster() *schema.Resource { Required: true, Elem: &schema.Schema{ Type: schema.TypeString, - 
ValidateFunc: validation.StringInSlice([]string{"secrets"}, false), + ValidateFunc: validation.StringInSlice(Resources_Values(), false), }, }, }, @@ -153,11 +150,11 @@ func ResourceCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "ip_family": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateDiagFunc: enum.Validate[types.IpFamily](), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(eks.IpFamily_Values(), false), }, "service_ipv4_cidr": { Type: schema.TypeString, @@ -295,7 +292,7 @@ func ResourceCluster() *schema.Resource { } func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) name := d.Get("name").(string) input := &eks.CreateClusterInput{ @@ -321,30 +318,31 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, func() (interface{}, error) { - return client.CreateCluster(ctx, input) + return conn.CreateClusterWithContext(ctx, input) }, func(err error) (bool, error) { - if errs.IsA[*types.InvalidParameterException](err) { - // InvalidParameterException: roleArn, arn:aws:iam::123456789012:role/XXX, does not exist - if strings.Contains(err.Error(), "does not exist") { - return true, err - } - // InvalidParameterException: Error in role params - if strings.Contains(err.Error(), "Error in role params") { - return true, err - } - if strings.Contains(err.Error(), "Role could not be assumed because the trusted entity is not correct") { - return true, err - } - // InvalidParameterException: The provided role doesn't have the Amazon EKS Managed Policies associated with it. 
- // Please ensure the following policy is attached: arn:aws:iam::aws:policy/AmazonEKSClusterPolicy - if strings.Contains(err.Error(), "The provided role doesn't have the Amazon EKS Managed Policies associated with it") { - return true, err - } - // InvalidParameterException: IAM role's policy must include the `ec2:DescribeSubnets` action - if strings.Contains(err.Error(), "IAM role's policy must include") { - return true, err - } + // InvalidParameterException: roleArn, arn:aws:iam::123456789012:role/XXX, does not exist + if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "does not exist") { + return true, err + } + + // InvalidParameterException: Error in role params + if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "Error in role params") { + return true, err + } + + if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "Role could not be assumed because the trusted entity is not correct") { + return true, err + } + + // InvalidParameterException: The provided role doesn't have the Amazon EKS Managed Policies associated with it. 
Please ensure the following policy is attached: arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "The provided role doesn't have the Amazon EKS Managed Policies associated with it") { + return true, err + } + + // InvalidParameterException: IAM role's policy must include the `ec2:DescribeSubnets` action + if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "IAM role's policy must include") { + return true, err } return false, err @@ -355,15 +353,9 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int return diag.Errorf("creating EKS Cluster (%s): %s", name, err) } - d.SetId(aws.ToString(outputRaw.(*eks.CreateClusterOutput).Cluster.Name)) - - waiter := eks.NewClusterActiveWaiter(client) - waiterParams := &eks.DescribeClusterInput{ - Name: aws.String(d.Id()), - } + d.SetId(aws.StringValue(outputRaw.(*eks.CreateClusterOutput).Cluster.Name)) - err = waiter.Wait(ctx, waiterParams, d.Timeout(schema.TimeoutCreate)) - if err != nil { + if _, err := waitClusterCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return diag.Errorf("waiting for EKS Cluster (%s) create: %s", d.Id(), err) } @@ -371,9 +363,9 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int } func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) - cluster, err := FindClusterByName(ctx, client, d.Id()) + cluster, err := FindClusterByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EKS Cluster (%s) not found, removing from state", d.Id()) @@ -393,7 +385,7 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter if cluster.OutpostConfig != nil { d.Set("cluster_id", cluster.Id) } - d.Set("created_at", 
aws.ToTime(cluster.CreatedAt).Format(time.RFC3339)) + d.Set("created_at", aws.TimeValue(cluster.CreatedAt).String()) if err := d.Set("enabled_cluster_log_types", flattenLogging(cluster.Logging)); err != nil { return diag.Errorf("setting enabled_cluster_log_types: %s", err) } @@ -425,7 +417,7 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter } func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) // Do any version update first. if d.HasChange("version") { @@ -434,15 +426,15 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int Version: aws.String(d.Get("version").(string)), } - output, err := client.UpdateClusterVersion(ctx, input) + output, err := conn.UpdateClusterVersionWithContext(ctx, input) if err != nil { return diag.Errorf("updating EKS Cluster (%s) version: %s", d.Id(), err) } - updateID := aws.ToString(output.Update.Id) + updateID := aws.StringValue(output.Update.Id) - _, err = waitClusterUpdateSuccessful(ctx, client, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)) + _, err = waitClusterUpdateSuccessful(ctx, conn, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)) if err != nil { return diag.Errorf("waiting for EKS Cluster (%s) version update (%s): %s", d.Id(), updateID, err) @@ -458,15 +450,15 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int EncryptionConfig: expandEncryptionConfig(d.Get("encryption_config").([]interface{})), } - output, err := client.AssociateEncryptionConfig(ctx, input) + output, err := conn.AssociateEncryptionConfigWithContext(ctx, input) if err != nil { return diag.Errorf("associating EKS Cluster (%s) encryption config: %s", d.Id(), err) } - updateID := aws.ToString(output.Update.Id) + updateID := aws.StringValue(output.Update.Id) - _, err = waitClusterUpdateSuccessful(ctx, client, 
d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)) + _, err = waitClusterUpdateSuccessful(ctx, conn, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)) if err != nil { return diag.Errorf("waiting for EKS Cluster (%s) encryption config association (%s): %s", d.Id(), updateID, err) @@ -480,15 +472,15 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int Name: aws.String(d.Id()), } - output, err := client.UpdateClusterConfig(ctx, input) + output, err := conn.UpdateClusterConfigWithContext(ctx, input) if err != nil { return diag.Errorf("updating EKS Cluster (%s) logging: %s", d.Id(), err) } - updateID := aws.ToString(output.Update.Id) + updateID := aws.StringValue(output.Update.Id) - _, err = waitClusterUpdateSuccessful(ctx, client, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)) + _, err = waitClusterUpdateSuccessful(ctx, conn, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)) if err != nil { return diag.Errorf("waiting for EKS Cluster (%s) logging update (%s): %s", d.Id(), updateID, err) @@ -501,15 +493,15 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int ResourcesVpcConfig: expandVPCConfigRequestForUpdate(d.Get("vpc_config").([]interface{})), } - output, err := client.UpdateClusterConfig(ctx, input) + output, err := conn.UpdateClusterConfigWithContext(ctx, input) if err != nil { return diag.Errorf("updating EKS Cluster (%s) VPC config: %s", d.Id(), err) } - updateID := aws.ToString(output.Update.Id) + updateID := aws.StringValue(output.Update.Id) - _, err = waitClusterUpdateSuccessful(ctx, client, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)) + _, err = waitClusterUpdateSuccessful(ctx, conn, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)) if err != nil { return diag.Errorf("waiting for EKS Cluster (%s) VPC config update (%s): %s", d.Id(), updateID, err) @@ -520,7 +512,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int } func resourceClusterDelete(ctx 
context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) log.Printf("[DEBUG] Deleting EKS Cluster: %s", d.Id()) @@ -533,9 +525,9 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta int err := tfresource.Retry(ctx, clusterDeleteRetryTimeout, func() *retry.RetryError { var err error - _, err = client.DeleteCluster(ctx, input) + _, err = conn.DeleteClusterWithContext(ctx, input) - if errs.IsA[*types.ResourceInUseException](err) { + if tfawserr.ErrMessageContains(err, eks.ErrCodeResourceInUseException, "in progress") { return retry.RetryableError(err) } @@ -547,61 +539,45 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta int }, tfresource.WithDelayRand(1*time.Minute), tfresource.WithPollInterval(30*time.Second)) if tfresource.TimedOut(err) { - _, err = client.DeleteCluster(ctx, input) + _, err = conn.DeleteClusterWithContext(ctx, input) } - if errs.IsA[*types.ResourceNotFoundException](err) { + if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { return nil } // Sometimes the EKS API returns the ResourceNotFound error in this form: // ClientException: No cluster found for name: tf-acc-test-0o1f8 - if errs.IsA[*types.ClientException](err) { - if strings.Contains(err.Error(), "No cluster found for name:") { - return nil - } + if tfawserr.ErrMessageContains(err, eks.ErrCodeClientException, "No cluster found for name:") { + return nil } if err != nil { return diag.Errorf("deleting EKS Cluster (%s): %s", d.Id(), err) } - waiter := eks.NewClusterDeletedWaiter(client) - waiterParams := &eks.DescribeClusterInput{ - Name: aws.String(d.Id()), - } - - err = waiter.Wait(ctx, waiterParams, d.Timeout(schema.TimeoutDelete)) - if err != nil { + if _, err = waitClusterDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { return diag.Errorf("waiting for EKS Cluster (%s) 
delete: %s", d.Id(), err) } return nil } -func FindClusterByName(ctx context.Context, client *eks.Client, name string) (*types.Cluster, error) { +func FindClusterByName(ctx context.Context, conn *eks.EKS, name string) (*eks.Cluster, error) { input := &eks.DescribeClusterInput{ Name: aws.String(name), } - output, err := client.DescribeCluster(ctx, input) + output, err := conn.DescribeClusterWithContext(ctx, input) // Sometimes the EKS API returns the ResourceNotFound error in this form: // ClientException: No cluster found for name: tf-acc-test-0o1f8 - if errs.IsA[*types.ResourceNotFoundException](err) { + if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) || tfawserr.ErrMessageContains(err, eks.ErrCodeClientException, "No cluster found for name:") { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, } } - if errs.IsA[*types.ClientException](err) { - if strings.Contains(err.Error(), "No cluster found for name:") { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - } if err != nil { return nil, err @@ -614,15 +590,15 @@ func FindClusterByName(ctx context.Context, client *eks.Client, name string) (*t return output.Cluster, nil } -func findClusterUpdateByTwoPartKey(ctx context.Context, client *eks.Client, name, id string) (*types.Update, error) { +func findClusterUpdateByTwoPartKey(ctx context.Context, conn *eks.EKS, name, id string) (*eks.Update, error) { input := &eks.DescribeUpdateInput{ Name: aws.String(name), UpdateId: aws.String(id), } - output, err := client.DescribeUpdate(ctx, input) + output, err := conn.DescribeUpdateWithContext(ctx, input) - if errs.IsA[*types.ResourceNotFoundException](err) { + if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -640,9 +616,9 @@ func findClusterUpdateByTwoPartKey(ctx context.Context, client *eks.Client, name return output.Update, nil } -func statusCluster(ctx 
context.Context, client *eks.Client, name string) retry.StateRefreshFunc { +func statusCluster(ctx context.Context, conn *eks.EKS, name string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindClusterByName(ctx, client, name) + output, err := FindClusterByName(ctx, conn, name) if tfresource.NotFound(err) { return nil, "", nil @@ -652,13 +628,13 @@ func statusCluster(ctx context.Context, client *eks.Client, name string) retry.S return nil, "", err } - return output, string(output.Status), nil + return output, aws.StringValue(output.Status), nil } } -func statusClusterUpdate(ctx context.Context, client *eks.Client, name, id string) retry.StateRefreshFunc { +func statusClusterUpdate(ctx context.Context, conn *eks.EKS, name, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := findClusterUpdateByTwoPartKey(ctx, client, name, id) + output, err := findClusterUpdateByTwoPartKey(ctx, conn, name, id) if tfresource.NotFound(err) { return nil, "", nil @@ -668,22 +644,56 @@ func statusClusterUpdate(ctx context.Context, client *eks.Client, name, id strin return nil, "", err } - return output, string(output.Status), nil + return output, aws.StringValue(output.Status), nil } } -func waitClusterUpdateSuccessful(ctx context.Context, client *eks.Client, name, id string, timeout time.Duration) (*types.Update, error) { +func waitClusterCreated(ctx context.Context, conn *eks.EKS, name string, timeout time.Duration) (*eks.Cluster, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(types.UpdateStatusInProgress), - Target: enum.Slice(types.UpdateStatusSuccessful), - Refresh: statusClusterUpdate(ctx, client, name, id), + Pending: []string{eks.ClusterStatusPending, eks.ClusterStatusCreating}, + Target: []string{eks.ClusterStatusActive}, + Refresh: statusCluster(ctx, conn, name), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := 
outputRaw.(*types.Update); ok { - if status := output.Status; status == types.UpdateStatusCancelled || status == types.UpdateStatusFailed { + if output, ok := outputRaw.(*eks.Cluster); ok { + return output, err + } + + return nil, err +} + +func waitClusterDeleted(ctx context.Context, conn *eks.EKS, name string, timeout time.Duration) (*eks.Cluster, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{eks.ClusterStatusActive, eks.ClusterStatusDeleting}, + Target: []string{}, + Refresh: statusCluster(ctx, conn, name), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*eks.Cluster); ok { + return output, err + } + + return nil, err +} + +func waitClusterUpdateSuccessful(ctx context.Context, conn *eks.EKS, name, id string, timeout time.Duration) (*eks.Update, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: []string{eks.UpdateStatusInProgress}, + Target: []string{eks.UpdateStatusSuccessful}, + Refresh: statusClusterUpdate(ctx, conn, name, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*eks.Update); ok { + if status := aws.StringValue(output.Status); status == eks.UpdateStatusCancelled || status == eks.UpdateStatusFailed { tfresource.SetLastError(err, ErrorDetailsError(output.Errors)) } @@ -693,12 +703,12 @@ func waitClusterUpdateSuccessful(ctx context.Context, client *eks.Client, name, return nil, err } -func expandEncryptionConfig(tfList []interface{}) []types.EncryptionConfig { +func expandEncryptionConfig(tfList []interface{}) []*eks.EncryptionConfig { if len(tfList) == 0 { return nil } - var apiObjects []types.EncryptionConfig + var apiObjects []*eks.EncryptionConfig for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -707,35 +717,28 @@ func expandEncryptionConfig(tfList []interface{}) []types.EncryptionConfig { continue } - apiObject := 
&types.EncryptionConfig{ + apiObject := &eks.EncryptionConfig{ Provider: expandProvider(tfMap["provider"].([]interface{})), } if v, ok := tfMap["resources"].(*schema.Set); ok && v.Len() > 0 { - apiObject.Resources = make([]string, v.Len()) - for i, r := range v.List() { - apiObject.Resources[i] = r.(string) - } + apiObject.Resources = flex.ExpandStringSet(v) } - apiObjects = append(apiObjects, *apiObject) + apiObjects = append(apiObjects, apiObject) } return apiObjects } -func expandProvider(tfList []interface{}) *types.Provider { - if len(tfList) == 0 { - return nil - } - +func expandProvider(tfList []interface{}) *eks.Provider { tfMap, ok := tfList[0].(map[string]interface{}) if !ok { return nil } - apiObject := &types.Provider{} + apiObject := &eks.Provider{} if v, ok := tfMap["key_arn"].(string); ok && v != "" { apiObject.KeyArn = aws.String(v) @@ -744,18 +747,14 @@ func expandProvider(tfList []interface{}) *types.Provider { return apiObject } -func expandOutpostConfigRequest(tfList []interface{}) *types.OutpostConfigRequest { - if len(tfList) == 0 { - return nil - } - - tfMap, ok := tfList[0].(map[string]interface{}) +func expandOutpostConfigRequest(l []interface{}) *eks.OutpostConfigRequest { + tfMap, ok := l[0].(map[string]interface{}) if !ok { return nil } - outpostConfigRequest := &types.OutpostConfigRequest{} + outpostConfigRequest := &eks.OutpostConfigRequest{} if v, ok := tfMap["control_plane_instance_type"].(string); ok && v != "" { outpostConfigRequest.ControlPlaneInstanceType = aws.String(v) @@ -766,17 +765,13 @@ func expandOutpostConfigRequest(tfList []interface{}) *types.OutpostConfigReques } if v, ok := tfMap["outpost_arns"].(*schema.Set); ok && v.Len() > 0 { - outpostArns := make([]string, 0, v.Len()) - for _, outpostArn := range flex.ExpandStringSet(v) { - outpostArns = append(outpostArns, *outpostArn) - } - outpostConfigRequest.OutpostArns = outpostArns + outpostConfigRequest.OutpostArns = flex.ExpandStringSet(v) } return outpostConfigRequest } 
-func expandControlPlanePlacement(tfList []interface{}) *types.ControlPlanePlacementRequest { +func expandControlPlanePlacement(tfList []interface{}) *eks.ControlPlanePlacementRequest { if len(tfList) == 0 { return nil } @@ -787,7 +782,7 @@ func expandControlPlanePlacement(tfList []interface{}) *types.ControlPlanePlacem return nil } - apiObject := &types.ControlPlanePlacementRequest{} + apiObject := &eks.ControlPlanePlacementRequest{} if v, ok := tfMap["group_name"].(string); ok && v != "" { apiObject.GroupName = aws.String(v) @@ -796,133 +791,100 @@ func expandControlPlanePlacement(tfList []interface{}) *types.ControlPlanePlacem return apiObject } -func expandVPCConfigRequestForCreate(tfList []interface{}) *types.VpcConfigRequest { - if len(tfList) == 0 { +func expandVPCConfigRequestForCreate(l []interface{}) *eks.VpcConfigRequest { + if len(l) == 0 { return nil } - m := tfList[0].(map[string]interface{}) - - securityGroupIds := flex.ExpandStringSet(m["security_group_ids"].(*schema.Set)) - securityGroupIdsSlice := make([]string, len(securityGroupIds)) - for i, id := range securityGroupIds { - securityGroupIdsSlice[i] = *id - } - - subnetIds := flex.ExpandStringSet(m["subnet_ids"].(*schema.Set)) - subnetIdsSlice := make([]string, len(subnetIds)) - for i, id := range subnetIds { - subnetIdsSlice[i] = *id - } + m := l[0].(map[string]interface{}) - vpcConfigRequest := &types.VpcConfigRequest{ + vpcConfigRequest := &eks.VpcConfigRequest{ EndpointPrivateAccess: aws.Bool(m["endpoint_private_access"].(bool)), EndpointPublicAccess: aws.Bool(m["endpoint_public_access"].(bool)), - SecurityGroupIds: securityGroupIdsSlice, - SubnetIds: subnetIdsSlice, + SecurityGroupIds: flex.ExpandStringSet(m["security_group_ids"].(*schema.Set)), + SubnetIds: flex.ExpandStringSet(m["subnet_ids"].(*schema.Set)), } if v, ok := m["public_access_cidrs"].(*schema.Set); ok && v.Len() > 0 { - publicAccessCidrs := flex.ExpandStringSet(v) - vpcConfigRequest.PublicAccessCidrs = make([]string, 
len(publicAccessCidrs)) - for i, cidr := range publicAccessCidrs { - vpcConfigRequest.PublicAccessCidrs[i] = *cidr - } + vpcConfigRequest.PublicAccessCidrs = flex.ExpandStringSet(v) } return vpcConfigRequest } -func expandVPCConfigRequestForUpdate(tfList []interface{}) *types.VpcConfigRequest { - if len(tfList) == 0 { +func expandVPCConfigRequestForUpdate(l []interface{}) *eks.VpcConfigRequest { + if len(l) == 0 { return nil } - m := tfList[0].(map[string]interface{}) + m := l[0].(map[string]interface{}) - vpcConfigRequest := &types.VpcConfigRequest{ + vpcConfigRequest := &eks.VpcConfigRequest{ EndpointPrivateAccess: aws.Bool(m["endpoint_private_access"].(bool)), EndpointPublicAccess: aws.Bool(m["endpoint_public_access"].(bool)), } if v, ok := m["public_access_cidrs"].(*schema.Set); ok && v.Len() > 0 { - publicAccessCidrs := flex.ExpandStringSet(v) - vpcConfigRequest.PublicAccessCidrs = make([]string, len(publicAccessCidrs)) - for i, cidr := range publicAccessCidrs { - vpcConfigRequest.PublicAccessCidrs[i] = *cidr - } + vpcConfigRequest.PublicAccessCidrs = flex.ExpandStringSet(v) } return vpcConfigRequest } -func expandKubernetesNetworkConfigRequest(tfList []interface{}) *types.KubernetesNetworkConfigRequest { - if len(tfList) == 0 { +func expandKubernetesNetworkConfigRequest(tfList []interface{}) *eks.KubernetesNetworkConfigRequest { + tfMap, ok := tfList[0].(map[string]interface{}) + + if !ok { return nil } - m := tfList[0].(map[string]interface{}) + apiObject := &eks.KubernetesNetworkConfigRequest{} - apiObject := &types.KubernetesNetworkConfigRequest{} - - if v, ok := m["service_ipv4_cidr"].(string); ok && v != "" { + if v, ok := tfMap["service_ipv4_cidr"].(string); ok && v != "" { apiObject.ServiceIpv4Cidr = aws.String(v) } - if v, ok := m["ip_family"]; ok && v != "" { - apiObject.IpFamily = v.(types.IpFamily) + if v, ok := tfMap["ip_family"].(string); ok && v != "" { + apiObject.IpFamily = aws.String(v) } return apiObject } -func 
expandLogging(vEnabledLogTypes *schema.Set) *types.Logging { - logTypes := []interface{}{} - - for _, logType := range enum.Values[types.LogType]() { - logTypes = append(logTypes, logType) - } - aLogTypes := schema.NewSet(schema.HashString, logTypes) - - enabledLogTypes := make([]types.LogType, len(vEnabledLogTypes.List())) - for i, s := range vEnabledLogTypes.List() { - enabledLogTypes[i] = types.LogType(s.(string)) - } - - diff := aLogTypes.Difference(vEnabledLogTypes) - - disabledLogTypes := make([]types.LogType, len(diff.List())) - for i, s := range diff.List() { - disabledLogTypes[i] = types.LogType(s.(string)) +func expandLogging(vEnabledLogTypes *schema.Set) *eks.Logging { + vEksLogTypes := []interface{}{} + for _, eksLogType := range eks.LogType_Values() { + vEksLogTypes = append(vEksLogTypes, eksLogType) } + vAllLogTypes := schema.NewSet(schema.HashString, vEksLogTypes) - return &types.Logging{ - ClusterLogging: []types.LogSetup{ + return &eks.Logging{ + ClusterLogging: []*eks.LogSetup{ { Enabled: aws.Bool(true), - Types: enabledLogTypes, + Types: flex.ExpandStringSet(vEnabledLogTypes), }, { Enabled: aws.Bool(false), - Types: disabledLogTypes, + Types: flex.ExpandStringSet(vAllLogTypes.Difference(vEnabledLogTypes)), }, }, } } -func flattenCertificate(certificate *types.Certificate) []map[string]interface{} { +func flattenCertificate(certificate *eks.Certificate) []map[string]interface{} { if certificate == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "data": certificate.Data, + "data": aws.StringValue(certificate.Data), } return []map[string]interface{}{m} } -func flattenIdentity(identity *types.Identity) []map[string]interface{} { +func flattenIdentity(identity *eks.Identity) []map[string]interface{} { if identity == nil { return []map[string]interface{}{} } @@ -934,19 +896,19 @@ func flattenIdentity(identity *types.Identity) []map[string]interface{} { return []map[string]interface{}{m} } -func flattenOIDC(oidc *types.OIDC) 
[]map[string]interface{} { +func flattenOIDC(oidc *eks.OIDC) []map[string]interface{} { if oidc == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "issuer": oidc.Issuer, + "issuer": aws.StringValue(oidc.Issuer), } return []map[string]interface{}{m} } -func flattenEncryptionConfig(apiObjects []types.EncryptionConfig) []interface{} { +func flattenEncryptionConfig(apiObjects []*eks.EncryptionConfig) []interface{} { if len(apiObjects) == 0 { return nil } @@ -956,7 +918,7 @@ func flattenEncryptionConfig(apiObjects []types.EncryptionConfig) []interface{} for _, apiObject := range apiObjects { tfMap := map[string]interface{}{ "provider": flattenProvider(apiObject.Provider), - "resources": apiObject.Resources, + "resources": aws.StringValueSlice(apiObject.Resources), } tfList = append(tfList, tfMap) @@ -965,58 +927,43 @@ func flattenEncryptionConfig(apiObjects []types.EncryptionConfig) []interface{} return tfList } -func flattenProvider(apiObject *types.Provider) []interface{} { +func flattenProvider(apiObject *eks.Provider) []interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{ - "key_arn": apiObject.KeyArn, + "key_arn": aws.StringValue(apiObject.KeyArn), } return []interface{}{tfMap} } -func flattenVPCConfigResponse(vpcConfig *types.VpcConfigResponse) []map[string]interface{} { +func flattenVPCConfigResponse(vpcConfig *eks.VpcConfigResponse) []map[string]interface{} { if vpcConfig == nil { return []map[string]interface{}{} } - securityGroupIds := make([]*string, len(vpcConfig.SecurityGroupIds)) - for i, id := range vpcConfig.SecurityGroupIds { - securityGroupIds[i] = aws.String(id) - } - - subnetIds := make([]*string, len(vpcConfig.SubnetIds)) - for i, id := range vpcConfig.SubnetIds { - subnetIds[i] = aws.String(id) - } - - publicAccessCidrs := make([]*string, len(vpcConfig.PublicAccessCidrs)) - for i, cidr := range vpcConfig.PublicAccessCidrs { - publicAccessCidrs[i] = aws.String(cidr) - } - m := 
map[string]interface{}{ - "cluster_security_group_id": vpcConfig.ClusterSecurityGroupId, - "endpoint_private_access": vpcConfig.EndpointPrivateAccess, - "endpoint_public_access": vpcConfig.EndpointPublicAccess, - "security_group_ids": flex.FlattenStringSet(securityGroupIds), - "subnet_ids": flex.FlattenStringSet(subnetIds), - "public_access_cidrs": flex.FlattenStringSet(publicAccessCidrs), - "vpc_id": vpcConfig.VpcId, + "cluster_security_group_id": aws.StringValue(vpcConfig.ClusterSecurityGroupId), + "endpoint_private_access": aws.BoolValue(vpcConfig.EndpointPrivateAccess), + "endpoint_public_access": aws.BoolValue(vpcConfig.EndpointPublicAccess), + "security_group_ids": flex.FlattenStringSet(vpcConfig.SecurityGroupIds), + "subnet_ids": flex.FlattenStringSet(vpcConfig.SubnetIds), + "public_access_cidrs": flex.FlattenStringSet(vpcConfig.PublicAccessCidrs), + "vpc_id": aws.StringValue(vpcConfig.VpcId), } return []map[string]interface{}{m} } -func flattenLogging(logging *types.Logging) *schema.Set { - enabledLogTypes := []types.LogType{} +func flattenLogging(logging *eks.Logging) *schema.Set { + enabledLogTypes := []*string{} if logging != nil { logSetups := logging.ClusterLogging for _, logSetup := range logSetups { - if !aws.ToBool(logSetup.Enabled) { + if logSetup == nil || !aws.BoolValue(logSetup.Enabled) { continue } @@ -1024,49 +971,44 @@ func flattenLogging(logging *types.Logging) *schema.Set { } } - enabledLogTypePointers := make([]*string, len(enabledLogTypes)) - for i, logType := range enabledLogTypes { - enabledLogTypePointers[i] = aws.String(string(logType)) - } - - return flex.FlattenStringSet(enabledLogTypePointers) + return flex.FlattenStringSet(enabledLogTypes) } -func flattenKubernetesNetworkConfigResponse(apiObject *types.KubernetesNetworkConfigResponse) []interface{} { +func flattenKubernetesNetworkConfigResponse(apiObject *eks.KubernetesNetworkConfigResponse) []interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{ - 
"service_ipv4_cidr": apiObject.ServiceIpv4Cidr, - "service_ipv6_cidr": apiObject.ServiceIpv6Cidr, - "ip_family": apiObject.IpFamily, + "service_ipv4_cidr": aws.StringValue(apiObject.ServiceIpv4Cidr), + "service_ipv6_cidr": aws.StringValue(apiObject.ServiceIpv6Cidr), + "ip_family": aws.StringValue(apiObject.IpFamily), } return []interface{}{tfMap} } -func flattenOutpostConfigResponse(apiObject *types.OutpostConfigResponse) []interface{} { +func flattenOutpostConfigResponse(apiObject *eks.OutpostConfigResponse) []interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{ - "control_plane_instance_type": apiObject.ControlPlaneInstanceType, + "control_plane_instance_type": aws.StringValue(apiObject.ControlPlaneInstanceType), "control_plane_placement": flattenControlPlanePlacementResponse(apiObject.ControlPlanePlacement), - "outpost_arns": apiObject.OutpostArns, + "outpost_arns": aws.StringValueSlice(apiObject.OutpostArns), } return []interface{}{tfMap} } -func flattenControlPlanePlacementResponse(apiObject *types.ControlPlanePlacementResponse) []interface{} { +func flattenControlPlanePlacementResponse(apiObject *eks.ControlPlanePlacementResponse) []interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{ - "group_name": apiObject.GroupName, + "group_name": aws.StringValue(apiObject.GroupName), } return []interface{}{tfMap} diff --git a/internal/service/eks/cluster_auth_data_source_test.go b/internal/service/eks/cluster_auth_data_source_test.go index f4e4feda9a0..2e58cd74b3f 100644 --- a/internal/service/eks/cluster_auth_data_source_test.go +++ b/internal/service/eks/cluster_auth_data_source_test.go @@ -7,7 +7,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go/service/eks" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -20,7 +20,7 
@@ func TestAccEKSClusterAuthDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { diff --git a/internal/service/eks/cluster_data_source.go b/internal/service/eks/cluster_data_source.go index c5895beabd6..18cc7eeeb9f 100644 --- a/internal/service/eks/cluster_data_source.go +++ b/internal/service/eks/cluster_data_source.go @@ -5,9 +5,8 @@ package eks import ( "context" - "time" - "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -191,11 +190,11 @@ func DataSourceCluster() *schema.Resource { } func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig name := d.Get("name").(string) - cluster, err := FindClusterByName(ctx, client, name) + cluster, err := FindClusterByName(ctx, conn, name) if err != nil { return diag.Errorf("reading EKS Cluster (%s): %s", name, err) @@ -210,7 +209,7 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta int if cluster.OutpostConfig != nil { d.Set("cluster_id", cluster.Id) } - d.Set("created_at", aws.ToTime(cluster.CreatedAt).Format(time.RFC3339)) + d.Set("created_at", aws.TimeValue(cluster.CreatedAt).String()) if err := d.Set("enabled_cluster_log_types", flattenLogging(cluster.Logging)); err != nil { return diag.Errorf("setting enabled_cluster_log_types: %s", err) } diff --git a/internal/service/eks/cluster_data_source_test.go 
b/internal/service/eks/cluster_data_source_test.go index 1bc5331e486..4cab4dd9196 100644 --- a/internal/service/eks/cluster_data_source_test.go +++ b/internal/service/eks/cluster_data_source_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -21,7 +21,7 @@ func TestAccEKSClusterDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -70,7 +70,7 @@ func TestAccEKSClusterDataSource_outpost(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOutpostsOutposts(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ diff --git a/internal/service/eks/cluster_test.go b/internal/service/eks/cluster_test.go index 52bed0cd2e6..f4a8e94dae8 100644 --- a/internal/service/eks/cluster_test.go +++ b/internal/service/eks/cluster_test.go @@ -11,9 +11,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" 
"github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -24,19 +23,19 @@ import ( ) const ( - clusterVersionUpgradeInitial = "1.27" - clusterVersionUpgradeUpdated = "1.28" + clusterVersionUpgradeInitial = "1.21" + clusterVersionUpgradeUpdated = "1.22" ) func TestAccEKSCluster_basic(t *testing.T) { ctx := acctest.Context(t) - var cluster types.Cluster + var cluster eks.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -58,7 +57,7 @@ func TestAccEKSCluster_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.ip_family", "ipv4"), resource.TestMatchResourceAttr(resourceName, "platform_version", regexache.MustCompile(`^eks\.\d+$`)), resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), - resource.TestCheckResourceAttr(resourceName, "status", string(types.ClusterStatusActive)), + resource.TestCheckResourceAttr(resourceName, "status", eks.ClusterStatusActive), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestMatchResourceAttr(resourceName, "version", regexache.MustCompile(`^\d+\.\d+$`)), resource.TestCheckResourceAttr(resourceName, "vpc_config.#", "1"), @@ -80,13 +79,13 @@ func TestAccEKSCluster_basic(t *testing.T) { func TestAccEKSCluster_disappears(t *testing.T) { ctx := acctest.Context(t) - var cluster types.Cluster + var cluster eks.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ 
PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -104,14 +103,14 @@ func TestAccEKSCluster_disappears(t *testing.T) { func TestAccEKSCluster_Encryption_create(t *testing.T) { ctx := acctest.Context(t) - var cluster types.Cluster + var cluster eks.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" kmsKeyResourceName := "aws_kms_key.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -136,14 +135,14 @@ func TestAccEKSCluster_Encryption_create(t *testing.T) { func TestAccEKSCluster_Encryption_update(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 types.Cluster + var cluster1, cluster2 eks.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" kmsKeyResourceName := "aws_kms_key.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -177,14 +176,14 @@ func TestAccEKSCluster_Encryption_update(t *testing.T) { // https://github.com/hashicorp/terraform-provider-aws/issues/19968. 
func TestAccEKSCluster_Encryption_versionUpdate(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 types.Cluster + var cluster1, cluster2 eks.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" kmsKeyResourceName := "aws_kms_key.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -222,13 +221,13 @@ func TestAccEKSCluster_Encryption_versionUpdate(t *testing.T) { func TestAccEKSCluster_version(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 types.Cluster + var cluster1, cluster2 eks.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -258,13 +257,13 @@ func TestAccEKSCluster_version(t *testing.T) { func TestAccEKSCluster_logging(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 types.Cluster + var cluster1, cluster2 eks.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: 
testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -306,13 +305,13 @@ func TestAccEKSCluster_logging(t *testing.T) { func TestAccEKSCluster_tags(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2, cluster3 types.Cluster + var cluster1, cluster2, cluster3 eks.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -352,13 +351,13 @@ func TestAccEKSCluster_tags(t *testing.T) { func TestAccEKSCluster_VPC_securityGroupIDs(t *testing.T) { ctx := acctest.Context(t) - var cluster types.Cluster + var cluster eks.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -381,13 +380,13 @@ func TestAccEKSCluster_VPC_securityGroupIDs(t *testing.T) { func TestAccEKSCluster_VPC_endpointPrivateAccess(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2, cluster3 types.Cluster + var cluster1, cluster2, cluster3 eks.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, 
eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -428,13 +427,13 @@ func TestAccEKSCluster_VPC_endpointPrivateAccess(t *testing.T) { func TestAccEKSCluster_VPC_endpointPublicAccess(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2, cluster3 types.Cluster + var cluster1, cluster2, cluster3 eks.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -475,13 +474,13 @@ func TestAccEKSCluster_VPC_endpointPublicAccess(t *testing.T) { func TestAccEKSCluster_VPC_publicAccessCIDRs(t *testing.T) { ctx := acctest.Context(t) - var cluster types.Cluster + var cluster eks.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -512,13 +511,13 @@ func TestAccEKSCluster_VPC_publicAccessCIDRs(t *testing.T) { func TestAccEKSCluster_Network_serviceIPv4CIDR(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 types.Cluster + var cluster1, cluster2 eks.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, 
t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -575,13 +574,13 @@ func TestAccEKSCluster_Network_serviceIPv4CIDR(t *testing.T) { func TestAccEKSCluster_Network_ipFamily(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 types.Cluster + var cluster1, cluster2 eks.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -626,14 +625,14 @@ func TestAccEKSCluster_Network_ipFamily(t *testing.T) { func TestAccEKSCluster_Outpost_create(t *testing.T) { ctx := acctest.Context(t) - var cluster types.Cluster + var cluster eks.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" controlPlaneInstanceType := "m5d.large" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOutpostsOutposts(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -658,14 +657,14 @@ func TestAccEKSCluster_Outpost_create(t *testing.T) { func TestAccEKSCluster_Outpost_placement(t *testing.T) { ctx := acctest.Context(t) - var cluster types.Cluster + var cluster eks.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := 
"aws_eks_cluster.test" controlPlaneInstanceType := "m5d.large" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOutpostsOutposts(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -689,7 +688,7 @@ func TestAccEKSCluster_Outpost_placement(t *testing.T) { }) } -func testAccCheckClusterExists(ctx context.Context, resourceName string, cluster *types.Cluster) resource.TestCheckFunc { +func testAccCheckClusterExists(ctx context.Context, resourceName string, cluster *eks.Cluster) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] if !ok { @@ -699,15 +698,15 @@ func testAccCheckClusterExists(ctx context.Context, resourceName string, cluster return fmt.Errorf("No EKS Cluster ID is set") } - client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) - output, err := tfeks.FindClusterByName(ctx, client, rs.Primary.ID) + output, err := tfeks.FindClusterByName(ctx, conn, rs.Primary.ID) if err != nil { return err } - cluster = output + *cluster = *output return nil } @@ -720,9 +719,9 @@ func testAccCheckClusterDestroy(ctx context.Context) resource.TestCheckFunc { continue } - client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) - _, err := tfeks.FindClusterByName(ctx, client, rs.Primary.ID) + _, err := tfeks.FindClusterByName(ctx, conn, rs.Primary.ID) if tfresource.NotFound(err) { continue @@ -739,9 +738,9 @@ func testAccCheckClusterDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckClusterRecreated(i, j *types.Cluster) resource.TestCheckFunc { +func testAccCheckClusterRecreated(i, j 
*eks.Cluster) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.ToTime(i.CreatedAt).Equal(aws.ToTime(j.CreatedAt)) { + if aws.TimeValue(i.CreatedAt).Equal(aws.TimeValue(j.CreatedAt)) { return errors.New("EKS Cluster was not recreated") } @@ -749,9 +748,9 @@ func testAccCheckClusterRecreated(i, j *types.Cluster) resource.TestCheckFunc { } } -func testAccCheckClusterNotRecreated(i, j *types.Cluster) resource.TestCheckFunc { +func testAccCheckClusterNotRecreated(i, j *eks.Cluster) resource.TestCheckFunc { return func(s *terraform.State) error { - if !aws.ToTime(i.CreatedAt).Equal(aws.ToTime(j.CreatedAt)) { + if !aws.TimeValue(i.CreatedAt).Equal(aws.TimeValue(j.CreatedAt)) { return errors.New("EKS Cluster was recreated") } @@ -760,11 +759,11 @@ func testAccCheckClusterNotRecreated(i, j *types.Cluster) resource.TestCheckFunc } func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) input := &eks.ListClustersInput{} - _, err := conn.ListClusters(ctx, input) + _, err := conn.ListClustersWithContext(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) diff --git a/internal/service/eks/clusters_data_source.go b/internal/service/eks/clusters_data_source.go index e47c9001214..ffdd46b1a90 100644 --- a/internal/service/eks/clusters_data_source.go +++ b/internal/service/eks/clusters_data_source.go @@ -6,7 +6,8 @@ package eks import ( "context" - "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/eks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -30,22 +31,27 @@ func DataSourceClusters() *schema.Resource { func dataSourceClustersRead(ctx context.Context, d *schema.ResourceData, meta 
interface{}) diag.Diagnostics { var diags diag.Diagnostics - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) - var clusters []string + var clusters []*string - paginator := eks.NewListClustersPaginator(client, &eks.ListClustersInput{}) - for paginator.HasMorePages() { - output, err := paginator.NextPage(ctx) - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing EKS Clusters: %s", err) + err := conn.ListClustersPagesWithContext(ctx, &eks.ListClustersInput{}, func(page *eks.ListClustersOutput, lastPage bool) bool { + if page == nil { + return !lastPage } - clusters = append(clusters, output.Clusters...) + clusters = append(clusters, page.Clusters...) + + return !lastPage + }) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "listing EKS Clusters: %s", err) } d.SetId(meta.(*conns.AWSClient).Region) - d.Set("names", clusters) + + d.Set("names", aws.StringValueSlice(clusters)) return diags } diff --git a/internal/service/eks/clusters_data_source_test.go b/internal/service/eks/clusters_data_source_test.go index c7da75e874b..a35a5c0f752 100644 --- a/internal/service/eks/clusters_data_source_test.go +++ b/internal/service/eks/clusters_data_source_test.go @@ -6,7 +6,7 @@ package eks_test import ( "testing" - "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -19,7 +19,7 @@ func TestAccEKSClustersDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: 
[]resource.TestStep{ diff --git a/internal/service/eks/consts.go b/internal/service/eks/consts.go index cb821b1bc14..4bf92304f5c 100644 --- a/internal/service/eks/consts.go +++ b/internal/service/eks/consts.go @@ -11,6 +11,16 @@ const ( IdentityProviderConfigTypeOIDC = "oidc" ) +const ( + ResourcesSecrets = "secrets" +) + +func Resources_Values() []string { + return []string{ + ResourcesSecrets, + } +} + const ( propagationTimeout = 2 * time.Minute ) diff --git a/internal/service/eks/errors.go b/internal/service/eks/errors.go index 809e42933d2..ee8ac774ab2 100644 --- a/internal/service/eks/errors.go +++ b/internal/service/eks/errors.go @@ -7,84 +7,84 @@ import ( "fmt" "strings" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/eks" multierror "github.com/hashicorp/go-multierror" ) -func AddonIssueError(apiObject *types.AddonIssue) error { +func AddonIssueError(apiObject *eks.AddonIssue) error { if apiObject == nil { return nil } - return awserr.New(string(apiObject.Code), aws.ToString(apiObject.Message), nil) + return awserr.New(aws.StringValue(apiObject.Code), aws.StringValue(apiObject.Message), nil) } -func AddonIssuesError(apiObjects []types.AddonIssue) error { +func AddonIssuesError(apiObjects []*eks.AddonIssue) error { var errors *multierror.Error for _, apiObject := range apiObjects { - if &apiObject == nil { + if apiObject == nil { continue } - err := AddonIssueError(&apiObject) + err := AddonIssueError(apiObject) if err != nil { - errors = multierror.Append(errors, fmt.Errorf("%s: %w", strings.Join(apiObject.ResourceIds, ", "), err)) + errors = multierror.Append(errors, fmt.Errorf("%s: %w", strings.Join(aws.StringValueSlice(apiObject.ResourceIds), ", "), err)) } } return errors.ErrorOrNil() } -func ErrorDetailError(apiObject types.ErrorDetail) error { - if &apiObject == nil { +func ErrorDetailError(apiObject 
*eks.ErrorDetail) error { + if apiObject == nil { return nil } - return awserr.New(string(apiObject.ErrorCode), aws.ToString(apiObject.ErrorMessage), nil) + return awserr.New(aws.StringValue(apiObject.ErrorCode), aws.StringValue(apiObject.ErrorMessage), nil) } -func ErrorDetailsError(apiObjects []types.ErrorDetail) error { +func ErrorDetailsError(apiObjects []*eks.ErrorDetail) error { var errors *multierror.Error for _, apiObject := range apiObjects { - if &apiObject == nil { + if apiObject == nil { continue } err := ErrorDetailError(apiObject) if err != nil { - errors = multierror.Append(errors, fmt.Errorf("%s: %w", strings.Join(apiObject.ResourceIds, ", "), err)) + errors = multierror.Append(errors, fmt.Errorf("%s: %w", strings.Join(aws.StringValueSlice(apiObject.ResourceIds), ", "), err)) } } return errors.ErrorOrNil() } -func IssueError(apiObject *types.Issue) error { +func IssueError(apiObject *eks.Issue) error { if apiObject == nil { return nil } - return awserr.New(string(apiObject.Code), aws.ToString(apiObject.Message), nil) + return awserr.New(aws.StringValue(apiObject.Code), aws.StringValue(apiObject.Message), nil) } -func IssuesError(apiObjects []types.Issue) error { +func IssuesError(apiObjects []*eks.Issue) error { var errors *multierror.Error for _, apiObject := range apiObjects { - if &apiObject == nil { + if apiObject == nil { continue } - err := IssueError(&apiObject) + err := IssueError(apiObject) if err != nil { - errors = multierror.Append(errors, fmt.Errorf("%s: %w", strings.Join(apiObject.ResourceIds, ", "), err)) + errors = multierror.Append(errors, fmt.Errorf("%s: %w", strings.Join(aws.StringValueSlice(apiObject.ResourceIds), ", "), err)) } } diff --git a/internal/service/eks/fargate_profile.go b/internal/service/eks/fargate_profile.go index a4addb0176e..55a4954086d 100644 --- a/internal/service/eks/fargate_profile.go +++ b/internal/service/eks/fargate_profile.go @@ -7,19 +7,17 @@ import ( "context" "fmt" "log" - "strings" "time" - 
"github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/eks" + "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -111,7 +109,7 @@ func ResourceFargateProfile() *schema.Resource { func resourceFargateProfileCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) clusterName := d.Get("cluster_name").(string) fargateProfileName := d.Get("fargate_profile_name").(string) @@ -122,7 +120,7 @@ func resourceFargateProfileCreate(ctx context.Context, d *schema.ResourceData, m FargateProfileName: aws.String(fargateProfileName), PodExecutionRoleArn: aws.String(d.Get("pod_execution_role_arn").(string)), Selectors: expandFargateProfileSelectors(d.Get("selector").(*schema.Set).List()), - Subnets: flex.ExpandStringValueSet(d.Get("subnet_ids").(*schema.Set)), + Subnets: flex.ExpandStringSet(d.Get("subnet_ids").(*schema.Set)), Tags: getTagsIn(ctx), } @@ -132,14 +130,12 @@ func resourceFargateProfileCreate(ctx context.Context, d *schema.ResourceData, m defer conns.GlobalMutexKV.Unlock(mutexKey) err := retry.RetryContext(ctx, propagationTimeout, func() *retry.RetryError 
{ - _, err := client.CreateFargateProfile(ctx, input) + _, err := conn.CreateFargateProfileWithContext(ctx, input) // Retry for IAM eventual consistency on error: // InvalidParameterException: Misconfigured PodExecutionRole Trust Policy; Please add the eks-fargate-pods.amazonaws.com Service Principal - if errs.IsA[*types.InvalidParameterException](err) { - if strings.Contains(err.Error(), "Misconfigured PodExecutionRole Trust Policy") { - return retry.RetryableError(err) - } + if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "Misconfigured PodExecutionRole Trust Policy") { + return retry.RetryableError(err) } if err != nil { @@ -150,7 +146,7 @@ func resourceFargateProfileCreate(ctx context.Context, d *schema.ResourceData, m }) if tfresource.TimedOut(err) { - _, err = client.CreateFargateProfile(ctx, input) + _, err = conn.CreateFargateProfileWithContext(ctx, input) } if err != nil { @@ -159,13 +155,8 @@ func resourceFargateProfileCreate(ctx context.Context, d *schema.ResourceData, m d.SetId(profileID) - waiter := eks.NewFargateProfileActiveWaiter(client) - waiterParams := &eks.DescribeFargateProfileInput{ - ClusterName: aws.String(clusterName), - FargateProfileName: aws.String(fargateProfileName), - } + _, err = waitFargateProfileCreated(ctx, conn, clusterName, fargateProfileName, d.Timeout(schema.TimeoutCreate)) - err = waiter.Wait(ctx, waiterParams, d.Timeout(schema.TimeoutCreate)) if err != nil { return sdkdiag.AppendErrorf(diags, "waiting for EKS Fargate Profile (%s) to create: %s", d.Id(), err) } @@ -175,7 +166,7 @@ func resourceFargateProfileCreate(ctx context.Context, d *schema.ResourceData, m func resourceFargateProfileRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) clusterName, fargateProfileName, err := FargateProfileParseResourceID(d.Id()) @@ -183,7 +174,7 @@ func 
resourceFargateProfileRead(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendErrorf(diags, "reading EKS Fargate Profile (%s): %s", d.Id(), err) } - fargateProfile, err := FindFargateProfileByClusterNameAndFargateProfileName(ctx, client, clusterName, fargateProfileName) + fargateProfile, err := FindFargateProfileByClusterNameAndFargateProfileName(ctx, conn, clusterName, fargateProfileName) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EKS Fargate Profile (%s) not found, removing from state", d.Id()) @@ -206,7 +197,7 @@ func resourceFargateProfileRead(ctx context.Context, d *schema.ResourceData, met d.Set("status", fargateProfile.Status) - if err := d.Set("subnet_ids", fargateProfile.Subnets); err != nil { + if err := d.Set("subnet_ids", aws.StringValueSlice(fargateProfile.Subnets)); err != nil { return sdkdiag.AppendErrorf(diags, "setting subnet_ids: %s", err) } @@ -225,7 +216,7 @@ func resourceFargateProfileUpdate(ctx context.Context, d *schema.ResourceData, m func resourceFargateProfileDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) clusterName, fargateProfileName, err := FargateProfileParseResourceID(d.Id()) @@ -239,12 +230,12 @@ func resourceFargateProfileDelete(ctx context.Context, d *schema.ResourceData, m defer conns.GlobalMutexKV.Unlock(mutexKey) log.Printf("[DEBUG] Deleting EKS Fargate Profile: %s", d.Id()) - _, err = client.DeleteFargateProfile(ctx, &eks.DeleteFargateProfileInput{ + _, err = conn.DeleteFargateProfileWithContext(ctx, &eks.DeleteFargateProfileInput{ ClusterName: aws.String(clusterName), FargateProfileName: aws.String(fargateProfileName), }) - if errs.IsA[*types.ResourceNotFoundException](err) { + if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { return diags } @@ -252,13 +243,8 @@ func 
resourceFargateProfileDelete(ctx context.Context, d *schema.ResourceData, m return sdkdiag.AppendErrorf(diags, "deleting EKS Fargate Profile (%s): %s", d.Id(), err) } - waiter := eks.NewFargateProfileDeletedWaiter(client) - waiterParams := &eks.DescribeFargateProfileInput{ - ClusterName: aws.String(clusterName), - FargateProfileName: aws.String(fargateProfileName), - } + _, err = waitFargateProfileDeleted(ctx, conn, clusterName, fargateProfileName, d.Timeout(schema.TimeoutDelete)) - err = waiter.Wait(ctx, waiterParams, d.Timeout(schema.TimeoutDelete)) if err != nil { return sdkdiag.AppendErrorf(diags, "deleting EKS Fargate Profile (%s): waiting for completion: %s", d.Id(), err) } @@ -266,27 +252,24 @@ func resourceFargateProfileDelete(ctx context.Context, d *schema.ResourceData, m return diags } -func expandFargateProfileSelectors(l []interface{}) []types.FargateProfileSelector { +func expandFargateProfileSelectors(l []interface{}) []*eks.FargateProfileSelector { if len(l) == 0 { return nil } - fargateProfileSelectors := make([]types.FargateProfileSelector, 0, len(l)) + fargateProfileSelectors := make([]*eks.FargateProfileSelector, 0, len(l)) for _, mRaw := range l { m, ok := mRaw.(map[string]interface{}) + if !ok { continue } - fargateProfileSelector := types.FargateProfileSelector{} + fargateProfileSelector := &eks.FargateProfileSelector{} if v, ok := m["labels"].(map[string]interface{}); ok && len(v) > 0 { - fargateProfileSelector.Labels = make(map[string]string) - for key, value := range flex.ExpandStringMap(v) { - val := value - fargateProfileSelector.Labels[key] = *val - } + fargateProfileSelector.Labels = flex.ExpandStringMap(v) } if v, ok := m["namespace"].(string); ok && v != "" { @@ -299,7 +282,7 @@ func expandFargateProfileSelectors(l []interface{}) []types.FargateProfileSelect return fargateProfileSelectors } -func flattenFargateProfileSelectors(fargateProfileSelectors []types.FargateProfileSelector) []map[string]interface{} { +func 
flattenFargateProfileSelectors(fargateProfileSelectors []*eks.FargateProfileSelector) []map[string]interface{} { if len(fargateProfileSelectors) == 0 { return []map[string]interface{}{} } @@ -308,8 +291,8 @@ func flattenFargateProfileSelectors(fargateProfileSelectors []types.FargateProfi for _, fargateProfileSelector := range fargateProfileSelectors { m := map[string]interface{}{ - "labels": fargateProfileSelector.Labels, - "namespace": fargateProfileSelector.Namespace, + "labels": aws.StringValueMap(fargateProfileSelector.Labels), + "namespace": aws.StringValue(fargateProfileSelector.Namespace), } l = append(l, m) diff --git a/internal/service/eks/fargate_profile_test.go b/internal/service/eks/fargate_profile_test.go index 99ba4507c20..7bfca00372f 100644 --- a/internal/service/eks/fargate_profile_test.go +++ b/internal/service/eks/fargate_profile_test.go @@ -9,9 +9,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -23,7 +22,7 @@ import ( func TestAccEKSFargateProfile_basic(t *testing.T) { ctx := acctest.Context(t) - var fargateProfile types.FargateProfile + var fargateProfile eks.FargateProfile rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) eksClusterResourceName := "aws_eks_cluster.test" iamRoleResourceName := "aws_iam_role.pod" @@ -31,7 +30,7 @@ func TestAccEKSFargateProfile_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckFargateProfile(t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFargateProfileDestroy(ctx), Steps: []resource.TestStep{ @@ -44,7 +43,7 @@ func TestAccEKSFargateProfile_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "fargate_profile_name", rName), resource.TestCheckResourceAttrPair(resourceName, "pod_execution_role_arn", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "selector.#", "1"), - resource.TestCheckResourceAttr(resourceName, "status", string(types.FargateProfileStatusActive)), + resource.TestCheckResourceAttr(resourceName, "status", eks.FargateProfileStatusActive), resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "2"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), @@ -60,13 +59,13 @@ func TestAccEKSFargateProfile_basic(t *testing.T) { func TestAccEKSFargateProfile_disappears(t *testing.T) { ctx := acctest.Context(t) - var fargateProfile types.FargateProfile + var fargateProfile eks.FargateProfile rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_fargate_profile.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckFargateProfile(t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFargateProfileDestroy(ctx), Steps: []resource.TestStep{ @@ -84,14 +83,14 @@ func TestAccEKSFargateProfile_disappears(t *testing.T) { func TestAccEKSFargateProfile_Multi_profile(t *testing.T) { ctx := acctest.Context(t) - var fargateProfile types.FargateProfile + var fargateProfile eks.FargateProfile rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName1 := "aws_eks_fargate_profile.test.0" resourceName2 := "aws_eks_fargate_profile.test.1" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckFargateProfile(t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFargateProfileDestroy(ctx), Steps: []resource.TestStep{ @@ -108,13 +107,13 @@ func TestAccEKSFargateProfile_Multi_profile(t *testing.T) { func TestAccEKSFargateProfile_Selector_labels(t *testing.T) { ctx := acctest.Context(t) - var fargateProfile1 types.FargateProfile + var fargateProfile1 eks.FargateProfile rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_fargate_profile.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckFargateProfile(t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFargateProfileDestroy(ctx), Steps: []resource.TestStep{ @@ -135,13 +134,13 @@ func TestAccEKSFargateProfile_Selector_labels(t *testing.T) { func TestAccEKSFargateProfile_tags(t *testing.T) { ctx := acctest.Context(t) - var fargateProfile1, fargateProfile2, fargateProfile3 types.FargateProfile + var fargateProfile1, fargateProfile2, fargateProfile3 eks.FargateProfile rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_fargate_profile.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckFargateProfile(t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFargateProfileDestroy(ctx), Steps: []resource.TestStep{ @@ -179,7 +178,7 @@ func TestAccEKSFargateProfile_tags(t *testing.T) { }) } -func 
testAccCheckFargateProfileExists(ctx context.Context, n string, v *types.FargateProfile) resource.TestCheckFunc { +func testAccCheckFargateProfileExists(ctx context.Context, n string, v *eks.FargateProfile) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -196,9 +195,9 @@ func testAccCheckFargateProfileExists(ctx context.Context, n string, v *types.Fa return err } - client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) - output, err := tfeks.FindFargateProfileByClusterNameAndFargateProfileName(ctx, client, clusterName, fargateProfileName) + output, err := tfeks.FindFargateProfileByClusterNameAndFargateProfileName(ctx, conn, clusterName, fargateProfileName) if err != nil { return err @@ -212,7 +211,7 @@ func testAccCheckFargateProfileExists(ctx context.Context, n string, v *types.Fa func testAccCheckFargateProfileDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_eks_fargate_profile" { @@ -225,7 +224,7 @@ func testAccCheckFargateProfileDestroy(ctx context.Context) resource.TestCheckFu return err } - _, err = tfeks.FindFargateProfileByClusterNameAndFargateProfileName(ctx, client, clusterName, fargateProfileName) + _, err = tfeks.FindFargateProfileByClusterNameAndFargateProfileName(ctx, conn, clusterName, fargateProfileName) if tfresource.NotFound(err) { continue diff --git a/internal/service/eks/find.go b/internal/service/eks/find.go index 48fc403bc76..2d5b5e6e11e 100644 --- a/internal/service/eks/find.go +++ b/internal/service/eks/find.go @@ -6,22 +6,21 @@ package eks import ( "context" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/eks" - 
"github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/eks" + "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/errs" ) -func FindAddonByClusterNameAndAddonName(ctx context.Context, client *eks.Client, clusterName, addonName string) (*types.Addon, error) { +func FindAddonByClusterNameAndAddonName(ctx context.Context, conn *eks.EKS, clusterName, addonName string) (*eks.Addon, error) { input := &eks.DescribeAddonInput{ AddonName: aws.String(addonName), ClusterName: aws.String(clusterName), } - output, err := client.DescribeAddon(ctx, input) + output, err := conn.DescribeAddonWithContext(ctx, input) - if errs.IsA[*types.ResourceNotFoundException](err) { + if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -42,16 +41,16 @@ func FindAddonByClusterNameAndAddonName(ctx context.Context, client *eks.Client, return output.Addon, nil } -func FindAddonUpdateByClusterNameAddonNameAndID(ctx context.Context, client *eks.Client, clusterName, addonName, id string) (*types.Update, error) { +func FindAddonUpdateByClusterNameAddonNameAndID(ctx context.Context, conn *eks.EKS, clusterName, addonName, id string) (*eks.Update, error) { input := &eks.DescribeUpdateInput{ AddonName: aws.String(addonName), Name: aws.String(clusterName), UpdateId: aws.String(id), } - output, err := client.DescribeUpdate(ctx, input) + output, err := conn.DescribeUpdateWithContext(ctx, input) - if errs.IsA[*types.ResourceNotFoundException](err) { + if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -72,45 +71,47 @@ func FindAddonUpdateByClusterNameAddonNameAndID(ctx context.Context, client *eks return output.Update, nil } -func 
FindAddonVersionByAddonNameAndKubernetesVersion(ctx context.Context, client *eks.Client, addonName, kubernetesVersion string, mostRecent bool) (*types.AddonVersionInfo, error) { +func FindAddonVersionByAddonNameAndKubernetesVersion(ctx context.Context, conn *eks.EKS, addonName, kubernetesVersion string, mostRecent bool) (*eks.AddonVersionInfo, error) { input := &eks.DescribeAddonVersionsInput{ AddonName: aws.String(addonName), KubernetesVersion: aws.String(kubernetesVersion), } - var version *types.AddonVersionInfo + var version *eks.AddonVersionInfo - paginator := eks.NewDescribeAddonVersionsPaginator(client, input) - for paginator.HasMorePages() { - page, err := paginator.NextPage(ctx) - - if errs.IsA[*types.ResourceNotFoundException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err + err := conn.DescribeAddonVersionsPagesWithContext(ctx, input, func(page *eks.DescribeAddonVersionsOutput, lastPage bool) bool { + if page == nil || len(page.Addons) == 0 { + return !lastPage } for _, addon := range page.Addons { for i, addonVersion := range addon.AddonVersions { if mostRecent && i == 0 { - version = &addonVersion - break + version = addonVersion + return !lastPage } for _, versionCompatibility := range addonVersion.Compatibilities { - if bool(versionCompatibility.DefaultVersion) { - version = &addonVersion - break + if aws.BoolValue(versionCompatibility.DefaultVersion) { + version = addonVersion + return !lastPage } } } } + return lastPage + }) + + if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err } - if version.AddonVersion == nil { + if version == nil || version.AddonVersion == nil { return nil, &retry.NotFoundError{ Message: "Empty result", LastRequest: input, @@ -120,15 +121,15 @@ func FindAddonVersionByAddonNameAndKubernetesVersion(ctx 
context.Context, client return version, nil } -func FindFargateProfileByClusterNameAndFargateProfileName(ctx context.Context, client *eks.Client, clusterName, fargateProfileName string) (*types.FargateProfile, error) { +func FindFargateProfileByClusterNameAndFargateProfileName(ctx context.Context, conn *eks.EKS, clusterName, fargateProfileName string) (*eks.FargateProfile, error) { input := &eks.DescribeFargateProfileInput{ ClusterName: aws.String(clusterName), FargateProfileName: aws.String(fargateProfileName), } - output, err := client.DescribeFargateProfile(ctx, input) + output, err := conn.DescribeFargateProfileWithContext(ctx, input) - if errs.IsA[*types.ResourceNotFoundException](err) { + if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -149,15 +150,15 @@ func FindFargateProfileByClusterNameAndFargateProfileName(ctx context.Context, c return output.FargateProfile, nil } -func FindNodegroupByClusterNameAndNodegroupName(ctx context.Context, client *eks.Client, clusterName, nodeGroupName string) (*types.Nodegroup, error) { +func FindNodegroupByClusterNameAndNodegroupName(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName string) (*eks.Nodegroup, error) { input := &eks.DescribeNodegroupInput{ ClusterName: aws.String(clusterName), NodegroupName: aws.String(nodeGroupName), } - output, err := client.DescribeNodegroup(ctx, input) + output, err := conn.DescribeNodegroupWithContext(ctx, input) - if errs.IsA[*types.ResourceNotFoundException](err) { + if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -178,16 +179,16 @@ func FindNodegroupByClusterNameAndNodegroupName(ctx context.Context, client *eks return output.Nodegroup, nil } -func FindNodegroupUpdateByClusterNameNodegroupNameAndID(ctx context.Context, client *eks.Client, clusterName, nodeGroupName, id string) 
(*types.Update, error) { +func FindNodegroupUpdateByClusterNameNodegroupNameAndID(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName, id string) (*eks.Update, error) { input := &eks.DescribeUpdateInput{ Name: aws.String(clusterName), NodegroupName: aws.String(nodeGroupName), UpdateId: aws.String(id), } - output, err := client.DescribeUpdate(ctx, input) + output, err := conn.DescribeUpdateWithContext(ctx, input) - if errs.IsA[*types.ResourceNotFoundException](err) { + if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -208,18 +209,18 @@ func FindNodegroupUpdateByClusterNameNodegroupNameAndID(ctx context.Context, cli return output.Update, nil } -func FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx context.Context, client *eks.Client, clusterName, configName string) (*types.OidcIdentityProviderConfig, error) { +func FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx context.Context, conn *eks.EKS, clusterName, configName string) (*eks.OidcIdentityProviderConfig, error) { input := &eks.DescribeIdentityProviderConfigInput{ ClusterName: aws.String(clusterName), - IdentityProviderConfig: &types.IdentityProviderConfig{ + IdentityProviderConfig: &eks.IdentityProviderConfig{ Name: aws.String(configName), Type: aws.String(IdentityProviderConfigTypeOIDC), }, } - output, err := client.DescribeIdentityProviderConfig(ctx, input) + output, err := conn.DescribeIdentityProviderConfigWithContext(ctx, input) - if errs.IsA[*types.ResourceNotFoundException](err) { + if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, diff --git a/internal/service/eks/generate.go b/internal/service/eks/generate.go index 91be6cd1d41..d9cf18f5965 100644 --- a/internal/service/eks/generate.go +++ b/internal/service/eks/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ServiceTagsMap -KVTValues -SkipTypesImp -ListTags -UpdateTags +//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsMap -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. diff --git a/internal/service/eks/identity_provider_config.go b/internal/service/eks/identity_provider_config.go index 5f6e418e5a6..7bd35298854 100644 --- a/internal/service/eks/identity_provider_config.go +++ b/internal/service/eks/identity_provider_config.go @@ -6,18 +6,16 @@ package eks import ( "context" "log" - "strings" "time" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/eks" + "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -133,7 +131,7 @@ func ResourceIdentityProviderConfig() *schema.Resource { } func resourceIdentityProviderConfigCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) clusterName := d.Get("cluster_name").(string) configName, oidc := 
expandOIDCIdentityProviderConfigRequest(d.Get("oidc").([]interface{})[0].(map[string]interface{})) @@ -145,7 +143,7 @@ func resourceIdentityProviderConfigCreate(ctx context.Context, d *schema.Resourc Tags: getTagsIn(ctx), } - _, err := client.AssociateIdentityProviderConfig(ctx, input) + _, err := conn.AssociateIdentityProviderConfigWithContext(ctx, input) if err != nil { return diag.Errorf("associating EKS Identity Provider Config (%s): %s", idpID, err) @@ -153,7 +151,7 @@ func resourceIdentityProviderConfigCreate(ctx context.Context, d *schema.Resourc d.SetId(idpID) - _, err = waitOIDCIdentityProviderConfigCreated(ctx, client, clusterName, configName, d.Timeout(schema.TimeoutCreate)) + _, err = waitOIDCIdentityProviderConfigCreated(ctx, conn, clusterName, configName, d.Timeout(schema.TimeoutCreate)) if err != nil { return diag.Errorf("waiting for EKS Identity Provider Config (%s) association: %s", d.Id(), err) @@ -163,7 +161,7 @@ func resourceIdentityProviderConfigCreate(ctx context.Context, d *schema.Resourc } func resourceIdentityProviderConfigRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) clusterName, configName, err := IdentityProviderConfigParseResourceID(d.Id()) @@ -171,7 +169,7 @@ func resourceIdentityProviderConfigRead(ctx context.Context, d *schema.ResourceD return diag.FromErr(err) } - oidc, err := FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, client, clusterName, configName) + oidc, err := FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, conn, clusterName, configName) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EKS Identity Provider Config (%s) not found, removing from state", d.Id()) @@ -203,7 +201,7 @@ func resourceIdentityProviderConfigUpdate(ctx context.Context, d *schema.Resourc } func resourceIdentityProviderConfigDelete(ctx context.Context, d 
*schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) clusterName, configName, err := IdentityProviderConfigParseResourceID(d.Id()) @@ -212,29 +210,27 @@ func resourceIdentityProviderConfigDelete(ctx context.Context, d *schema.Resourc } log.Printf("[DEBUG] Disassociating EKS Identity Provider Config: %s", d.Id()) - _, err = client.DisassociateIdentityProviderConfig(ctx, &eks.DisassociateIdentityProviderConfigInput{ + _, err = conn.DisassociateIdentityProviderConfigWithContext(ctx, &eks.DisassociateIdentityProviderConfigInput{ ClusterName: aws.String(clusterName), - IdentityProviderConfig: &types.IdentityProviderConfig{ + IdentityProviderConfig: &eks.IdentityProviderConfig{ Name: aws.String(configName), Type: aws.String(IdentityProviderConfigTypeOIDC), }, }) - if errs.IsA[*types.ResourceNotFoundException](err) { + if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { return nil } - if errs.IsA[*types.InvalidRequestException](err) { - if strings.Contains(err.Error(), "Identity provider config is not associated with cluster") { - return nil - } + if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidRequestException, "Identity provider config is not associated with cluster") { + return nil } if err != nil { return diag.Errorf("disassociating EKS Identity Provider Config (%s): %s", d.Id(), err) } - _, err = waitOIDCIdentityProviderConfigDeleted(ctx, client, clusterName, configName, d.Timeout(schema.TimeoutDelete)) + _, err = waitOIDCIdentityProviderConfigDeleted(ctx, conn, clusterName, configName, d.Timeout(schema.TimeoutDelete)) if err != nil { return diag.Errorf("waiting for EKS Identity Provider Config (%s) disassociation: %s", d.Id(), err) @@ -243,12 +239,12 @@ func resourceIdentityProviderConfigDelete(ctx context.Context, d *schema.Resourc return nil } -func expandOIDCIdentityProviderConfigRequest(tfMap map[string]interface{}) (string, 
*types.OidcIdentityProviderConfigRequest) { +func expandOIDCIdentityProviderConfigRequest(tfMap map[string]interface{}) (string, *eks.OidcIdentityProviderConfigRequest) { if tfMap == nil { return "", nil } - apiObject := &types.OidcIdentityProviderConfigRequest{} + apiObject := &eks.OidcIdentityProviderConfigRequest{} if v, ok := tfMap["client_id"].(string); ok && v != "" { apiObject.ClientId = aws.String(v) @@ -273,7 +269,7 @@ func expandOIDCIdentityProviderConfigRequest(tfMap map[string]interface{}) (stri } if v, ok := tfMap["required_claims"].(map[string]interface{}); ok && len(v) > 0 { - apiObject.RequiredClaims = flex.ExpandStringValueMap(v) + apiObject.RequiredClaims = flex.ExpandStringMap(v) } if v, ok := tfMap["username_claim"].(string); ok && v != "" { @@ -287,7 +283,7 @@ func expandOIDCIdentityProviderConfigRequest(tfMap map[string]interface{}) (stri return identityProviderConfigName, apiObject } -func flattenOIDCIdentityProviderConfig(apiObject *types.OidcIdentityProviderConfig) map[string]interface{} { +func flattenOIDCIdentityProviderConfig(apiObject *eks.OidcIdentityProviderConfig) map[string]interface{} { if apiObject == nil { return nil } @@ -295,35 +291,35 @@ func flattenOIDCIdentityProviderConfig(apiObject *types.OidcIdentityProviderConf tfMap := map[string]interface{}{} if v := apiObject.ClientId; v != nil { - tfMap["client_id"] = v + tfMap["client_id"] = aws.StringValue(v) } if v := apiObject.GroupsClaim; v != nil { - tfMap["groups_claim"] = v + tfMap["groups_claim"] = aws.StringValue(v) } if v := apiObject.GroupsPrefix; v != nil { - tfMap["groups_prefix"] = v + tfMap["groups_prefix"] = aws.StringValue(v) } if v := apiObject.IdentityProviderConfigName; v != nil { - tfMap["identity_provider_config_name"] = v + tfMap["identity_provider_config_name"] = aws.StringValue(v) } if v := apiObject.IssuerUrl; v != nil { - tfMap["issuer_url"] = v + tfMap["issuer_url"] = aws.StringValue(v) } if v := apiObject.RequiredClaims; v != nil { - 
tfMap["required_claims"] = v + tfMap["required_claims"] = aws.StringValueMap(v) } if v := apiObject.UsernameClaim; v != nil { - tfMap["username_claim"] = v + tfMap["username_claim"] = aws.StringValue(v) } if v := apiObject.UsernamePrefix; v != nil { - tfMap["username_prefix"] = v + tfMap["username_prefix"] = aws.StringValue(v) } return tfMap diff --git a/internal/service/eks/identity_provider_config_test.go b/internal/service/eks/identity_provider_config_test.go index 6294a41a77b..e0fd7f1fa95 100644 --- a/internal/service/eks/identity_provider_config_test.go +++ b/internal/service/eks/identity_provider_config_test.go @@ -9,8 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,14 +21,14 @@ import ( func TestAccEKSIdentityProviderConfig_basic(t *testing.T) { ctx := acctest.Context(t) - var config types.OidcIdentityProviderConfig + var config eks.OidcIdentityProviderConfig rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) eksClusterResourceName := "aws_eks_cluster.test" resourceName := "aws_eks_identity_provider_config.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckIdentityProviderConfigDestroy(ctx), Steps: []resource.TestStep{ @@ -66,13 +65,13 @@ func TestAccEKSIdentityProviderConfig_basic(t *testing.T) { func TestAccEKSIdentityProviderConfig_disappears(t *testing.T) { ctx := acctest.Context(t) - var config types.OidcIdentityProviderConfig + var config 
eks.OidcIdentityProviderConfig rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_identity_provider_config.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckIdentityProviderConfigDestroy(ctx), Steps: []resource.TestStep{ @@ -90,13 +89,13 @@ func TestAccEKSIdentityProviderConfig_disappears(t *testing.T) { func TestAccEKSIdentityProviderConfig_allOIDCOptions(t *testing.T) { ctx := acctest.Context(t) - var config types.OidcIdentityProviderConfig + var config eks.OidcIdentityProviderConfig rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_identity_provider_config.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckIdentityProviderConfigDestroy(ctx), Steps: []resource.TestStep{ @@ -128,13 +127,13 @@ func TestAccEKSIdentityProviderConfig_allOIDCOptions(t *testing.T) { func TestAccEKSIdentityProviderConfig_tags(t *testing.T) { ctx := acctest.Context(t) - var config types.OidcIdentityProviderConfig + var config eks.OidcIdentityProviderConfig rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_identity_provider_config.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: 
testAccCheckIdentityProviderConfigDestroy(ctx), Steps: []resource.TestStep{ @@ -172,7 +171,7 @@ func TestAccEKSIdentityProviderConfig_tags(t *testing.T) { }) } -func testAccCheckIdentityProviderExistsConfig(ctx context.Context, resourceName string, config *types.OidcIdentityProviderConfig) resource.TestCheckFunc { +func testAccCheckIdentityProviderExistsConfig(ctx context.Context, resourceName string, config *eks.OidcIdentityProviderConfig) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] if !ok { @@ -189,9 +188,9 @@ func testAccCheckIdentityProviderExistsConfig(ctx context.Context, resourceName return err } - client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) - output, err := tfeks.FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, client, clusterName, configName) + output, err := tfeks.FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, conn, clusterName, configName) if err != nil { return err @@ -205,7 +204,7 @@ func testAccCheckIdentityProviderExistsConfig(ctx context.Context, resourceName func testAccCheckIdentityProviderConfigDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_eks_identity_provider_config" { @@ -218,7 +217,7 @@ func testAccCheckIdentityProviderConfigDestroy(ctx context.Context) resource.Tes return err } - _, err = tfeks.FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, client, clusterName, configName) + _, err = tfeks.FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, conn, clusterName, configName) if tfresource.NotFound(err) { continue diff --git a/internal/service/eks/node_group.go 
b/internal/service/eks/node_group.go index 586e20529d2..ad1a8af448b 100644 --- a/internal/service/eks/node_group.go +++ b/internal/service/eks/node_group.go @@ -9,17 +9,15 @@ import ( "reflect" "time" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/eks" + "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" - "github.com/hashicorp/terraform-provider-aws/internal/enum" - "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -49,22 +47,22 @@ func ResourceNodeGroup() *schema.Resource { Schema: map[string]*schema.Schema{ "ami_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateDiagFunc: enum.Validate[types.AMITypes](), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(eks.AMITypes_Values(), false), }, "arn": { Type: schema.TypeString, Computed: true, }, "capacity_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateDiagFunc: enum.Validate[types.CapacityTypes](), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(eks.CapacityTypes_Values(), false), }, "cluster_name": { Type: schema.TypeString, @@ -250,9 +248,9 @@ func 
ResourceNodeGroup() *schema.Resource { ValidateFunc: validation.StringLenBetween(0, 63), }, "effect": { - Type: schema.TypeString, - Required: true, - ValidateDiagFunc: enum.Validate[types.TaintEffect](), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(eks.TaintEffect_Values(), false), }, }, }, @@ -295,7 +293,7 @@ func ResourceNodeGroup() *schema.Resource { } func resourceNodeGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) clusterName := d.Get("cluster_name").(string) nodeGroupName := create.Name(d.Get("node_group_name").(string), d.Get("node_group_name_prefix").(string)) @@ -305,28 +303,28 @@ func resourceNodeGroupCreate(ctx context.Context, d *schema.ResourceData, meta i ClusterName: aws.String(clusterName), NodegroupName: aws.String(nodeGroupName), NodeRole: aws.String(d.Get("node_role_arn").(string)), - Subnets: flex.ExpandStringValueSet(d.Get("subnet_ids").(*schema.Set)), + Subnets: flex.ExpandStringSet(d.Get("subnet_ids").(*schema.Set)), Tags: getTagsIn(ctx), } if v, ok := d.GetOk("ami_type"); ok { - input.AmiType = types.AMITypes(v.(string)) + input.AmiType = aws.String(v.(string)) } if v, ok := d.GetOk("capacity_type"); ok { - input.CapacityType = types.CapacityTypes(v.(string)) + input.CapacityType = aws.String(v.(string)) } if v, ok := d.GetOk("disk_size"); ok { - input.DiskSize = aws.Int32(int32(v.(int))) + input.DiskSize = aws.Int64(int64(v.(int))) } if v, ok := d.GetOk("instance_types"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - input.InstanceTypes = flex.ExpandStringValueList(v.([]interface{})) + input.InstanceTypes = flex.ExpandStringList(v.([]interface{})) } if v := d.Get("labels").(map[string]interface{}); len(v) > 0 { - input.Labels = flex.ExpandStringValueMap(v) + input.Labels = flex.ExpandStringMap(v) } if v := 
d.Get("launch_template").([]interface{}); len(v) > 0 { @@ -357,7 +355,7 @@ func resourceNodeGroupCreate(ctx context.Context, d *schema.ResourceData, meta i input.Version = aws.String(v.(string)) } - _, err := client.CreateNodegroup(ctx, input) + _, err := conn.CreateNodegroupWithContext(ctx, input) if err != nil { return diag.Errorf("creating EKS Node Group (%s): %s", groupID, err) @@ -365,13 +363,8 @@ func resourceNodeGroupCreate(ctx context.Context, d *schema.ResourceData, meta i d.SetId(groupID) - waiter := eks.NewNodegroupActiveWaiter(client) - waiterParams := &eks.DescribeNodegroupInput{ - ClusterName: aws.String(clusterName), - NodegroupName: aws.String(nodeGroupName), - } + _, err = waitNodegroupCreated(ctx, conn, clusterName, nodeGroupName, d.Timeout(schema.TimeoutCreate)) - err = waiter.Wait(ctx, waiterParams, d.Timeout(schema.TimeoutCreate)) if err != nil { return diag.Errorf("waiting for EKS Node Group (%s) to create: %s", d.Id(), err) } @@ -380,7 +373,7 @@ func resourceNodeGroupCreate(ctx context.Context, d *schema.ResourceData, meta i } func resourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) clusterName, nodeGroupName, err := NodeGroupParseResourceID(d.Id()) @@ -388,7 +381,7 @@ func resourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta int return diag.FromErr(err) } - nodeGroup, err := FindNodegroupByClusterNameAndNodegroupName(ctx, client, clusterName, nodeGroupName) + nodeGroup, err := FindNodegroupByClusterNameAndNodegroupName(ctx, conn, clusterName, nodeGroupName) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EKS Node Group (%s) not found, removing from state", d.Id()) @@ -406,11 +399,11 @@ func resourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta int d.Set("cluster_name", nodeGroup.ClusterName) d.Set("disk_size", nodeGroup.DiskSize) - if 
err := d.Set("instance_types", nodeGroup.InstanceTypes); err != nil { + if err := d.Set("instance_types", aws.StringValueSlice(nodeGroup.InstanceTypes)); err != nil { return diag.Errorf("setting instance_types: %s", err) } - if err := d.Set("labels", nodeGroup.Labels); err != nil { + if err := d.Set("labels", aws.StringValueMap(nodeGroup.Labels)); err != nil { return diag.Errorf("setting labels: %s", err) } @@ -419,7 +412,7 @@ func resourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta int } d.Set("node_group_name", nodeGroup.NodegroupName) - d.Set("node_group_name_prefix", create.NamePrefixFromName(aws.ToString(nodeGroup.NodegroupName))) + d.Set("node_group_name_prefix", create.NamePrefixFromName(aws.StringValue(nodeGroup.NodegroupName))) d.Set("node_role_arn", nodeGroup.NodeRole) d.Set("release_version", nodeGroup.ReleaseVersion) @@ -441,7 +434,7 @@ func resourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta int d.Set("status", nodeGroup.Status) - if err := d.Set("subnet_ids", nodeGroup.Subnets); err != nil { + if err := d.Set("subnet_ids", aws.StringValueSlice(nodeGroup.Subnets)); err != nil { return diag.Errorf("setting subnets: %s", err) } @@ -465,7 +458,7 @@ func resourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta int } func resourceNodeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) clusterName, nodeGroupName, err := NodeGroupParseResourceID(d.Id()) @@ -478,7 +471,7 @@ func resourceNodeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i input := &eks.UpdateNodegroupVersionInput{ ClientRequestToken: aws.String(id.UniqueId()), ClusterName: aws.String(clusterName), - Force: *aws.Bool(d.Get("force_update_version").(bool)), + Force: aws.Bool(d.Get("force_update_version").(bool)), NodegroupName: aws.String(nodeGroupName), } @@ -510,15 +503,15 @@ func 
resourceNodeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i input.Version = aws.String(v.(string)) } - output, err := client.UpdateNodegroupVersion(ctx, input) + output, err := conn.UpdateNodegroupVersionWithContext(ctx, input) if err != nil { return diag.Errorf("updating EKS Node Group (%s) version: %s", d.Id(), err) } - updateID := aws.ToString(output.Update.Id) + updateID := aws.StringValue(output.Update.Id) - _, err = waitNodegroupUpdateSuccessful(ctx, client, clusterName, nodeGroupName, updateID, d.Timeout(schema.TimeoutUpdate)) + _, err = waitNodegroupUpdateSuccessful(ctx, conn, clusterName, nodeGroupName, updateID, d.Timeout(schema.TimeoutUpdate)) if err != nil { return diag.Errorf("waiting for EKS Node Group (%s) version update (%s): %s", d.Id(), updateID, err) @@ -549,15 +542,15 @@ func resourceNodeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i } } - output, err := client.UpdateNodegroupConfig(ctx, input) + output, err := conn.UpdateNodegroupConfigWithContext(ctx, input) if err != nil { return diag.Errorf("updating EKS Node Group (%s) config: %s", d.Id(), err) } - updateID := aws.ToString(output.Update.Id) + updateID := aws.StringValue(output.Update.Id) - _, err = waitNodegroupUpdateSuccessful(ctx, client, clusterName, nodeGroupName, updateID, d.Timeout(schema.TimeoutUpdate)) + _, err = waitNodegroupUpdateSuccessful(ctx, conn, clusterName, nodeGroupName, updateID, d.Timeout(schema.TimeoutUpdate)) if err != nil { return diag.Errorf("waiting for EKS Node Group (%s) config update (%s): %s", d.Id(), updateID, err) @@ -568,7 +561,7 @@ func resourceNodeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i } func resourceNodeGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) clusterName, nodeGroupName, err := NodeGroupParseResourceID(d.Id()) @@ -577,12 +570,12 @@ func 
resourceNodeGroupDelete(ctx context.Context, d *schema.ResourceData, meta i } log.Printf("[DEBUG] Deleting EKS Node Group: %s", d.Id()) - _, err = client.DeleteNodegroup(ctx, &eks.DeleteNodegroupInput{ + _, err = conn.DeleteNodegroupWithContext(ctx, &eks.DeleteNodegroupInput{ ClusterName: aws.String(clusterName), NodegroupName: aws.String(nodeGroupName), }) - if errs.IsA[*types.ResourceNotFoundException](err) { + if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { return nil } @@ -590,13 +583,8 @@ func resourceNodeGroupDelete(ctx context.Context, d *schema.ResourceData, meta i return diag.Errorf("deleting EKS Node Group (%s): %s", d.Id(), err) } - waiter := eks.NewNodegroupDeletedWaiter(client) - waiterParams := &eks.DescribeNodegroupInput{ - ClusterName: aws.String(clusterName), - NodegroupName: aws.String(nodeGroupName), - } + _, err = waitNodegroupDeleted(ctx, conn, clusterName, nodeGroupName, d.Timeout(schema.TimeoutDelete)) - err = waiter.Wait(ctx, waiterParams, d.Timeout(schema.TimeoutDelete)) if err != nil { return diag.Errorf("waiting for EKS Node Group (%s) to delete: %s", d.Id(), err) } @@ -604,14 +592,14 @@ func resourceNodeGroupDelete(ctx context.Context, d *schema.ResourceData, meta i return nil } -func expandLaunchTemplateSpecification(l []interface{}) *types.LaunchTemplateSpecification { +func expandLaunchTemplateSpecification(l []interface{}) *eks.LaunchTemplateSpecification { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - config := &types.LaunchTemplateSpecification{} + config := &eks.LaunchTemplateSpecification{} if v, ok := m["id"].(string); ok && v != "" { config.Id = aws.String(v) @@ -628,34 +616,34 @@ func expandLaunchTemplateSpecification(l []interface{}) *types.LaunchTemplateSpe return config } -func expandNodegroupScalingConfig(tfMap map[string]interface{}) *types.NodegroupScalingConfig { +func expandNodegroupScalingConfig(tfMap map[string]interface{}) *eks.NodegroupScalingConfig { if 
tfMap == nil { return nil } - apiObject := &types.NodegroupScalingConfig{} + apiObject := &eks.NodegroupScalingConfig{} if v, ok := tfMap["desired_size"].(int); ok { - apiObject.DesiredSize = aws.Int32(int32(v)) + apiObject.DesiredSize = aws.Int64(int64(v)) } if v, ok := tfMap["max_size"].(int); ok && v != 0 { - apiObject.MaxSize = aws.Int32(int32(v)) + apiObject.MaxSize = aws.Int64(int64(v)) } if v, ok := tfMap["min_size"].(int); ok { - apiObject.MinSize = aws.Int32(int32(v)) + apiObject.MinSize = aws.Int64(int64(v)) } return apiObject } -func expandTaints(l []interface{}) []types.Taint { +func expandTaints(l []interface{}) []*eks.Taint { if len(l) == 0 { return nil } - var taints []types.Taint + var taints []*eks.Taint for _, raw := range l { t, ok := raw.(map[string]interface{}) @@ -664,7 +652,7 @@ func expandTaints(l []interface{}) []types.Taint { continue } - taint := types.Taint{} + taint := &eks.Taint{} if k, ok := t["key"].(string); ok { taint.Key = aws.String(k) @@ -675,7 +663,7 @@ func expandTaints(l []interface{}) []types.Taint { } if e, ok := t["effect"].(string); ok { - taint.Effect = types.TaintEffect(e) + taint.Effect = aws.String(e) } taints = append(taints, taint) @@ -684,17 +672,25 @@ func expandTaints(l []interface{}) []types.Taint { return taints } -func expandUpdateTaintsPayload(oldTaintsRaw, newTaintsRaw []interface{}) *types.UpdateTaintsPayload { +func expandUpdateTaintsPayload(oldTaintsRaw, newTaintsRaw []interface{}) *eks.UpdateTaintsPayload { oldTaints := expandTaints(oldTaintsRaw) newTaints := expandTaints(newTaintsRaw) - var removedTaints []types.Taint + var removedTaints []*eks.Taint for _, ot := range oldTaints { + if ot == nil { + continue + } + removed := true for _, nt := range newTaints { + if nt == nil { + continue + } + // if both taint.key and taint.effect are the same, we don't need to remove it. 
- if aws.ToString(nt.Key) == aws.ToString(ot.Key) && - string(nt.Effect) == string(ot.Effect) { + if aws.StringValue(nt.Key) == aws.StringValue(ot.Key) && + aws.StringValue(nt.Effect) == aws.StringValue(ot.Effect) { removed = false break } @@ -705,10 +701,18 @@ func expandUpdateTaintsPayload(oldTaintsRaw, newTaintsRaw []interface{}) *types. } } - var updatedTaints []types.Taint + var updatedTaints []*eks.Taint for _, nt := range newTaints { + if nt == nil { + continue + } + updated := true for _, ot := range oldTaints { + if nt == nil { + continue + } + if reflect.DeepEqual(nt, ot) { updated = false break @@ -723,7 +727,7 @@ func expandUpdateTaintsPayload(oldTaintsRaw, newTaintsRaw []interface{}) *types. return nil } - updateTaintsPayload := &types.UpdateTaintsPayload{} + updateTaintsPayload := &eks.UpdateTaintsPayload{} if len(removedTaints) > 0 { updateTaintsPayload.RemoveTaints = removedTaints @@ -736,45 +740,45 @@ func expandUpdateTaintsPayload(oldTaintsRaw, newTaintsRaw []interface{}) *types. 
return updateTaintsPayload } -func expandRemoteAccessConfig(l []interface{}) *types.RemoteAccessConfig { +func expandRemoteAccessConfig(l []interface{}) *eks.RemoteAccessConfig { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - config := &types.RemoteAccessConfig{} + config := &eks.RemoteAccessConfig{} if v, ok := m["ec2_ssh_key"].(string); ok && v != "" { config.Ec2SshKey = aws.String(v) } if v, ok := m["source_security_group_ids"].(*schema.Set); ok && v.Len() > 0 { - config.SourceSecurityGroups = flex.ExpandStringValueSet(v) + config.SourceSecurityGroups = flex.ExpandStringSet(v) } return config } -func expandNodegroupUpdateConfig(tfMap map[string]interface{}) *types.NodegroupUpdateConfig { +func expandNodegroupUpdateConfig(tfMap map[string]interface{}) *eks.NodegroupUpdateConfig { if tfMap == nil { return nil } - apiObject := &types.NodegroupUpdateConfig{} + apiObject := &eks.NodegroupUpdateConfig{} if v, ok := tfMap["max_unavailable"].(int); ok && v != 0 { - apiObject.MaxUnavailable = aws.Int32(int32(v)) + apiObject.MaxUnavailable = aws.Int64(int64(v)) } if v, ok := tfMap["max_unavailable_percentage"].(int); ok && v != 0 { - apiObject.MaxUnavailablePercentage = aws.Int32(int32(v)) + apiObject.MaxUnavailablePercentage = aws.Int64(int64(v)) } return apiObject } -func expandUpdateLabelsPayload(ctx context.Context, oldLabelsMap, newLabelsMap interface{}) *types.UpdateLabelsPayload { +func expandUpdateLabelsPayload(ctx context.Context, oldLabelsMap, newLabelsMap interface{}) *eks.UpdateLabelsPayload { // EKS Labels operate similarly to keyvaluetags oldLabels := tftags.New(ctx, oldLabelsMap) newLabels := tftags.New(ctx, newLabelsMap) @@ -786,20 +790,20 @@ func expandUpdateLabelsPayload(ctx context.Context, oldLabelsMap, newLabelsMap i return nil } - updateLabelsPayload := &types.UpdateLabelsPayload{} + updateLabelsPayload := &eks.UpdateLabelsPayload{} if len(removedLabels) > 0 { - updateLabelsPayload.RemoveLabels = 
removedLabels.Keys() + updateLabelsPayload.RemoveLabels = aws.StringSlice(removedLabels.Keys()) } if len(updatedLabels) > 0 { - updateLabelsPayload.AddOrUpdateLabels = updatedLabels.Map() + updateLabelsPayload.AddOrUpdateLabels = aws.StringMap(updatedLabels.Map()) } return updateLabelsPayload } -func flattenAutoScalingGroups(autoScalingGroups []types.AutoScalingGroup) []map[string]interface{} { +func flattenAutoScalingGroups(autoScalingGroups []*eks.AutoScalingGroup) []map[string]interface{} { if len(autoScalingGroups) == 0 { return []map[string]interface{}{} } @@ -808,7 +812,7 @@ func flattenAutoScalingGroups(autoScalingGroups []types.AutoScalingGroup) []map[ for _, autoScalingGroup := range autoScalingGroups { m := map[string]interface{}{ - "name": aws.ToString(autoScalingGroup.Name), + "name": aws.StringValue(autoScalingGroup.Name), } l = append(l, m) @@ -817,7 +821,7 @@ func flattenAutoScalingGroups(autoScalingGroups []types.AutoScalingGroup) []map[ return l } -func flattenLaunchTemplateSpecification(config *types.LaunchTemplateSpecification) []map[string]interface{} { +func flattenLaunchTemplateSpecification(config *eks.LaunchTemplateSpecification) []map[string]interface{} { if config == nil { return nil } @@ -825,34 +829,34 @@ func flattenLaunchTemplateSpecification(config *types.LaunchTemplateSpecificatio m := map[string]interface{}{} if v := config.Id; v != nil { - m["id"] = aws.ToString(v) + m["id"] = aws.StringValue(v) } if v := config.Name; v != nil { - m["name"] = aws.ToString(v) + m["name"] = aws.StringValue(v) } if v := config.Version; v != nil { - m["version"] = aws.ToString(v) + m["version"] = aws.StringValue(v) } return []map[string]interface{}{m} } -func flattenNodeGroupResources(resources *types.NodegroupResources) []map[string]interface{} { +func flattenNodeGroupResources(resources *eks.NodegroupResources) []map[string]interface{} { if resources == nil { return []map[string]interface{}{} } m := map[string]interface{}{ "autoscaling_groups": 
flattenAutoScalingGroups(resources.AutoScalingGroups), - "remote_access_security_group_id": aws.ToString(resources.RemoteAccessSecurityGroup), + "remote_access_security_group_id": aws.StringValue(resources.RemoteAccessSecurityGroup), } return []map[string]interface{}{m} } -func flattenNodeGroupScalingConfig(apiObject *types.NodegroupScalingConfig) map[string]interface{} { +func flattenNodeGroupScalingConfig(apiObject *eks.NodegroupScalingConfig) map[string]interface{} { if apiObject == nil { return nil } @@ -860,21 +864,21 @@ func flattenNodeGroupScalingConfig(apiObject *types.NodegroupScalingConfig) map[ tfMap := map[string]interface{}{} if v := apiObject.DesiredSize; v != nil { - tfMap["desired_size"] = v + tfMap["desired_size"] = aws.Int64Value(v) } if v := apiObject.MaxSize; v != nil { - tfMap["max_size"] = v + tfMap["max_size"] = aws.Int64Value(v) } if v := apiObject.MinSize; v != nil { - tfMap["min_size"] = v + tfMap["min_size"] = aws.Int64Value(v) } return tfMap } -func flattenNodeGroupUpdateConfig(apiObject *types.NodegroupUpdateConfig) map[string]interface{} { +func flattenNodeGroupUpdateConfig(apiObject *eks.NodegroupUpdateConfig) map[string]interface{} { if apiObject == nil { return nil } @@ -882,30 +886,30 @@ func flattenNodeGroupUpdateConfig(apiObject *types.NodegroupUpdateConfig) map[st tfMap := map[string]interface{}{} if v := apiObject.MaxUnavailable; v != nil { - tfMap["max_unavailable"] = v + tfMap["max_unavailable"] = aws.Int64Value(v) } if v := apiObject.MaxUnavailablePercentage; v != nil { - tfMap["max_unavailable_percentage"] = v + tfMap["max_unavailable_percentage"] = aws.Int64Value(v) } return tfMap } -func flattenRemoteAccessConfig(config *types.RemoteAccessConfig) []map[string]interface{} { +func flattenRemoteAccessConfig(config *eks.RemoteAccessConfig) []map[string]interface{} { if config == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "ec2_ssh_key": config.Ec2SshKey, - "source_security_group_ids": 
config.SourceSecurityGroups, + "ec2_ssh_key": aws.StringValue(config.Ec2SshKey), + "source_security_group_ids": aws.StringValueSlice(config.SourceSecurityGroups), } return []map[string]interface{}{m} } -func flattenTaints(taints []types.Taint) []interface{} { +func flattenTaints(taints []*eks.Taint) []interface{} { if len(taints) == 0 { return nil } @@ -913,10 +917,14 @@ func flattenTaints(taints []types.Taint) []interface{} { var results []interface{} for _, taint := range taints { + if taint == nil { + continue + } + t := make(map[string]interface{}) - t["key"] = taint.Key - t["value"] = taint.Value - t["effect"] = taint.Effect + t["key"] = aws.StringValue(taint.Key) + t["value"] = aws.StringValue(taint.Value) + t["effect"] = aws.StringValue(taint.Effect) results = append(results, t) } diff --git a/internal/service/eks/node_group_data_source.go b/internal/service/eks/node_group_data_source.go index 4ae390647b7..32cbdf4af4c 100644 --- a/internal/service/eks/node_group_data_source.go +++ b/internal/service/eks/node_group_data_source.go @@ -6,6 +6,7 @@ package eks import ( "context" + "github.com/aws/aws-sdk-go/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -183,13 +184,13 @@ func DataSourceNodeGroup() *schema.Resource { } func dataSourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig clusterName := d.Get("cluster_name").(string) nodeGroupName := d.Get("node_group_name").(string) id := NodeGroupCreateResourceID(clusterName, nodeGroupName) - nodeGroup, err := FindNodegroupByClusterNameAndNodegroupName(ctx, client, clusterName, nodeGroupName) + nodeGroup, err := FindNodegroupByClusterNameAndNodegroupName(ctx, conn, clusterName, 
nodeGroupName) if err != nil { return diag.Errorf("reading EKS Node Group (%s): %s", id, err) @@ -224,7 +225,7 @@ func dataSourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta i d.Set("scaling_config", nil) } d.Set("status", nodeGroup.Status) - d.Set("subnet_ids", nodeGroup.Subnets) + d.Set("subnet_ids", aws.StringValueSlice(nodeGroup.Subnets)) if err := d.Set("taints", flattenTaints(nodeGroup.Taints)); err != nil { return diag.Errorf("setting taints: %s", err) } diff --git a/internal/service/eks/node_group_data_source_test.go b/internal/service/eks/node_group_data_source_test.go index 51c678bf8b5..e44c6c34166 100644 --- a/internal/service/eks/node_group_data_source_test.go +++ b/internal/service/eks/node_group_data_source_test.go @@ -7,8 +7,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -16,14 +15,14 @@ import ( func TestAccEKSNodeGroupDataSource_basic(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup types.Nodegroup + var nodeGroup eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) dataSourceResourceName := "data.aws_eks_node_group.test" resourceName := "aws_eks_node_group.test" resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ diff --git a/internal/service/eks/node_group_test.go b/internal/service/eks/node_group_test.go index 685bf45ae16..379a7ee0ab3 100644 --- a/internal/service/eks/node_group_test.go 
+++ b/internal/service/eks/node_group_test.go @@ -9,9 +9,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,12 +21,12 @@ import ( ) func init() { - acctest.RegisterServiceErrorCheckFunc(eks.ServiceID, testAccErrorCheckSkip) + acctest.RegisterServiceErrorCheckFunc(eks.EndpointsID, testAccErrorCheckSkip) } func TestAccEKSNodeGroup_basic(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup types.Nodegroup + var nodeGroup eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) eksClusterResourceName := "aws_eks_cluster.test" iamRoleResourceName := "aws_iam_role.node" @@ -35,7 +34,7 @@ func TestAccEKSNodeGroup_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -43,10 +42,10 @@ func TestAccEKSNodeGroup_basic(t *testing.T) { Config: testAccNodeGroupConfig_dataSourceName(rName), Check: resource.ComposeTestCheckFunc( testAccCheckNodeGroupExists(ctx, resourceName, &nodeGroup), - resource.TestCheckResourceAttr(resourceName, "ami_type", string(types.AMITypesAl2X8664)), + resource.TestCheckResourceAttr(resourceName, "ami_type", eks.AMITypesAl2X8664), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "eks", regexache.MustCompile(fmt.Sprintf("nodegroup/%[1]s/%[1]s/.+", rName))), 
resource.TestCheckResourceAttrPair(resourceName, "cluster_name", eksClusterResourceName, "name"), - resource.TestCheckResourceAttr(resourceName, "capacity_type", string(types.CapacityTypesOnDemand)), + resource.TestCheckResourceAttr(resourceName, "capacity_type", eks.CapacityTypesOnDemand), resource.TestCheckResourceAttr(resourceName, "disk_size", "20"), resource.TestCheckResourceAttr(resourceName, "instance_types.#", "1"), resource.TestCheckResourceAttr(resourceName, "labels.%", "0"), @@ -61,7 +60,7 @@ func TestAccEKSNodeGroup_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "scaling_config.0.desired_size", "1"), resource.TestCheckResourceAttr(resourceName, "scaling_config.0.max_size", "1"), resource.TestCheckResourceAttr(resourceName, "scaling_config.0.min_size", "1"), - resource.TestCheckResourceAttr(resourceName, "status", string(types.NodegroupStatusActive)), + resource.TestCheckResourceAttr(resourceName, "status", eks.NodegroupStatusActive), resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "2"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestCheckResourceAttr(resourceName, "taint.#", "0"), @@ -80,13 +79,13 @@ func TestAccEKSNodeGroup_basic(t *testing.T) { func TestAccEKSNodeGroup_Name_generated(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup types.Nodegroup + var nodeGroup eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -109,13 +108,13 @@ func TestAccEKSNodeGroup_Name_generated(t *testing.T) { func TestAccEKSNodeGroup_namePrefix(t *testing.T) { ctx := acctest.Context(t) 
- var nodeGroup types.Nodegroup + var nodeGroup eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -138,13 +137,13 @@ func TestAccEKSNodeGroup_namePrefix(t *testing.T) { func TestAccEKSNodeGroup_disappears(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup types.Nodegroup + var nodeGroup eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -162,21 +161,21 @@ func TestAccEKSNodeGroup_disappears(t *testing.T) { func TestAccEKSNodeGroup_amiType(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 types.Nodegroup + var nodeGroup1, nodeGroup2 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccNodeGroupConfig_amiType(rName, string(types.AMITypesAl2X8664Gpu)), + Config: 
testAccNodeGroupConfig_amiType(rName, eks.AMITypesAl2X8664Gpu), Check: resource.ComposeTestCheckFunc( testAccCheckNodeGroupExists(ctx, resourceName, &nodeGroup1), - resource.TestCheckResourceAttr(resourceName, "ami_type", string(types.AMITypesAl2X8664Gpu)), + resource.TestCheckResourceAttr(resourceName, "ami_type", eks.AMITypesAl2X8664Gpu), ), }, { @@ -185,10 +184,10 @@ func TestAccEKSNodeGroup_amiType(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccNodeGroupConfig_amiType(rName, string(types.AMITypesAl2Arm64)), + Config: testAccNodeGroupConfig_amiType(rName, eks.AMITypesAl2Arm64), Check: resource.ComposeTestCheckFunc( testAccCheckNodeGroupExists(ctx, resourceName, &nodeGroup2), - resource.TestCheckResourceAttr(resourceName, "ami_type", string(types.AMITypesAl2Arm64)), + resource.TestCheckResourceAttr(resourceName, "ami_type", eks.AMITypesAl2Arm64), ), }, }, @@ -197,21 +196,21 @@ func TestAccEKSNodeGroup_amiType(t *testing.T) { func TestAccEKSNodeGroup_CapacityType_spot(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 types.Nodegroup + var nodeGroup1 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccNodeGroupConfig_capacityType(rName, string(types.CapacityTypesSpot)), + Config: testAccNodeGroupConfig_capacityType(rName, eks.CapacityTypesSpot), Check: resource.ComposeTestCheckFunc( testAccCheckNodeGroupExists(ctx, resourceName, &nodeGroup1), - resource.TestCheckResourceAttr(resourceName, "capacity_type", string(types.CapacityTypesSpot)), + resource.TestCheckResourceAttr(resourceName, "capacity_type", 
eks.CapacityTypesSpot), ), }, { @@ -225,13 +224,13 @@ func TestAccEKSNodeGroup_CapacityType_spot(t *testing.T) { func TestAccEKSNodeGroup_diskSize(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 types.Nodegroup + var nodeGroup1 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -253,13 +252,13 @@ func TestAccEKSNodeGroup_diskSize(t *testing.T) { func TestAccEKSNodeGroup_forceUpdateVersion(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 types.Nodegroup + var nodeGroup1 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -289,14 +288,14 @@ func TestAccEKSNodeGroup_forceUpdateVersion(t *testing.T) { func TestAccEKSNodeGroup_InstanceTypes_multiple(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 types.Nodegroup + var nodeGroup1 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" instanceTypes := fmt.Sprintf("%q, %q, %q, %q", "t2.medium", "t3.medium", "t2.large", "t3.large") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, 
eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -322,13 +321,13 @@ func TestAccEKSNodeGroup_InstanceTypes_multiple(t *testing.T) { func TestAccEKSNodeGroup_InstanceTypes_single(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 types.Nodegroup + var nodeGroup1 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -350,13 +349,13 @@ func TestAccEKSNodeGroup_InstanceTypes_single(t *testing.T) { func TestAccEKSNodeGroup_labels(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2, nodeGroup3 types.Nodegroup + var nodeGroup1, nodeGroup2, nodeGroup3 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -396,7 +395,7 @@ func TestAccEKSNodeGroup_labels(t *testing.T) { func TestAccEKSNodeGroup_LaunchTemplate_id(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 types.Nodegroup + var nodeGroup1, nodeGroup2 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) launchTemplateResourceName1 := "aws_launch_template.test1" 
launchTemplateResourceName2 := "aws_launch_template.test2" @@ -404,7 +403,7 @@ func TestAccEKSNodeGroup_LaunchTemplate_id(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -436,7 +435,7 @@ func TestAccEKSNodeGroup_LaunchTemplate_id(t *testing.T) { func TestAccEKSNodeGroup_LaunchTemplate_name(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 types.Nodegroup + var nodeGroup1, nodeGroup2 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) launchTemplateResourceName1 := "aws_launch_template.test1" launchTemplateResourceName2 := "aws_launch_template.test2" @@ -444,7 +443,7 @@ func TestAccEKSNodeGroup_LaunchTemplate_name(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -476,14 +475,14 @@ func TestAccEKSNodeGroup_LaunchTemplate_name(t *testing.T) { func TestAccEKSNodeGroup_LaunchTemplate_version(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 types.Nodegroup + var nodeGroup1, nodeGroup2 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) launchTemplateResourceName := "aws_launch_template.test" resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, 
eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -515,14 +514,14 @@ func TestAccEKSNodeGroup_LaunchTemplate_version(t *testing.T) { func TestAccEKSNodeGroup_releaseVersion(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 types.Nodegroup + var nodeGroup1, nodeGroup2 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) ssmParameterDataSourceName := "data.aws_ssm_parameter.test" resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -552,7 +551,7 @@ func TestAccEKSNodeGroup_releaseVersion(t *testing.T) { func TestAccEKSNodeGroup_RemoteAccess_ec2SSHKey(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 types.Nodegroup + var nodeGroup1 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" @@ -563,7 +562,7 @@ func TestAccEKSNodeGroup_RemoteAccess_ec2SSHKey(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -586,7 +585,7 @@ func TestAccEKSNodeGroup_RemoteAccess_ec2SSHKey(t *testing.T) { func TestAccEKSNodeGroup_RemoteAccess_sourceSecurityGroupIDs(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 types.Nodegroup + var nodeGroup1 eks.Nodegroup rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" @@ -597,7 +596,7 @@ func TestAccEKSNodeGroup_RemoteAccess_sourceSecurityGroupIDs(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -620,13 +619,13 @@ func TestAccEKSNodeGroup_RemoteAccess_sourceSecurityGroupIDs(t *testing.T) { func TestAccEKSNodeGroup_Scaling_desiredSize(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 types.Nodegroup + var nodeGroup1, nodeGroup2 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -662,13 +661,13 @@ func TestAccEKSNodeGroup_Scaling_desiredSize(t *testing.T) { func TestAccEKSNodeGroup_Scaling_maxSize(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 types.Nodegroup + var nodeGroup1, nodeGroup2 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: 
[]resource.TestStep{ @@ -704,13 +703,13 @@ func TestAccEKSNodeGroup_Scaling_maxSize(t *testing.T) { func TestAccEKSNodeGroup_Scaling_minSize(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 types.Nodegroup + var nodeGroup1, nodeGroup2 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -746,13 +745,13 @@ func TestAccEKSNodeGroup_Scaling_minSize(t *testing.T) { func TestAccEKSNodeGroup_ScalingZeroDesiredSize_minSize(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 types.Nodegroup + var nodeGroup1, nodeGroup2 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -798,13 +797,13 @@ func TestAccEKSNodeGroup_ScalingZeroDesiredSize_minSize(t *testing.T) { func TestAccEKSNodeGroup_tags(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2, nodeGroup3 types.Nodegroup + var nodeGroup1, nodeGroup2, nodeGroup3 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, 
eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -846,13 +845,13 @@ func TestAccEKSNodeGroup_tags(t *testing.T) { func TestAccEKSNodeGroup_taints(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 types.Nodegroup + var nodeGroup1 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -910,13 +909,13 @@ func TestAccEKSNodeGroup_taints(t *testing.T) { func TestAccEKSNodeGroup_update(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 types.Nodegroup + var nodeGroup1 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -949,13 +948,13 @@ func TestAccEKSNodeGroup_update(t *testing.T) { func TestAccEKSNodeGroup_version(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 types.Nodegroup + var nodeGroup1, nodeGroup2 eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - 
ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -989,7 +988,7 @@ func testAccErrorCheckSkip(t *testing.T) resource.ErrorCheckFunc { ) } -func testAccCheckNodeGroupExists(ctx context.Context, resourceName string, nodeGroup *types.Nodegroup) resource.TestCheckFunc { +func testAccCheckNodeGroupExists(ctx context.Context, resourceName string, nodeGroup *eks.Nodegroup) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] if !ok { @@ -1006,9 +1005,9 @@ func testAccCheckNodeGroupExists(ctx context.Context, resourceName string, nodeG return err } - client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) - output, err := tfeks.FindNodegroupByClusterNameAndNodegroupName(ctx, client, clusterName, nodeGroupName) + output, err := tfeks.FindNodegroupByClusterNameAndNodegroupName(ctx, conn, clusterName, nodeGroupName) if err != nil { return err @@ -1022,7 +1021,7 @@ func testAccCheckNodeGroupExists(ctx context.Context, resourceName string, nodeG func testAccCheckNodeGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - client := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_eks_node_group" { @@ -1035,7 +1034,7 @@ func testAccCheckNodeGroupDestroy(ctx context.Context) resource.TestCheckFunc { return err } - _, err = tfeks.FindNodegroupByClusterNameAndNodegroupName(ctx, client, clusterName, nodeGroupName) + _, err = tfeks.FindNodegroupByClusterNameAndNodegroupName(ctx, conn, clusterName, nodeGroupName) if tfresource.NotFound(err) { continue @@ -1052,20 +1051,20 @@ 
func testAccCheckNodeGroupDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckNodeGroupNotRecreated(i, j *types.Nodegroup) resource.TestCheckFunc { +func testAccCheckNodeGroupNotRecreated(i, j *eks.Nodegroup) resource.TestCheckFunc { return func(s *terraform.State) error { - if !aws.ToTime(i.CreatedAt).Equal(aws.ToTime(j.CreatedAt)) { - return fmt.Errorf("EKS Node Group (%s) was recreated", aws.ToString(j.NodegroupName)) + if !aws.TimeValue(i.CreatedAt).Equal(aws.TimeValue(j.CreatedAt)) { + return fmt.Errorf("EKS Node Group (%s) was recreated", aws.StringValue(j.NodegroupName)) } return nil } } -func testAccCheckNodeGroupRecreated(i, j *types.Nodegroup) resource.TestCheckFunc { +func testAccCheckNodeGroupRecreated(i, j *eks.Nodegroup) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.ToTime(i.CreatedAt).Equal(aws.ToTime(j.CreatedAt)) { - return fmt.Errorf("EKS Node Group (%s) was not recreated", aws.ToString(j.NodegroupName)) + if aws.TimeValue(i.CreatedAt).Equal(aws.TimeValue(j.CreatedAt)) { + return fmt.Errorf("EKS Node Group (%s) was not recreated", aws.StringValue(j.NodegroupName)) } return nil diff --git a/internal/service/eks/node_groups_data_source.go b/internal/service/eks/node_groups_data_source.go index 7d321d02eef..4f3eb7bc7dc 100644 --- a/internal/service/eks/node_groups_data_source.go +++ b/internal/service/eks/node_groups_data_source.go @@ -6,8 +6,8 @@ package eks import ( "context" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/eks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -37,29 +37,34 @@ func DataSourceNodeGroups() *schema.Resource { func dataSourceNodeGroupsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags 
diag.Diagnostics - client := meta.(*conns.AWSClient).EKSClient(ctx) + conn := meta.(*conns.AWSClient).EKSConn(ctx) + clusterName := d.Get("cluster_name").(string) input := &eks.ListNodegroupsInput{ ClusterName: aws.String(clusterName), } - var nodegroups []string - - paginator := eks.NewListNodegroupsPaginator(client, input) - for paginator.HasMorePages() { - output, err := paginator.NextPage(ctx) + var nodegroups []*string - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing EKS Node Groups: %s", err) + err := conn.ListNodegroupsPagesWithContext(ctx, input, func(page *eks.ListNodegroupsOutput, lastPage bool) bool { + if page == nil { + return !lastPage } - nodegroups = append(nodegroups, output.Nodegroups...) + nodegroups = append(nodegroups, page.Nodegroups...) + + return !lastPage + }) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "listing EKS Node Groups: %s", err) } d.SetId(clusterName) + d.Set("cluster_name", clusterName) - d.Set("names", nodegroups) + d.Set("names", aws.StringValueSlice(nodegroups)) return diags } diff --git a/internal/service/eks/node_groups_data_source_test.go b/internal/service/eks/node_groups_data_source_test.go index 25bd6b1b52b..2dd1c1c8942 100644 --- a/internal/service/eks/node_groups_data_source_test.go +++ b/internal/service/eks/node_groups_data_source_test.go @@ -7,7 +7,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -20,7 +20,7 @@ func TestAccEKSNodeGroupsDataSource_basic(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.ServiceID), + ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index ae3f5129a26..9e6d4ad9270 100644 --- a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -5,8 +5,9 @@ package eks import ( "context" - aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" - eks_sdkv2 "github.com/aws/aws-sdk-go-v2/service/eks" + aws_sdkv1 "github.com/aws/aws-sdk-go/aws" + session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" + eks_sdkv1 "github.com/aws/aws-sdk-go/service/eks" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -104,15 +105,11 @@ func (p *servicePackage) ServicePackageName() string { return names.EKS } -// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. -func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*eks_sdkv2.Client, error) { - cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) +// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. 
+func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*eks_sdkv1.EKS, error) { + sess := config["session"].(*session_sdkv1.Session) - return eks_sdkv2.NewFromConfig(cfg, func(o *eks_sdkv2.Options) { - if endpoint := config["endpoint"].(string); endpoint != "" { - o.BaseEndpoint = aws_sdkv2.String(endpoint) - } - }), nil + return eks_sdkv1.New(sess.Copy(&aws_sdkv1.Config{Endpoint: aws_sdkv1.String(config["endpoint"].(string))})), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/eks/status.go b/internal/service/eks/status.go index bf237126344..1716b138070 100644 --- a/internal/service/eks/status.go +++ b/internal/service/eks/status.go @@ -6,14 +6,15 @@ package eks import ( "context" - "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/eks" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func statusAddon(ctx context.Context, client *eks.Client, clusterName, addonName string) retry.StateRefreshFunc { +func statusAddon(ctx context.Context, conn *eks.EKS, clusterName, addonName string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindAddonByClusterNameAndAddonName(ctx, client, clusterName, addonName) + output, err := FindAddonByClusterNameAndAddonName(ctx, conn, clusterName, addonName) if tfresource.NotFound(err) { return nil, "", nil @@ -23,13 +24,13 @@ func statusAddon(ctx context.Context, client *eks.Client, clusterName, addonName return nil, "", err } - return output, string(output.Status), nil + return output, aws.StringValue(output.Status), nil } } -func statusAddonUpdate(ctx context.Context, client *eks.Client, clusterName, addonName, id string) retry.StateRefreshFunc { +func statusAddonUpdate(ctx context.Context, conn *eks.EKS, clusterName, addonName, id string) retry.StateRefreshFunc { return func() 
(interface{}, string, error) { - output, err := FindAddonUpdateByClusterNameAddonNameAndID(ctx, client, clusterName, addonName, id) + output, err := FindAddonUpdateByClusterNameAddonNameAndID(ctx, conn, clusterName, addonName, id) if tfresource.NotFound(err) { return nil, "", nil @@ -39,13 +40,13 @@ func statusAddonUpdate(ctx context.Context, client *eks.Client, clusterName, add return nil, "", err } - return output, string(output.Status), nil + return output, aws.StringValue(output.Status), nil } } -func statusFargateProfile(ctx context.Context, client *eks.Client, clusterName, fargateProfileName string) retry.StateRefreshFunc { +func statusFargateProfile(ctx context.Context, conn *eks.EKS, clusterName, fargateProfileName string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindFargateProfileByClusterNameAndFargateProfileName(ctx, client, clusterName, fargateProfileName) + output, err := FindFargateProfileByClusterNameAndFargateProfileName(ctx, conn, clusterName, fargateProfileName) if tfresource.NotFound(err) { return nil, "", nil @@ -55,13 +56,13 @@ func statusFargateProfile(ctx context.Context, client *eks.Client, clusterName, return nil, "", err } - return output, string(output.Status), nil + return output, aws.StringValue(output.Status), nil } } -func statusNodegroup(ctx context.Context, client *eks.Client, clusterName, nodeGroupName string) retry.StateRefreshFunc { +func statusNodegroup(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindNodegroupByClusterNameAndNodegroupName(ctx, client, clusterName, nodeGroupName) + output, err := FindNodegroupByClusterNameAndNodegroupName(ctx, conn, clusterName, nodeGroupName) if tfresource.NotFound(err) { return nil, "", nil @@ -71,13 +72,13 @@ func statusNodegroup(ctx context.Context, client *eks.Client, clusterName, nodeG return nil, "", err } - return output, 
string(output.Status), nil + return output, aws.StringValue(output.Status), nil } } -func statusNodegroupUpdate(ctx context.Context, client *eks.Client, clusterName, nodeGroupName, id string) retry.StateRefreshFunc { +func statusNodegroupUpdate(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindNodegroupUpdateByClusterNameNodegroupNameAndID(ctx, client, clusterName, nodeGroupName, id) + output, err := FindNodegroupUpdateByClusterNameNodegroupNameAndID(ctx, conn, clusterName, nodeGroupName, id) if tfresource.NotFound(err) { return nil, "", nil @@ -87,13 +88,13 @@ func statusNodegroupUpdate(ctx context.Context, client *eks.Client, clusterName, return nil, "", err } - return output, string(output.Status), nil + return output, aws.StringValue(output.Status), nil } } -func statusOIDCIdentityProviderConfig(ctx context.Context, client *eks.Client, clusterName, configName string) retry.StateRefreshFunc { +func statusOIDCIdentityProviderConfig(ctx context.Context, conn *eks.EKS, clusterName, configName string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, client, clusterName, configName) + output, err := FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, conn, clusterName, configName) if tfresource.NotFound(err) { return nil, "", nil @@ -103,6 +104,6 @@ func statusOIDCIdentityProviderConfig(ctx context.Context, client *eks.Client, c return nil, "", err } - return output, string(output.Status), nil + return output, aws.StringValue(output.Status), nil } } diff --git a/internal/service/eks/sweep.go b/internal/service/eks/sweep.go index 23d99214f25..b6338af1e91 100644 --- a/internal/service/eks/sweep.go +++ b/internal/service/eks/sweep.go @@ -7,14 +7,13 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go-v2/aws" - 
"github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/aws/aws-sdk-go-v2/service/inspector2/types" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/eks" + "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" ) func RegisterSweepers() { @@ -52,64 +51,73 @@ func RegisterSweepers() { func sweepAddons(region string) error { ctx := sweep.Context(region) - sweepClient, err := sweep.SharedRegionalSweepClient(ctx, region) + client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { return fmt.Errorf("error getting client: %w", err) } + + conn := client.EKSConn(ctx) + input := &eks.ListClustersInput{} var sweeperErrs *multierror.Error sweepResources := make([]sweep.Sweepable, 0) - client := sweepClient.EKSClient(ctx) - - paginator := eks.NewListClustersPaginator(client, &eks.ListClustersInput{}) - for paginator.HasMorePages() { - page, err := paginator.NextPage(ctx) - - if awsv2.SkipSweepError(err) { - log.Print(fmt.Errorf("[WARN] Skipping EKS Add-Ons sweep for %s: %w", region, err)) - return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors + err = conn.ListClustersPagesWithContext(ctx, input, func(page *eks.ListClustersOutput, lastPage bool) bool { + if page == nil { + return !lastPage } - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) - } - - for _, cluster := range page.Clusters { + for _, v := range page.Clusters { + clusterName := aws.StringValue(v) input := &eks.ListAddonsInput{ - ClusterName: &cluster, + ClusterName: 
aws.String(clusterName), } - paginator := eks.NewListAddonsPaginator(client, input) - for paginator.HasMorePages() { - page, err := paginator.NextPage(ctx) - - if awsv2.SkipSweepError(err) { - continue + err := conn.ListAddonsPagesWithContext(ctx, input, func(page *eks.ListAddonsOutput, lastPage bool) bool { + if page == nil { + return !lastPage } - // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. - // ¯\_(ツ)_/¯ - if errs.IsA[*types.ResourceNotFoundException](err) { - log.Print(fmt.Errorf("[WARN] Skipping cluster %s not found: %w", region, err)) - continue - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Add-Ons (%s): %w", region, err)) - } - - for _, addon := range page.Addons { + for _, v := range page.Addons { r := ResourceAddon() d := r.Data(nil) - d.SetId(AddonCreateResourceID(cluster, addon)) + d.SetId(AddonCreateResourceID(clusterName, aws.StringValue(v))) - sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, sweepClient)) + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } + + return !lastPage + }) + + if awsv1.SkipSweepError(err) { + continue + } + + // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. 
+ // ¯\_(ツ)_/¯ + if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + continue + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Add-Ons (%s): %w", region, err)) } } + + return !lastPage + }) + + if awsv1.SkipSweepError(err) { + log.Print(fmt.Errorf("[WARN] Skipping EKS Add-Ons sweep for %s: %w", region, err)) + return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) } - if err := sweep.SweepOrchestrator(ctx, sweepResources); err != nil { + err = sweep.SweepOrchestrator(ctx, sweepResources) + + if err != nil { sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error sweeping EKS Add-Ons (%s): %w", region, err)) } @@ -118,37 +126,42 @@ func sweepAddons(region string) error { func sweepClusters(region string) error { ctx := sweep.Context(region) - sweepClient, err := sweep.SharedRegionalSweepClient(ctx, region) + client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("error getting client: %s", err) } + conn := client.EKSConn(ctx) + input := &eks.ListClustersInput{} sweepResources := make([]sweep.Sweepable, 0) - client := sweepClient.EKSClient(ctx) - - paginator := eks.NewListClustersPaginator(client, &eks.ListClustersInput{}) - for paginator.HasMorePages() { - page, err := paginator.NextPage(ctx) - - if awsv2.SkipSweepError(err) { - log.Printf("[WARN] Skipping EKS Clusters sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing EKS Clusters (%s): %w", region, err) + err = conn.ListClustersPagesWithContext(ctx, input, func(page *eks.ListClustersOutput, lastPage bool) bool { + if page == nil { + return !lastPage } for _, cluster := range page.Clusters { r := ResourceCluster() d := 
r.Data(nil) - d.SetId(cluster) + d.SetId(aws.StringValue(cluster)) - sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, sweepClient)) + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } + + return !lastPage + }) + + if awsv1.SkipSweepError(err) { + log.Printf("[WARN] Skipping EKS Clusters sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing EKS Clusters (%s): %w", region, err) } - if err := sweep.SweepOrchestrator(ctx, sweepResources); err != nil { + err = sweep.SweepOrchestrator(ctx, sweepResources) + + if err != nil { return fmt.Errorf("error sweeping EKS Clusters (%s): %w", region, err) } @@ -157,60 +170,66 @@ func sweepClusters(region string) error { func sweepFargateProfiles(region string) error { ctx := sweep.Context(region) - sweepClient, err := sweep.SharedRegionalSweepClient(ctx, region) + client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { return fmt.Errorf("error getting client: %w", err) } + conn := client.EKSConn(ctx) + input := &eks.ListClustersInput{} var sweeperErrs *multierror.Error sweepResources := make([]sweep.Sweepable, 0) - client := sweepClient.EKSClient(ctx) - - paginator := eks.NewListClustersPaginator(client, &eks.ListClustersInput{}) - for paginator.HasMorePages() { - page, err := paginator.NextPage(ctx) - - if awsv2.SkipSweepError(err) { - log.Printf("[WARN] Skipping EKS Fargate Profiles sweep for %s: %s", region, err) - return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) + err = conn.ListClustersPagesWithContext(ctx, input, func(page *eks.ListClustersOutput, lastPage bool) bool { + if page == nil { + return !lastPage } for _, cluster := range page.Clusters { input := &eks.ListFargateProfilesInput{ - ClusterName: &cluster, + ClusterName: cluster, } 
- paginator := eks.NewListFargateProfilesPaginator(client, input) - for paginator.HasMorePages() { - page, err := paginator.NextPage(ctx) - - if awsv2.SkipSweepError(err) { - continue - } - - // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. - // ¯\_(ツ)_/¯ - if errs.IsA[*types.ResourceNotFoundException](err) { - continue - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Fargate Profiles (%s): %w", region, err)) + err := conn.ListFargateProfilesPagesWithContext(ctx, input, func(page *eks.ListFargateProfilesOutput, lastPage bool) bool { + if page == nil { + return !lastPage } for _, profile := range page.FargateProfileNames { r := ResourceFargateProfile() d := r.Data(nil) - d.SetId(FargateProfileCreateResourceID(cluster, profile)) + d.SetId(FargateProfileCreateResourceID(aws.StringValue(cluster), aws.StringValue(profile))) - sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, sweepClient)) + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } + + return !lastPage + }) + + if awsv1.SkipSweepError(err) { + continue + } + + // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. 
+ // ¯\_(ツ)_/¯ + if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + continue + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Fargate Profiles (%s): %w", region, err)) } } + + return !lastPage + }) + + if awsv1.SkipSweepError(err) { + log.Printf("[WARN] Skipping EKS Fargate Profiles sweep for %s: %s", region, err) + return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -224,63 +243,72 @@ func sweepFargateProfiles(region string) error { func sweepIdentityProvidersConfig(region string) error { ctx := sweep.Context(region) - sweepClient, err := sweep.SharedRegionalSweepClient(ctx, region) + client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { return fmt.Errorf("error getting client: %w", err) } + + conn := client.EKSConn(ctx) + input := &eks.ListClustersInput{} var sweeperErrs *multierror.Error sweepResources := make([]sweep.Sweepable, 0) - client := sweepClient.EKSClient(ctx) - - paginator := eks.NewListClustersPaginator(client, &eks.ListClustersInput{}) - for paginator.HasMorePages() { - page, err := paginator.NextPage(ctx) - - if awsv2.SkipSweepError(err) { - log.Print(fmt.Errorf("[WARN] Skipping EKS Identity Provider Configs sweep for %s: %w", region, err)) - return sweeperErrs // In case we have completed some pages, but had errors - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) + err = conn.ListClustersPagesWithContext(ctx, input, func(page *eks.ListClustersOutput, lastPage bool) bool { + if page == nil { + return !lastPage } for _, cluster := range page.Clusters { input := &eks.ListIdentityProviderConfigsInput{ - ClusterName: &cluster, + ClusterName: cluster, 
} - paginator := eks.NewListIdentityProviderConfigsPaginator(client, input) - for paginator.HasMorePages() { - page, err := paginator.NextPage(ctx) - - if awsv2.SkipSweepError(err) { - continue - } - - // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. - // ¯\_(ツ)_/¯ - if errs.IsA[*types.ResourceNotFoundException](err) { - continue - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Identity Provider Configs (%s): %w", region, err)) + err := conn.ListIdentityProviderConfigsPagesWithContext(ctx, input, func(page *eks.ListIdentityProviderConfigsOutput, lastPage bool) bool { + if page == nil { + return !lastPage } for _, identityProviderConfig := range page.IdentityProviderConfigs { r := ResourceIdentityProviderConfig() d := r.Data(nil) - d.SetId(IdentityProviderConfigCreateResourceID(cluster, aws.ToString(identityProviderConfig.Name))) + d.SetId(IdentityProviderConfigCreateResourceID(aws.StringValue(cluster), aws.StringValue(identityProviderConfig.Name))) - sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, sweepClient)) + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } + + return !lastPage + }) + + if awsv1.SkipSweepError(err) { + continue + } + + // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. 
+ // ¯\_(ツ)_/¯ + if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + continue + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Identity Provider Configs (%s): %w", region, err)) } } + + return !lastPage + }) + + if awsv1.SkipSweepError(err) { + log.Print(fmt.Errorf("[WARN] Skipping EKS Identity Provider Configs sweep for %s: %w", region, err)) + return sweeperErrs // In case we have completed some pages, but had errors + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) } - if err := sweep.SweepOrchestrator(ctx, sweepResources); err != nil { + err = sweep.SweepOrchestrator(ctx, sweepResources) + + if err != nil { sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error sweeping EKS Identity Provider Configs (%s): %w", region, err)) } @@ -289,62 +317,71 @@ func sweepIdentityProvidersConfig(region string) error { func sweepNodeGroups(region string) error { ctx := sweep.Context(region) - sweepClient, err := sweep.SharedRegionalSweepClient(ctx, region) + client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { return fmt.Errorf("error getting client: %w", err) } + conn := client.EKSConn(ctx) + input := &eks.ListClustersInput{} var sweeperErrs *multierror.Error sweepResources := make([]sweep.Sweepable, 0) - client := sweepClient.EKSClient(ctx) - - paginator := eks.NewListClustersPaginator(client, &eks.ListClustersInput{}) - for paginator.HasMorePages() { - page, err := paginator.NextPage(ctx) - - if awsv2.SkipSweepError(err) { - log.Printf("[WARN] Skipping EKS Node Groups sweep for %s: %s", region, err) - return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) + err = conn.ListClustersPagesWithContext(ctx, input, func(page 
*eks.ListClustersOutput, lastPage bool) bool { + if page == nil { + return !lastPage } for _, cluster := range page.Clusters { input := &eks.ListNodegroupsInput{ - ClusterName: &cluster, + ClusterName: cluster, } - paginator := eks.NewListNodegroupsPaginator(client, input) - for paginator.HasMorePages() { - page, err := paginator.NextPage(ctx) - - if awsv2.SkipSweepError(err) { - continue - } - // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. - // ¯\_(ツ)_/¯ - if errs.IsA[*types.ResourceNotFoundException](err) { - continue - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Node Groups (%s): %w", region, err)) + err := conn.ListNodegroupsPagesWithContext(ctx, input, func(page *eks.ListNodegroupsOutput, lastPage bool) bool { + if page == nil { + return !lastPage } for _, nodeGroup := range page.Nodegroups { r := ResourceNodeGroup() d := r.Data(nil) - d.SetId(NodeGroupCreateResourceID(cluster, nodeGroup)) + d.SetId(NodeGroupCreateResourceID(aws.StringValue(cluster), aws.StringValue(nodeGroup))) - sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, sweepClient)) + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } + + return !lastPage + }) + + if awsv1.SkipSweepError(err) { + continue + } + + // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. 
+ // ¯\_(ツ)_/¯ + if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + continue + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Node Groups (%s): %w", region, err)) } } + + return !lastPage + }) + + if awsv1.SkipSweepError(err) { + log.Printf("[WARN] Skipping EKS Node Groups sweep for %s: %s", region, err) + return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors } - if err := sweep.SweepOrchestrator(ctx, sweepResources); err != nil { + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) + } + + err = sweep.SweepOrchestrator(ctx, sweepResources) + + if err != nil { sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error sweeping EKS Node Groups (%s): %w", region, err)) } diff --git a/internal/service/eks/tags_gen.go b/internal/service/eks/tags_gen.go index 3f9c737dcce..b83a214f802 100644 --- a/internal/service/eks/tags_gen.go +++ b/internal/service/eks/tags_gen.go @@ -5,8 +5,9 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go/service/eks/eksiface" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -18,12 +19,12 @@ import ( // listTags lists eks service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func listTags(ctx context.Context, conn *eks.Client, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn eksiface.EKSAPI, identifier string) (tftags.KeyValueTags, error) { input := &eks.ListTagsForResourceInput{ ResourceArn: aws.String(identifier), } - output, err := conn.ListTagsForResource(ctx, input) + output, err := conn.ListTagsForResourceWithContext(ctx, input) if err != nil { return tftags.New(ctx, nil), err @@ -35,7 +36,7 @@ func listTags(ctx context.Context, conn *eks.Client, identifier string) (tftags. // ListTags lists eks service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).EKSClient(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).EKSConn(ctx), identifier) if err != nil { return err @@ -48,21 +49,21 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri return nil } -// map[string]string handling +// map[string]*string handling // Tags returns eks service tags. -func Tags(tags tftags.KeyValueTags) map[string]string { - return tags.Map() +func Tags(tags tftags.KeyValueTags) map[string]*string { + return aws.StringMap(tags.Map()) } // KeyValueTags creates tftags.KeyValueTags from eks service tags. -func KeyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags map[string]*string) tftags.KeyValueTags { return tftags.New(ctx, tags) } // getTagsIn returns eks service tags from Context. // nil is returned if there are no input tags. 
-func getTagsIn(ctx context.Context) map[string]string { +func getTagsIn(ctx context.Context) map[string]*string { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -73,7 +74,7 @@ func getTagsIn(ctx context.Context) map[string]string { } // setTagsOut sets eks service tags in Context. -func setTagsOut(ctx context.Context, tags map[string]string) { +func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) } @@ -82,7 +83,7 @@ func setTagsOut(ctx context.Context, tags map[string]string) { // updateTags updates eks service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func updateTags(ctx context.Context, conn *eks.Client, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn eksiface.EKSAPI, identifier string, oldTagsMap, newTagsMap any) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -93,10 +94,10 @@ func updateTags(ctx context.Context, conn *eks.Client, identifier string, oldTag if len(removedTags) > 0 { input := &eks.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: removedTags.Keys(), + TagKeys: aws.StringSlice(removedTags.Keys()), } - _, err := conn.UntagResource(ctx, input) + _, err := conn.UntagResourceWithContext(ctx, input) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -111,7 +112,7 @@ func updateTags(ctx context.Context, conn *eks.Client, identifier string, oldTag Tags: Tags(updatedTags), } - _, err := conn.TagResource(ctx, input) + _, err := conn.TagResourceWithContext(ctx, input) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -124,5 +125,5 @@ func updateTags(ctx context.Context, conn 
*eks.Client, identifier string, oldTag // UpdateTags updates eks service tags. // It is called from outside this package. func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).EKSClient(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).EKSConn(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/eks/wait.go b/internal/service/eks/wait.go index fa70122180a..57a94e414fd 100644 --- a/internal/service/eks/wait.go +++ b/internal/service/eks/wait.go @@ -7,10 +7,9 @@ import ( "context" "time" - "github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/eks" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) @@ -18,18 +17,60 @@ const ( clusterDeleteRetryTimeout = 60 * time.Minute ) -func waitAddonUpdateSuccessful(ctx context.Context, client *eks.Client, clusterName, addonName, id string, timeout time.Duration) (*types.Update, error) { +func waitAddonCreated(ctx context.Context, conn *eks.EKS, clusterName, addonName string, timeout time.Duration) (*eks.Addon, error) { stateConf := retry.StateChangeConf{ - Pending: enum.Slice(types.UpdateStatusInProgress), - Target: enum.Slice(types.UpdateStatusSuccessful), - Refresh: statusAddonUpdate(ctx, client, clusterName, addonName, id), + Pending: []string{eks.AddonStatusCreating, eks.AddonStatusDegraded}, + Target: []string{eks.AddonStatusActive}, + Refresh: statusAddon(ctx, conn, clusterName, addonName), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*types.Update); ok { - if status := output.Status; status == types.UpdateStatusCancelled || status == types.UpdateStatusFailed { + 
if output, ok := outputRaw.(*eks.Addon); ok { + if status, health := aws.StringValue(output.Status), output.Health; status == eks.AddonStatusCreateFailed && health != nil { + tfresource.SetLastError(err, AddonIssuesError(health.Issues)) + } + + return output, err + } + + return nil, err +} + +func waitAddonDeleted(ctx context.Context, conn *eks.EKS, clusterName, addonName string, timeout time.Duration) (*eks.Addon, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{eks.AddonStatusActive, eks.AddonStatusDeleting}, + Target: []string{}, + Refresh: statusAddon(ctx, conn, clusterName, addonName), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*eks.Addon); ok { + if status, health := aws.StringValue(output.Status), output.Health; status == eks.AddonStatusDeleteFailed && health != nil { + tfresource.SetLastError(err, AddonIssuesError(health.Issues)) + } + + return output, err + } + + return nil, err +} + +func waitAddonUpdateSuccessful(ctx context.Context, conn *eks.EKS, clusterName, addonName, id string, timeout time.Duration) (*eks.Update, error) { + stateConf := retry.StateChangeConf{ + Pending: []string{eks.UpdateStatusInProgress}, + Target: []string{eks.UpdateStatusSuccessful}, + Refresh: statusAddonUpdate(ctx, conn, clusterName, addonName, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*eks.Update); ok { + if status := aws.StringValue(output.Status); status == eks.UpdateStatusCancelled || status == eks.UpdateStatusFailed { tfresource.SetLastError(err, ErrorDetailsError(output.Errors)) } @@ -39,18 +80,94 @@ func waitAddonUpdateSuccessful(ctx context.Context, client *eks.Client, clusterN return nil, err } -func waitNodegroupUpdateSuccessful(ctx context.Context, client *eks.Client, clusterName, nodeGroupName, id string, timeout time.Duration) (*types.Update, error) { //nolint:unparam +func 
waitFargateProfileCreated(ctx context.Context, conn *eks.EKS, clusterName, fargateProfileName string, timeout time.Duration) (*eks.FargateProfile, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{eks.FargateProfileStatusCreating}, + Target: []string{eks.FargateProfileStatusActive}, + Refresh: statusFargateProfile(ctx, conn, clusterName, fargateProfileName), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*eks.FargateProfile); ok { + return output, err + } + + return nil, err +} + +func waitFargateProfileDeleted(ctx context.Context, conn *eks.EKS, clusterName, fargateProfileName string, timeout time.Duration) (*eks.FargateProfile, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{eks.FargateProfileStatusActive, eks.FargateProfileStatusDeleting}, + Target: []string{}, + Refresh: statusFargateProfile(ctx, conn, clusterName, fargateProfileName), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*eks.FargateProfile); ok { + return output, err + } + + return nil, err +} + +func waitNodegroupCreated(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName string, timeout time.Duration) (*eks.Nodegroup, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{eks.NodegroupStatusCreating}, + Target: []string{eks.NodegroupStatusActive}, + Refresh: statusNodegroup(ctx, conn, clusterName, nodeGroupName), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*eks.Nodegroup); ok { + if status, health := aws.StringValue(output.Status), output.Health; status == eks.NodegroupStatusCreateFailed && health != nil { + tfresource.SetLastError(err, IssuesError(health.Issues)) + } + + return output, err + } + + return nil, err +} + +func waitNodegroupDeleted(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName string, timeout time.Duration) 
(*eks.Nodegroup, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{eks.NodegroupStatusActive, eks.NodegroupStatusDeleting}, + Target: []string{}, + Refresh: statusNodegroup(ctx, conn, clusterName, nodeGroupName), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*eks.Nodegroup); ok { + if status, health := aws.StringValue(output.Status), output.Health; status == eks.NodegroupStatusDeleteFailed && health != nil { + tfresource.SetLastError(err, IssuesError(health.Issues)) + } + + return output, err + } + + return nil, err +} + +func waitNodegroupUpdateSuccessful(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName, id string, timeout time.Duration) (*eks.Update, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(types.UpdateStatusInProgress), - Target: enum.Slice(types.UpdateStatusSuccessful), - Refresh: statusNodegroupUpdate(ctx, client, clusterName, nodeGroupName, id), + Pending: []string{eks.UpdateStatusInProgress}, + Target: []string{eks.UpdateStatusSuccessful}, + Refresh: statusNodegroupUpdate(ctx, conn, clusterName, nodeGroupName, id), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*types.Update); ok { - if status := output.Status; status == types.UpdateStatusCancelled || status == types.UpdateStatusFailed { + if output, ok := outputRaw.(*eks.Update); ok { + if status := aws.StringValue(output.Status); status == eks.UpdateStatusCancelled || status == eks.UpdateStatusFailed { tfresource.SetLastError(err, ErrorDetailsError(output.Errors)) } @@ -60,34 +177,34 @@ func waitNodegroupUpdateSuccessful(ctx context.Context, client *eks.Client, clus return nil, err } -func waitOIDCIdentityProviderConfigCreated(ctx context.Context, client *eks.Client, clusterName, configName string, timeout time.Duration) (*types.OidcIdentityProviderConfig, error) { +func 
waitOIDCIdentityProviderConfigCreated(ctx context.Context, conn *eks.EKS, clusterName, configName string, timeout time.Duration) (*eks.OidcIdentityProviderConfig, error) { stateConf := retry.StateChangeConf{ - Pending: enum.Slice(types.ConfigStatusCreating), - Target: enum.Slice(types.ConfigStatusActive), - Refresh: statusOIDCIdentityProviderConfig(ctx, client, clusterName, configName), + Pending: []string{eks.ConfigStatusCreating}, + Target: []string{eks.ConfigStatusActive}, + Refresh: statusOIDCIdentityProviderConfig(ctx, conn, clusterName, configName), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*types.OidcIdentityProviderConfig); ok { + if output, ok := outputRaw.(*eks.OidcIdentityProviderConfig); ok { return output, err } return nil, err } -func waitOIDCIdentityProviderConfigDeleted(ctx context.Context, client *eks.Client, clusterName, configName string, timeout time.Duration) (*types.OidcIdentityProviderConfig, error) { +func waitOIDCIdentityProviderConfigDeleted(ctx context.Context, conn *eks.EKS, clusterName, configName string, timeout time.Duration) (*eks.OidcIdentityProviderConfig, error) { stateConf := retry.StateChangeConf{ - Pending: enum.Slice(types.ConfigStatusActive, types.ConfigStatusDeleting), + Pending: []string{eks.ConfigStatusActive, eks.ConfigStatusDeleting}, Target: []string{}, - Refresh: statusOIDCIdentityProviderConfig(ctx, client, clusterName, configName), + Refresh: statusOIDCIdentityProviderConfig(ctx, conn, clusterName, configName), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*types.OidcIdentityProviderConfig); ok { + if output, ok := outputRaw.(*eks.OidcIdentityProviderConfig); ok { return output, err } diff --git a/names/names_data.csv b/names/names_data.csv index 43dced7e57f..e0fbae874eb 100644 --- a/names/names_data.csv +++ b/names/names_data.csv @@ -131,7 +131,7 @@ 
ecr,ecr,ecr,ecr,,ecr,,,ECR,ECR,,1,,,aws_ecr_,,ecr_,ECR (Elastic Container Regist ecr-public,ecrpublic,ecrpublic,ecrpublic,,ecrpublic,,,ECRPublic,ECRPublic,,1,,,aws_ecrpublic_,,ecrpublic_,ECR Public,Amazon,,,,,, ecs,ecs,ecs,ecs,,ecs,,,ECS,ECS,,1,,,aws_ecs_,,ecs_,ECS (Elastic Container),Amazon,,,,,, efs,efs,efs,efs,,efs,,,EFS,EFS,,1,,,aws_efs_,,efs_,EFS (Elastic File System),Amazon,,,,,, -eks,eks,eks,eks,,eks,,,EKS,EKS,,,2,,aws_eks_,,eks_,EKS (Elastic Kubernetes),Amazon,,,,,, +eks,eks,eks,eks,,eks,,,EKS,EKS,,1,,,aws_eks_,,eks_,EKS (Elastic Kubernetes),Amazon,,,,,, elasticbeanstalk,elasticbeanstalk,elasticbeanstalk,elasticbeanstalk,,elasticbeanstalk,,beanstalk,ElasticBeanstalk,ElasticBeanstalk,,1,,aws_elastic_beanstalk_,aws_elasticbeanstalk_,,elastic_beanstalk_,Elastic Beanstalk,AWS,,,,,, elastic-inference,elasticinference,elasticinference,elasticinference,,elasticinference,,,ElasticInference,ElasticInference,,1,,,aws_elasticinference_,,elasticinference_,Elastic Inference,Amazon,,x,,,, elastictranscoder,elastictranscoder,elastictranscoder,elastictranscoder,,elastictranscoder,,,ElasticTranscoder,ElasticTranscoder,,1,,,aws_elastictranscoder_,,elastictranscoder_,Elastic Transcoder,Amazon,,,,,, From 4be47f8c9b2c490f071439778e54407ca4959ba7 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 6 Nov 2023 16:25:55 -0500 Subject: [PATCH 05/46] Use AWS SDK for Go v2 for eks service. 
--- names/names_data.csv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/names/names_data.csv b/names/names_data.csv index 5146313f855..ee995ad67e0 100644 --- a/names/names_data.csv +++ b/names/names_data.csv @@ -131,7 +131,7 @@ ecr,ecr,ecr,ecr,,ecr,,,ECR,ECR,,1,,,aws_ecr_,,ecr_,ECR (Elastic Container Regist ecr-public,ecrpublic,ecrpublic,ecrpublic,,ecrpublic,,,ECRPublic,ECRPublic,,1,,,aws_ecrpublic_,,ecrpublic_,ECR Public,Amazon,,,,,, ecs,ecs,ecs,ecs,,ecs,,,ECS,ECS,,1,,,aws_ecs_,,ecs_,ECS (Elastic Container),Amazon,,,,,, efs,efs,efs,efs,,efs,,,EFS,EFS,,1,,,aws_efs_,,efs_,EFS (Elastic File System),Amazon,,,,,, -eks,eks,eks,eks,,eks,,,EKS,EKS,,1,,,aws_eks_,,eks_,EKS (Elastic Kubernetes),Amazon,,,,,, +eks,eks,eks,eks,,eks,,,EKS,EKS,,,2,,aws_eks_,,eks_,EKS (Elastic Kubernetes),Amazon,,,,,, elasticbeanstalk,elasticbeanstalk,elasticbeanstalk,elasticbeanstalk,,elasticbeanstalk,,beanstalk,ElasticBeanstalk,ElasticBeanstalk,,1,,aws_elastic_beanstalk_,aws_elasticbeanstalk_,,elastic_beanstalk_,Elastic Beanstalk,AWS,,,,,, elastic-inference,elasticinference,elasticinference,elasticinference,,elasticinference,,,ElasticInference,ElasticInference,,1,,,aws_elasticinference_,,elasticinference_,Elastic Inference,Amazon,,x,,,, elastictranscoder,elastictranscoder,elastictranscoder,elastictranscoder,,elastictranscoder,,,ElasticTranscoder,ElasticTranscoder,,1,,,aws_elastictranscoder_,,elastictranscoder_,Elastic Transcoder,Amazon,,,,,, From bb02894285bacb1a33b1664e492c75cdd4d1494b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 6 Nov 2023 16:27:42 -0500 Subject: [PATCH 06/46] Add 'names.EKSEndpointID'. 
--- names/names.go | 1 + 1 file changed, 1 insertion(+) diff --git a/names/names.go b/names/names.go index e14390dcbd1..b23f54c70ec 100644 --- a/names/names.go +++ b/names/names.go @@ -37,6 +37,7 @@ const ( ComprehendEndpointID = "comprehend" ComputeOptimizerEndpointID = "computeoptimizer" DSEndpointID = "ds" + EKSEndpointID = "eks" EMRServerlessEndpointID = "emrserverless" GlacierEndpointID = "glacier" IdentityStoreEndpointID = "identitystore" From 97233014cb41542f3c4369544aa7a90108cb6eb6 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 6 Nov 2023 16:29:28 -0500 Subject: [PATCH 07/46] eks: Generate AWS SDK for Go v2 tagging code. --- internal/service/eks/generate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/eks/generate.go b/internal/service/eks/generate.go index d9cf18f5965..acb50aab0b3 100644 --- a/internal/service/eks/generate.go +++ b/internal/service/eks/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsMap -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ListTags -ServiceTagsMap -KVTValues -SkipTypesImp -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. From d7c0c4b8bb30cacffd06e74068e60f854f1a92df Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 6 Nov 2023 16:30:30 -0500 Subject: [PATCH 08/46] Run 'make gen'. 
--- internal/conns/awsclient_gen.go | 6 ++-- internal/service/eks/service_package_gen.go | 17 ++++++----- internal/service/eks/tags_gen.go | 33 ++++++++++----------- 3 files changed, 29 insertions(+), 27 deletions(-) diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index 6860b7ac80a..a85deb7c3cb 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -21,6 +21,7 @@ import ( directoryservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/directoryservice" docdbelastic_sdkv2 "github.com/aws/aws-sdk-go-v2/service/docdbelastic" ec2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ec2" + eks_sdkv2 "github.com/aws/aws-sdk-go-v2/service/eks" emrserverless_sdkv2 "github.com/aws/aws-sdk-go-v2/service/emrserverless" finspace_sdkv2 "github.com/aws/aws-sdk-go-v2/service/finspace" fis_sdkv2 "github.com/aws/aws-sdk-go-v2/service/fis" @@ -133,7 +134,6 @@ import ( ecrpublic_sdkv1 "github.com/aws/aws-sdk-go/service/ecrpublic" ecs_sdkv1 "github.com/aws/aws-sdk-go/service/ecs" efs_sdkv1 "github.com/aws/aws-sdk-go/service/efs" - eks_sdkv1 "github.com/aws/aws-sdk-go/service/eks" elasticache_sdkv1 "github.com/aws/aws-sdk-go/service/elasticache" elasticbeanstalk_sdkv1 "github.com/aws/aws-sdk-go/service/elasticbeanstalk" elasticsearchservice_sdkv1 "github.com/aws/aws-sdk-go/service/elasticsearchservice" @@ -527,8 +527,8 @@ func (c *AWSClient) EFSConn(ctx context.Context) *efs_sdkv1.EFS { return errs.Must(conn[*efs_sdkv1.EFS](ctx, c, names.EFS)) } -func (c *AWSClient) EKSConn(ctx context.Context) *eks_sdkv1.EKS { - return errs.Must(conn[*eks_sdkv1.EKS](ctx, c, names.EKS)) +func (c *AWSClient) EKSClient(ctx context.Context) *eks_sdkv2.Client { + return errs.Must(client[*eks_sdkv2.Client](ctx, c, names.EKS)) } func (c *AWSClient) ELBConn(ctx context.Context) *elb_sdkv1.ELB { diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index 9e6d4ad9270..ae3f5129a26 100644 --- 
a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -5,9 +5,8 @@ package eks import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - eks_sdkv1 "github.com/aws/aws-sdk-go/service/eks" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + eks_sdkv2 "github.com/aws/aws-sdk-go-v2/service/eks" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -105,11 +104,15 @@ func (p *servicePackage) ServicePackageName() string { return names.EKS } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*eks_sdkv1.EKS, error) { - sess := config["session"].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*eks_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return eks_sdkv1.New(sess.Copy(&aws_sdkv1.Config{Endpoint: aws_sdkv1.String(config["endpoint"].(string))})), nil + return eks_sdkv2.NewFromConfig(cfg, func(o *eks_sdkv2.Options) { + if endpoint := config["endpoint"].(string); endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + }), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/eks/tags_gen.go b/internal/service/eks/tags_gen.go index b83a214f802..3f9c737dcce 100644 --- a/internal/service/eks/tags_gen.go +++ b/internal/service/eks/tags_gen.go @@ -5,9 +5,8 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/aws/aws-sdk-go/service/eks/eksiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" 
"github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +18,12 @@ import ( // listTags lists eks service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func listTags(ctx context.Context, conn eksiface.EKSAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *eks.Client, identifier string) (tftags.KeyValueTags, error) { input := &eks.ListTagsForResourceInput{ ResourceArn: aws.String(identifier), } - output, err := conn.ListTagsForResourceWithContext(ctx, input) + output, err := conn.ListTagsForResource(ctx, input) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +35,7 @@ func listTags(ctx context.Context, conn eksiface.EKSAPI, identifier string) (tft // ListTags lists eks service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).EKSConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).EKSClient(ctx), identifier) if err != nil { return err @@ -49,21 +48,21 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri return nil } -// map[string]*string handling +// map[string]string handling // Tags returns eks service tags. -func Tags(tags tftags.KeyValueTags) map[string]*string { - return aws.StringMap(tags.Map()) +func Tags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() } // KeyValueTags creates tftags.KeyValueTags from eks service tags. 
-func KeyValueTags(ctx context.Context, tags map[string]*string) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { return tftags.New(ctx, tags) } // getTagsIn returns eks service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) map[string]*string { +func getTagsIn(ctx context.Context) map[string]string { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -74,7 +73,7 @@ func getTagsIn(ctx context.Context) map[string]*string { } // setTagsOut sets eks service tags in Context. -func setTagsOut(ctx context.Context, tags map[string]*string) { +func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) } @@ -83,7 +82,7 @@ func setTagsOut(ctx context.Context, tags map[string]*string) { // updateTags updates eks service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func updateTags(ctx context.Context, conn eksiface.EKSAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *eks.Client, identifier string, oldTagsMap, newTagsMap any) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -94,10 +93,10 @@ func updateTags(ctx context.Context, conn eksiface.EKSAPI, identifier string, ol if len(removedTags) > 0 { input := &eks.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -112,7 +111,7 @@ func updateTags(ctx context.Context, conn eksiface.EKSAPI, identifier string, ol Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -125,5 +124,5 @@ func updateTags(ctx context.Context, conn eksiface.EKSAPI, identifier string, ol // UpdateTags updates eks service tags. // It is called from outside this package. func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).EKSConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).EKSClient(ctx), identifier, oldTags, newTags) } From 46c04f9c8ec54851de6fb90bb99b9ba0f7126e32 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 6 Nov 2023 16:31:16 -0500 Subject: [PATCH 09/46] Run 'go get github.com/aws/aws-sdk-go-v2/service/eks@v1.32.0 && go mod tidy'. 
--- go.mod | 1 + go.sum | 2 ++ 2 files changed, 3 insertions(+) diff --git a/go.mod b/go.mod index 4584ff580c9..ce567c7ece4 100644 --- a/go.mod +++ b/go.mod @@ -27,6 +27,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/directoryservice v1.21.0 github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.5.0 github.com/aws/aws-sdk-go-v2/service/ec2 v1.130.0 + github.com/aws/aws-sdk-go-v2/service/eks v1.32.0 github.com/aws/aws-sdk-go-v2/service/emrserverless v1.13.0 github.com/aws/aws-sdk-go-v2/service/finspace v1.16.0 github.com/aws/aws-sdk-go-v2/service/fis v1.19.0 diff --git a/go.sum b/go.sum index fdc6c79cd0d..794f2e150d0 100644 --- a/go.sum +++ b/go.sum @@ -83,6 +83,8 @@ github.com/aws/aws-sdk-go-v2/service/dynamodb v1.23.0 h1:xmSAn14nM6IdHyuWO/bsrAa github.com/aws/aws-sdk-go-v2/service/dynamodb v1.23.0/go.mod h1:1HkLh8vaL4obF95fne7ZOu7sxomS/+vkBt3/+gqqwE4= github.com/aws/aws-sdk-go-v2/service/ec2 v1.130.0 h1:a7CPCX/m+owAiAqcK8W9/SoB7EA4QUE4BddYdFyEGco= github.com/aws/aws-sdk-go-v2/service/ec2 v1.130.0/go.mod h1:EJlGVMO5zynmSDdvwJfFa2RzAZoHI4gVJER0h82/dYk= +github.com/aws/aws-sdk-go-v2/service/eks v1.32.0 h1:w8O52S+rH0026BqntZs55OWbV8Huq8VhXL9bSixCRYc= +github.com/aws/aws-sdk-go-v2/service/eks v1.32.0/go.mod h1:l13fsSUzq27egNlyKBw4l0+mOSwa5Kt6r3JmJblYt14= github.com/aws/aws-sdk-go-v2/service/emrserverless v1.13.0 h1:y56k/4nfqSV8iQ7Us/TnoXhTqlhWz//V5uxQdfMbMKE= github.com/aws/aws-sdk-go-v2/service/emrserverless v1.13.0/go.mod h1:IGFmVFAav4kjHb/AjYiu677R0hvU9Knhy3MwkEcvdjw= github.com/aws/aws-sdk-go-v2/service/finspace v1.16.0 h1:vUaANfye60JdQ6naelwbw30GO5qJzv+Ks1+R3JjcW7o= From 6462c7f285bec735d2ba53c0061ee3eee2542aac Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 6 Nov 2023 16:37:16 -0500 Subject: [PATCH 10/46] d/aws_eks_addon: Migrate to AWS SDK for Go v2. 
--- internal/service/eks/addon_data_source.go | 43 ++++++++++++++++++--- internal/service/eks/service_package_gen.go | 2 +- 2 files changed, 38 insertions(+), 7 deletions(-) diff --git a/internal/service/eks/addon_data_source.go b/internal/service/eks/addon_data_source.go index 7779c12f741..62d821430ba 100644 --- a/internal/service/eks/addon_data_source.go +++ b/internal/service/eks/addon_data_source.go @@ -7,16 +7,21 @@ import ( "context" "time" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) // @SDKDataSource("aws_eks_addon") -func DataSourceAddon() *schema.Resource { +func dataSourceAddon() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceAddonRead, Schema: map[string]*schema.Schema{ @@ -60,14 +65,14 @@ func DataSourceAddon() *schema.Resource { } func dataSourceAddonRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig addonName := d.Get("addon_name").(string) clusterName := d.Get("cluster_name").(string) id := AddonCreateResourceID(clusterName, addonName) - addon, err := FindAddonByClusterNameAndAddonName(ctx, conn, clusterName, addonName) + addon, err := findAddonByTwoPartKey(ctx, conn, clusterName, addonName) if err != nil { return 
diag.Errorf("reading EKS Add-On (%s): %s", id, err) @@ -77,8 +82,8 @@ func dataSourceAddonRead(ctx context.Context, d *schema.ResourceData, meta inter d.Set("addon_version", addon.AddonVersion) d.Set("arn", addon.AddonArn) d.Set("configuration_values", addon.ConfigurationValues) - d.Set("created_at", aws.TimeValue(addon.CreatedAt).Format(time.RFC3339)) - d.Set("modified_at", aws.TimeValue(addon.ModifiedAt).Format(time.RFC3339)) + d.Set("created_at", aws.ToTime(addon.CreatedAt).Format(time.RFC3339)) + d.Set("modified_at", aws.ToTime(addon.ModifiedAt).Format(time.RFC3339)) d.Set("service_account_role_arn", addon.ServiceAccountRoleArn) if err := d.Set("tags", KeyValueTags(ctx, addon.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { @@ -87,3 +92,29 @@ func dataSourceAddonRead(ctx context.Context, d *schema.ResourceData, meta inter return nil } + +func findAddonByTwoPartKey(ctx context.Context, conn *eks.Client, clusterName, addonName string) (*types.Addon, error) { + input := &eks.DescribeAddonInput{ + AddonName: aws.String(addonName), + ClusterName: aws.String(clusterName), + } + + output, err := conn.DescribeAddon(ctx, input) + + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.Addon == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.Addon, nil +} diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index ae3f5129a26..31eb6cd1fba 100644 --- a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -25,7 +25,7 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { return []*types.ServicePackageSDKDataSource{ { - Factory: DataSourceAddon, + 
Factory: dataSourceAddon, TypeName: "aws_eks_addon", }, { From d0d21f59792a136122f35e484ba20cfbb81e11a3 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 6 Nov 2023 16:59:23 -0500 Subject: [PATCH 11/46] d/aws_eks_addon_version: Migrate to AWS SDK for Go v2. --- .../service/eks/addon_version_data_source.go | 58 ++++++++++++++++--- internal/service/eks/find.go | 50 ---------------- internal/service/eks/service_package_gen.go | 2 +- 3 files changed, 51 insertions(+), 59 deletions(-) diff --git a/internal/service/eks/addon_version_data_source.go b/internal/service/eks/addon_version_data_source.go index 47264e58ea5..6a416edaf88 100644 --- a/internal/service/eks/addon_version_data_source.go +++ b/internal/service/eks/addon_version_data_source.go @@ -6,14 +6,20 @@ package eks import ( "context" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) // @SDKDataSource("aws_eks_addon_version") -func DataSourceAddonVersion() *schema.Resource { +func dataSourceAddonVersion() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceAddonVersionRead, @@ -40,21 +46,18 @@ func DataSourceAddonVersion() *schema.Resource { } func dataSourceAddonVersionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) addonName := d.Get("addon_name").(string) kubernetesVersion := d.Get("kubernetes_version").(string) mostRecent := 
d.Get("most_recent").(bool) - id := addonName - - versionInfo, err := FindAddonVersionByAddonNameAndKubernetesVersion(ctx, conn, id, kubernetesVersion, mostRecent) + versionInfo, err := findAddonVersionByTwoPartKey(ctx, conn, addonName, kubernetesVersion, mostRecent) if err != nil { - return diag.Errorf("reading EKS Add-On version info (%s, %s): %s", id, kubernetesVersion, err) + return diag.Errorf("reading EKS Add-On version info (%s, %s): %s", addonName, kubernetesVersion, err) } - d.SetId(id) - + d.SetId(addonName) d.Set("addon_name", addonName) d.Set("kubernetes_version", kubernetesVersion) d.Set("most_recent", mostRecent) @@ -62,3 +65,42 @@ func dataSourceAddonVersionRead(ctx context.Context, d *schema.ResourceData, met return nil } + +func findAddonVersionByTwoPartKey(ctx context.Context, conn *eks.Client, addonName, kubernetesVersion string, mostRecent bool) (*types.AddonVersionInfo, error) { + input := &eks.DescribeAddonVersionsInput{ + AddonName: aws.String(addonName), + KubernetesVersion: aws.String(kubernetesVersion), + } + + pages := eks.NewDescribeAddonVersionsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + for _, v := range page.Addons { + for i, v := range v.AddonVersions { + if mostRecent && i == 0 && v.AddonVersion != nil { + return &v, nil + } + + for _, compatibility := range v.Compatibilities { + if compatibility.DefaultVersion && v.AddonVersion != nil { + return &v, nil + } + } + } + } + } + + return nil, tfresource.NewEmptyResultError(input) +} diff --git a/internal/service/eks/find.go b/internal/service/eks/find.go index 2d5b5e6e11e..4c961792188 100644 --- a/internal/service/eks/find.go +++ b/internal/service/eks/find.go @@ -71,56 +71,6 @@ func FindAddonUpdateByClusterNameAddonNameAndID(ctx context.Context, conn *eks.E 
return output.Update, nil } -func FindAddonVersionByAddonNameAndKubernetesVersion(ctx context.Context, conn *eks.EKS, addonName, kubernetesVersion string, mostRecent bool) (*eks.AddonVersionInfo, error) { - input := &eks.DescribeAddonVersionsInput{ - AddonName: aws.String(addonName), - KubernetesVersion: aws.String(kubernetesVersion), - } - var version *eks.AddonVersionInfo - - err := conn.DescribeAddonVersionsPagesWithContext(ctx, input, func(page *eks.DescribeAddonVersionsOutput, lastPage bool) bool { - if page == nil || len(page.Addons) == 0 { - return !lastPage - } - - for _, addon := range page.Addons { - for i, addonVersion := range addon.AddonVersions { - if mostRecent && i == 0 { - version = addonVersion - return !lastPage - } - for _, versionCompatibility := range addonVersion.Compatibilities { - if aws.BoolValue(versionCompatibility.DefaultVersion) { - version = addonVersion - return !lastPage - } - } - } - } - return lastPage - }) - - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if version == nil || version.AddonVersion == nil { - return nil, &retry.NotFoundError{ - Message: "Empty result", - LastRequest: input, - } - } - - return version, nil -} - func FindFargateProfileByClusterNameAndFargateProfileName(ctx context.Context, conn *eks.EKS, clusterName, fargateProfileName string) (*eks.FargateProfile, error) { input := &eks.DescribeFargateProfileInput{ ClusterName: aws.String(clusterName), diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index 31eb6cd1fba..20530d5f3bf 100644 --- a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -29,7 +29,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac TypeName: "aws_eks_addon", }, { - Factory: DataSourceAddonVersion, + Factory: 
dataSourceAddonVersion, TypeName: "aws_eks_addon_version", }, { From 176c462ea66c6b7158af0697739b164a0c77ddb0 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 6 Nov 2023 17:21:02 -0500 Subject: [PATCH 12/46] r/aws_eks_addon: Migrate to AWS SDK for Go v2. --- internal/service/eks/addon.go | 272 +++++++++++++++++--- internal/service/eks/addon_data_source.go | 31 --- internal/service/eks/errors.go | 26 -- internal/service/eks/find.go | 59 ----- internal/service/eks/service_package_gen.go | 2 +- internal/service/eks/status.go | 32 --- internal/service/eks/wait.go | 63 ----- 7 files changed, 231 insertions(+), 254 deletions(-) diff --git a/internal/service/eks/addon.go b/internal/service/eks/addon.go index ab089032a25..261275c5f82 100644 --- a/internal/service/eks/addon.go +++ b/internal/service/eks/addon.go @@ -5,18 +5,24 @@ package eks import ( "context" + "errors" + "fmt" "log" + "strings" "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" sdkid "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -26,7 +32,7 @@ import ( // @SDKResource("aws_eks_addon", name="Add-On") // 
@Tags(identifierAttribute="arn") -func ResourceAddon() *schema.Resource { +func resourceAddon() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceAddonCreate, ReadWithoutTimeout: resourceAddonRead, @@ -89,25 +95,25 @@ func ResourceAddon() *schema.Resource { Optional: true, }, "resolve_conflicts": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(eks.ResolveConflicts_Values(), false), - Deprecated: `The "resolve_conflicts" attribute can't be set to "PRESERVE" on initial resource creation. Use "resolve_conflicts_on_create" and/or "resolve_conflicts_on_update" instead`, + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.ResolveConflicts](), + Deprecated: `The "resolve_conflicts" attribute can't be set to "PRESERVE" on initial resource creation. Use "resolve_conflicts_on_create" and/or "resolve_conflicts_on_update" instead`, }, "resolve_conflicts_on_create": { Type: schema.TypeString, Optional: true, - ValidateFunc: validation.StringInSlice([]string{ - eks.ResolveConflictsNone, - eks.ResolveConflictsOverwrite, - }, false), + ValidateFunc: validation.StringInSlice(enum.Slice( + types.ResolveConflictsNone, + types.ResolveConflictsOverwrite, + ), false), ConflictsWith: []string{"resolve_conflicts"}, }, "resolve_conflicts_on_update": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(eks.ResolveConflicts_Values(), false), - ConflictsWith: []string{"resolve_conflicts"}, + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.ResolveConflicts](), + ConflictsWith: []string{"resolve_conflicts"}, }, "service_account_role_arn": { Type: schema.TypeString, @@ -122,7 +128,7 @@ func ResourceAddon() *schema.Resource { func resourceAddonCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := 
meta.(*conns.AWSClient).EKSClient(ctx) addonName := d.Get("addon_name").(string) clusterName := d.Get("cluster_name").(string) @@ -143,9 +149,9 @@ func resourceAddonCreate(ctx context.Context, d *schema.ResourceData, meta inter } if v, ok := d.GetOk("resolve_conflicts"); ok { - input.ResolveConflicts = aws.String(v.(string)) + input.ResolveConflicts = types.ResolveConflicts(v.(string)) } else if v, ok := d.GetOk("resolve_conflicts_on_create"); ok { - input.ResolveConflicts = aws.String(v.(string)) + input.ResolveConflicts = types.ResolveConflicts(v.(string)) } if v, ok := d.GetOk("service_account_role_arn"); ok { @@ -154,14 +160,14 @@ func resourceAddonCreate(ctx context.Context, d *schema.ResourceData, meta inter _, err := tfresource.RetryWhen(ctx, propagationTimeout, func() (interface{}, error) { - return conn.CreateAddonWithContext(ctx, input) + return conn.CreateAddon(ctx, input) }, func(err error) (bool, error) { - if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "CREATE_FAILED") { + if errs.IsAErrorMessageContains[*types.InvalidParameterException](err, "CREATE_FAILED") { return true, err } - if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "does not exist") { + if errs.IsAErrorMessageContains[*types.InvalidParameterException](err, "does not exist") { return true, err } @@ -194,15 +200,14 @@ func resourceAddonCreate(ctx context.Context, d *schema.ResourceData, meta inter func resourceAddonRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, addonName, err := AddonParseResourceID(d.Id()) - if err != nil { return sdkdiag.AppendFromErr(diags, err) } - addon, err := FindAddonByClusterNameAndAddonName(ctx, conn, clusterName, addonName) + addon, err := findAddonByTwoPartKey(ctx, conn, clusterName, addonName) if !d.IsNewResource() && 
tfresource.NotFound(err) { log.Printf("[WARN] EKS Add-On (%s) not found, removing from state", d.Id()) @@ -219,8 +224,8 @@ func resourceAddonRead(ctx context.Context, d *schema.ResourceData, meta interfa d.Set("arn", addon.AddonArn) d.Set("cluster_name", addon.ClusterName) d.Set("configuration_values", addon.ConfigurationValues) - d.Set("created_at", aws.TimeValue(addon.CreatedAt).Format(time.RFC3339)) - d.Set("modified_at", aws.TimeValue(addon.ModifiedAt).Format(time.RFC3339)) + d.Set("created_at", aws.ToTime(addon.CreatedAt).Format(time.RFC3339)) + d.Set("modified_at", aws.ToTime(addon.ModifiedAt).Format(time.RFC3339)) d.Set("service_account_role_arn", addon.ServiceAccountRoleArn) setTagsOut(ctx, addon.Tags) @@ -230,10 +235,9 @@ func resourceAddonRead(ctx context.Context, d *schema.ResourceData, meta interfa func resourceAddonUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, addonName, err := AddonParseResourceID(d.Id()) - if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -253,16 +257,17 @@ func resourceAddonUpdate(ctx context.Context, d *schema.ResourceData, meta inter input.ConfigurationValues = aws.String(d.Get("configuration_values").(string)) } - var conflictResolutionAttr, conflictResolution string + var conflictResolutionAttr string + var conflictResolution types.ResolveConflicts if v, ok := d.GetOk("resolve_conflicts"); ok { conflictResolutionAttr = "resolve_conflicts" - conflictResolution = v.(string) - input.ResolveConflicts = aws.String(v.(string)) + conflictResolution = types.ResolveConflicts(v.(string)) + input.ResolveConflicts = conflictResolution } else if v, ok := d.GetOk("resolve_conflicts_on_update"); ok { conflictResolutionAttr = "resolve_conflicts_on_update" - conflictResolution = v.(string) - input.ResolveConflicts = aws.String(v.(string)) + 
conflictResolution = types.ResolveConflicts(v.(string)) + input.ResolveConflicts = conflictResolution } // If service account role ARN is already provided, use it. Otherwise, the add-on uses @@ -271,19 +276,19 @@ func resourceAddonUpdate(ctx context.Context, d *schema.ResourceData, meta inter input.ServiceAccountRoleArn = aws.String(d.Get("service_account_role_arn").(string)) } - output, err := conn.UpdateAddonWithContext(ctx, input) + output, err := conn.UpdateAddon(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating EKS Add-On (%s): %s", d.Id(), err) } - updateID := aws.StringValue(output.Update.Id) + updateID := aws.ToString(output.Update.Id) if _, err := waitAddonUpdateSuccessful(ctx, conn, clusterName, addonName, updateID, d.Timeout(schema.TimeoutUpdate)); err != nil { - if conflictResolution != eks.ResolveConflictsOverwrite { + if conflictResolution != types.ResolveConflictsOverwrite { // Changing addon version w/o setting resolve_conflicts to "OVERWRITE" // might result in a failed update if there are conflicts: // ConfigurationConflict Apply failed with 1 conflict: conflict with "kubectl"... - return sdkdiag.AppendErrorf(diags, "waiting for EKS Add-On (%s) update (%s): %s. Consider setting attribute %q to %q", d.Id(), updateID, err, conflictResolutionAttr, eks.ResolveConflictsOverwrite) + return sdkdiag.AppendErrorf(diags, "waiting for EKS Add-On (%s) update (%s): %s. 
Consider setting attribute %q to %q", d.Id(), updateID, err, conflictResolutionAttr, types.ResolveConflictsOverwrite) } return sdkdiag.AppendErrorf(diags, "waiting for EKS Add-On (%s) update (%s): %s", d.Id(), updateID, err) @@ -295,10 +300,9 @@ func resourceAddonUpdate(ctx context.Context, d *schema.ResourceData, meta inter func resourceAddonDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, addonName, err := AddonParseResourceID(d.Id()) - if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -309,11 +313,11 @@ func resourceAddonDelete(ctx context.Context, d *schema.ResourceData, meta inter } if v, ok := d.GetOk("preserve"); ok { - input.Preserve = aws.Bool(v.(bool)) + input.Preserve = v.(bool) } log.Printf("[DEBUG] Deleting EKS Add-On: %s", d.Id()) - _, err = conn.DeleteAddonWithContext(ctx, input) + _, err = conn.DeleteAddon(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "deleting EKS Add-On (%s): %s", d.Id(), err) @@ -325,3 +329,187 @@ func resourceAddonDelete(ctx context.Context, d *schema.ResourceData, meta inter return diags } + +func findAddonByTwoPartKey(ctx context.Context, conn *eks.Client, clusterName, addonName string) (*types.Addon, error) { + input := &eks.DescribeAddonInput{ + AddonName: aws.String(addonName), + ClusterName: aws.String(clusterName), + } + + output, err := conn.DescribeAddon(ctx, input) + + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.Addon == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.Addon, nil +} + +func findAddonUpdateByThreePartKey(ctx context.Context, conn *eks.Client, clusterName, addonName, id string) (*types.Update, error) { + input 
:= &eks.DescribeUpdateInput{ + AddonName: aws.String(addonName), + Name: aws.String(clusterName), + UpdateId: aws.String(id), + } + + output, err := conn.DescribeUpdate(ctx, input) + + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.Update == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.Update, nil +} + +func statusAddon(ctx context.Context, conn *eks.Client, clusterName, addonName string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findAddonByTwoPartKey(ctx, conn, clusterName, addonName) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil + } +} + +func statusAddonUpdate(ctx context.Context, conn *eks.Client, clusterName, addonName, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findAddonUpdateByThreePartKey(ctx, conn, clusterName, addonName, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil + } +} + +func waitAddonCreated(ctx context.Context, conn *eks.Client, clusterName, addonName string, timeout time.Duration) (*types.Addon, error) { + stateConf := retry.StateChangeConf{ + Pending: enum.Slice(types.AddonStatusCreating, types.AddonStatusDegraded), + Target: enum.Slice(types.AddonStatusActive), + Refresh: statusAddon(ctx, conn, clusterName, addonName), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.Addon); ok { + if status, health := output.Status, output.Health; status == types.AddonStatusCreateFailed && health != nil { + tfresource.SetLastError(err, addonIssuesError(health.Issues)) + } + + return 
output, err + } + + return nil, err +} + +func waitAddonDeleted(ctx context.Context, conn *eks.Client, clusterName, addonName string, timeout time.Duration) (*types.Addon, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.AddonStatusActive, types.AddonStatusDeleting), + Target: []string{}, + Refresh: statusAddon(ctx, conn, clusterName, addonName), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.Addon); ok { + if status, health := output.Status, output.Health; status == types.AddonStatusDeleteFailed && health != nil { + tfresource.SetLastError(err, addonIssuesError(health.Issues)) + } + + return output, err + } + + return nil, err +} + +func waitAddonUpdateSuccessful(ctx context.Context, conn *eks.Client, clusterName, addonName, id string, timeout time.Duration) (*types.Update, error) { + stateConf := retry.StateChangeConf{ + Pending: enum.Slice(types.UpdateStatusInProgress), + Target: enum.Slice(types.UpdateStatusSuccessful), + Refresh: statusAddonUpdate(ctx, conn, clusterName, addonName, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.Update); ok { + if status := output.Status; status == types.UpdateStatusCancelled || status == types.UpdateStatusFailed { + tfresource.SetLastError(err, errorDetailsError(output.Errors)) + } + + return output, err + } + + return nil, err +} + +func addonIssueError(apiObject types.AddonIssue) error { + return fmt.Errorf("%s: %s", apiObject.Code, aws.ToString(apiObject.Message)) +} + +func addonIssuesError(apiObjects []types.AddonIssue) error { + var errs []error + + for _, apiObject := range apiObjects { + err := addonIssueError(apiObject) + + if err != nil { + errs = append(errs, fmt.Errorf("%s: %w", strings.Join(apiObject.ResourceIds, ", "), err)) + } + } + + return errors.Join(errs...) 
+} + +func errorDetailError(apiObject types.ErrorDetail) error { + return fmt.Errorf("%s: %s", apiObject.ErrorCode, aws.ToString(apiObject.ErrorMessage)) +} + +func errorDetailsError(apiObjects []types.ErrorDetail) error { + var errs []error + + for _, apiObject := range apiObjects { + err := errorDetailError(apiObject) + + if err != nil { + errs = append(errs, fmt.Errorf("%s: %w", strings.Join(apiObject.ResourceIds, ", "), err)) + } + } + + return errors.Join(errs...) +} diff --git a/internal/service/eks/addon_data_source.go b/internal/service/eks/addon_data_source.go index 62d821430ba..70a62bedfe2 100644 --- a/internal/service/eks/addon_data_source.go +++ b/internal/service/eks/addon_data_source.go @@ -8,16 +8,11 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/errs" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) // @SDKDataSource("aws_eks_addon") @@ -92,29 +87,3 @@ func dataSourceAddonRead(ctx context.Context, d *schema.ResourceData, meta inter return nil } - -func findAddonByTwoPartKey(ctx context.Context, conn *eks.Client, clusterName, addonName string) (*types.Addon, error) { - input := &eks.DescribeAddonInput{ - AddonName: aws.String(addonName), - ClusterName: aws.String(clusterName), - } - - output, err := conn.DescribeAddon(ctx, input) - - if errs.IsA[*types.ResourceNotFoundException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - 
if output == nil || output.Addon == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output.Addon, nil -} diff --git a/internal/service/eks/errors.go b/internal/service/eks/errors.go index ee8ac774ab2..3a992d1c240 100644 --- a/internal/service/eks/errors.go +++ b/internal/service/eks/errors.go @@ -13,32 +13,6 @@ import ( multierror "github.com/hashicorp/go-multierror" ) -func AddonIssueError(apiObject *eks.AddonIssue) error { - if apiObject == nil { - return nil - } - - return awserr.New(aws.StringValue(apiObject.Code), aws.StringValue(apiObject.Message), nil) -} - -func AddonIssuesError(apiObjects []*eks.AddonIssue) error { - var errors *multierror.Error - - for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - - err := AddonIssueError(apiObject) - - if err != nil { - errors = multierror.Append(errors, fmt.Errorf("%s: %w", strings.Join(aws.StringValueSlice(apiObject.ResourceIds), ", "), err)) - } - } - - return errors.ErrorOrNil() -} - func ErrorDetailError(apiObject *eks.ErrorDetail) error { if apiObject == nil { return nil diff --git a/internal/service/eks/find.go b/internal/service/eks/find.go index 4c961792188..cc345d3236c 100644 --- a/internal/service/eks/find.go +++ b/internal/service/eks/find.go @@ -12,65 +12,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" ) -func FindAddonByClusterNameAndAddonName(ctx context.Context, conn *eks.EKS, clusterName, addonName string) (*eks.Addon, error) { - input := &eks.DescribeAddonInput{ - AddonName: aws.String(addonName), - ClusterName: aws.String(clusterName), - } - - output, err := conn.DescribeAddonWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.Addon == nil { - return nil, &retry.NotFoundError{ - Message: "Empty result", - LastRequest: 
input, - } - } - - return output.Addon, nil -} - -func FindAddonUpdateByClusterNameAddonNameAndID(ctx context.Context, conn *eks.EKS, clusterName, addonName, id string) (*eks.Update, error) { - input := &eks.DescribeUpdateInput{ - AddonName: aws.String(addonName), - Name: aws.String(clusterName), - UpdateId: aws.String(id), - } - - output, err := conn.DescribeUpdateWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.Update == nil { - return nil, &retry.NotFoundError{ - Message: "Empty result", - LastRequest: input, - } - } - - return output.Update, nil -} - func FindFargateProfileByClusterNameAndFargateProfileName(ctx context.Context, conn *eks.EKS, clusterName, fargateProfileName string) (*eks.FargateProfile, error) { input := &eks.DescribeFargateProfileInput{ ClusterName: aws.String(clusterName), diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index 20530d5f3bf..463ebd08f37 100644 --- a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -58,7 +58,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceAddon, + Factory: resourceAddon, TypeName: "aws_eks_addon", Name: "Add-On", Tags: &types.ServicePackageResourceTags{ diff --git a/internal/service/eks/status.go b/internal/service/eks/status.go index 1716b138070..40207672696 100644 --- a/internal/service/eks/status.go +++ b/internal/service/eks/status.go @@ -12,38 +12,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func statusAddon(ctx context.Context, conn *eks.EKS, clusterName, addonName 
string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindAddonByClusterNameAndAddonName(ctx, conn, clusterName, addonName) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Status), nil - } -} - -func statusAddonUpdate(ctx context.Context, conn *eks.EKS, clusterName, addonName, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindAddonUpdateByClusterNameAddonNameAndID(ctx, conn, clusterName, addonName, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Status), nil - } -} - func statusFargateProfile(ctx context.Context, conn *eks.EKS, clusterName, fargateProfileName string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := FindFargateProfileByClusterNameAndFargateProfileName(ctx, conn, clusterName, fargateProfileName) diff --git a/internal/service/eks/wait.go b/internal/service/eks/wait.go index 57a94e414fd..dbf970eaad0 100644 --- a/internal/service/eks/wait.go +++ b/internal/service/eks/wait.go @@ -17,69 +17,6 @@ const ( clusterDeleteRetryTimeout = 60 * time.Minute ) -func waitAddonCreated(ctx context.Context, conn *eks.EKS, clusterName, addonName string, timeout time.Duration) (*eks.Addon, error) { - stateConf := retry.StateChangeConf{ - Pending: []string{eks.AddonStatusCreating, eks.AddonStatusDegraded}, - Target: []string{eks.AddonStatusActive}, - Refresh: statusAddon(ctx, conn, clusterName, addonName), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*eks.Addon); ok { - if status, health := aws.StringValue(output.Status), output.Health; status == eks.AddonStatusCreateFailed && health != nil { - tfresource.SetLastError(err, AddonIssuesError(health.Issues)) - } - - return 
output, err - } - - return nil, err -} - -func waitAddonDeleted(ctx context.Context, conn *eks.EKS, clusterName, addonName string, timeout time.Duration) (*eks.Addon, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{eks.AddonStatusActive, eks.AddonStatusDeleting}, - Target: []string{}, - Refresh: statusAddon(ctx, conn, clusterName, addonName), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*eks.Addon); ok { - if status, health := aws.StringValue(output.Status), output.Health; status == eks.AddonStatusDeleteFailed && health != nil { - tfresource.SetLastError(err, AddonIssuesError(health.Issues)) - } - - return output, err - } - - return nil, err -} - -func waitAddonUpdateSuccessful(ctx context.Context, conn *eks.EKS, clusterName, addonName, id string, timeout time.Duration) (*eks.Update, error) { - stateConf := retry.StateChangeConf{ - Pending: []string{eks.UpdateStatusInProgress}, - Target: []string{eks.UpdateStatusSuccessful}, - Refresh: statusAddonUpdate(ctx, conn, clusterName, addonName, id), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*eks.Update); ok { - if status := aws.StringValue(output.Status); status == eks.UpdateStatusCancelled || status == eks.UpdateStatusFailed { - tfresource.SetLastError(err, ErrorDetailsError(output.Errors)) - } - - return output, err - } - - return nil, err -} - func waitFargateProfileCreated(ctx context.Context, conn *eks.EKS, clusterName, fargateProfileName string, timeout time.Duration) (*eks.FargateProfile, error) { stateConf := &retry.StateChangeConf{ Pending: []string{eks.FargateProfileStatusCreating}, From 5fbd3f65f0e7f13578627bc48caabefbcf846f6a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 07:52:52 -0500 Subject: [PATCH 13/46] Use AWS SDK for Go v2 for sts service. 
--- names/names_data.csv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/names/names_data.csv b/names/names_data.csv index ee995ad67e0..6b10cb1c6e3 100644 --- a/names/names_data.csv +++ b/names/names_data.csv @@ -342,7 +342,7 @@ sso-admin,ssoadmin,ssoadmin,ssoadmin,,ssoadmin,,,SSOAdmin,SSOAdmin,,1,,,aws_ssoa identitystore,identitystore,identitystore,identitystore,,identitystore,,,IdentityStore,IdentityStore,,,2,,aws_identitystore_,,identitystore_,SSO Identity Store,AWS,,,,,, sso-oidc,ssooidc,ssooidc,ssooidc,,ssooidc,,,SSOOIDC,SSOOIDC,,1,,,aws_ssooidc_,,ssooidc_,SSO OIDC,AWS,,x,,,, storagegateway,storagegateway,storagegateway,storagegateway,,storagegateway,,,StorageGateway,StorageGateway,,1,,,aws_storagegateway_,,storagegateway_,Storage Gateway,AWS,,,,,, -sts,sts,sts,sts,,sts,,,STS,STS,x,1,,aws_caller_identity,aws_sts_,,caller_identity,STS (Security Token),AWS,,,,AWS_STS_ENDPOINT,TF_AWS_STS_ENDPOINT, +sts,sts,sts,sts,,sts,,,STS,STS,x,,2,aws_caller_identity,aws_sts_,,caller_identity,STS (Security Token),AWS,,,,AWS_STS_ENDPOINT,TF_AWS_STS_ENDPOINT, ,,,,,,,,,,,,,,,,,Sumerian,Amazon,x,,,,,No SDK support support,support,support,support,,support,,,Support,Support,,1,,,aws_support_,,support_,Support,AWS,,x,,,, swf,swf,swf,swf,,swf,,,SWF,SWF,,,2,,aws_swf_,,swf_,SWF (Simple Workflow),Amazon,,,,,, From b1835d7fdd41eff6753da813b12a1c00f4182000 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 07:54:30 -0500 Subject: [PATCH 14/46] Add 'names.STSEndpointID'. 
--- names/names.go | 1 + 1 file changed, 1 insertion(+) diff --git a/names/names.go b/names/names.go index b23f54c70ec..21fadc9135f 100644 --- a/names/names.go +++ b/names/names.go @@ -70,6 +70,7 @@ const ( SSMEndpointID = "ssm" SSMContactsEndpointID = "ssm-contacts" SSMIncidentsEndpointID = "ssm-incidents" + STSEndpointID = "sts" SWFEndpointID = "swf" TimestreamWriteEndpointID = "ingest.timestream" TranscribeEndpointID = "transcribe" From 0aebe603b6ccef69a0e72982521ca2e562bf92fd Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 07:57:05 -0500 Subject: [PATCH 15/46] Run 'make gen'. --- internal/conns/awsclient_gen.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index a85deb7c3cb..381a41f5a38 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -65,6 +65,7 @@ import ( ssm_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ssm" ssmcontacts_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ssmcontacts" ssmincidents_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ssmincidents" + sts_sdkv2 "github.com/aws/aws-sdk-go-v2/service/sts" swf_sdkv2 "github.com/aws/aws-sdk-go-v2/service/swf" timestreamwrite_sdkv2 "github.com/aws/aws-sdk-go-v2/service/timestreamwrite" transcribe_sdkv2 "github.com/aws/aws-sdk-go-v2/service/transcribe" @@ -212,7 +213,6 @@ import ( ssm_sdkv1 "github.com/aws/aws-sdk-go/service/ssm" ssoadmin_sdkv1 "github.com/aws/aws-sdk-go/service/ssoadmin" storagegateway_sdkv1 "github.com/aws/aws-sdk-go/service/storagegateway" - sts_sdkv1 "github.com/aws/aws-sdk-go/service/sts" synthetics_sdkv1 "github.com/aws/aws-sdk-go/service/synthetics" transfer_sdkv1 "github.com/aws/aws-sdk-go/service/transfer" waf_sdkv1 "github.com/aws/aws-sdk-go/service/waf" @@ -971,8 +971,8 @@ func (c *AWSClient) SSOAdminConn(ctx context.Context) *ssoadmin_sdkv1.SSOAdmin { return errs.Must(conn[*ssoadmin_sdkv1.SSOAdmin](ctx, c, names.SSOAdmin)) } -func (c *AWSClient) 
STSConn(ctx context.Context) *sts_sdkv1.STS { - return errs.Must(conn[*sts_sdkv1.STS](ctx, c, names.STS)) +func (c *AWSClient) STSClient(ctx context.Context) *sts_sdkv2.Client { + return errs.Must(client[*sts_sdkv2.Client](ctx, c, names.STS)) } func (c *AWSClient) SWFClient(ctx context.Context) *swf_sdkv2.Client { From 793b112e0893125eecefef25ab54780158b98774 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 07:58:04 -0500 Subject: [PATCH 16/46] Run 'go get github.com/aws/aws-sdk-go-v2/service/sts@v1.25.0 && go mod tidy'. --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index ce567c7ece4..41b256af76e 100644 --- a/go.mod +++ b/go.mod @@ -71,6 +71,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssm v1.42.0 github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.19.0 github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.25.0 + github.com/aws/aws-sdk-go-v2/service/sts v1.25.0 github.com/aws/aws-sdk-go-v2/service/swf v1.19.0 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.22.0 github.com/aws/aws-sdk-go-v2/service/transcribe v1.31.0 @@ -140,7 +141,6 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.1 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.17.0 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.25.0 // indirect github.com/aws/smithy-go v1.16.0 // indirect github.com/bgentry/speakeasy v0.1.0 // indirect github.com/boombuler/barcode v1.0.1 // indirect From 98c6eecbf8d3aeaf94e17f2ce94f3941d67401f4 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 08:54:09 -0500 Subject: [PATCH 17/46] Add 'NewClient' for sts package. 
--- internal/service/sts/service_package.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/internal/service/sts/service_package.go b/internal/service/sts/service_package.go index 43d41ed0710..03f01047864 100644 --- a/internal/service/sts/service_package.go +++ b/internal/service/sts/service_package.go @@ -6,19 +6,19 @@ package sts import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - sts_sdkv1 "github.com/aws/aws-sdk-go/service/sts" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + sts_sdkv2 "github.com/aws/aws-sdk-go-v2/service/sts" ) -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, m map[string]any) (*sts_sdkv1.STS, error) { - sess := m["session"].(*session_sdkv1.Session) - config := &aws_sdkv1.Config{Endpoint: aws_sdkv1.String(m["endpoint"].(string))} +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*sts_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - if stsRegion := m["sts_region"].(string); stsRegion != "" { - config.Region = aws_sdkv1.String(stsRegion) - } - - return sts_sdkv1.New(sess.Copy(config)), nil + return sts_sdkv2.NewFromConfig(cfg, func(o *sts_sdkv2.Options) { + if endpoint := config["endpoint"].(string); endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } else if stsRegion := config["sts_region"].(string); stsRegion != "" { + o.Region = stsRegion + } + }), nil } From d4407d1845c04704ad17e59d9cea6cd18e1ae4dd Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 08:57:02 -0500 Subject: [PATCH 18/46] d/aws_caller_identity: Migrate to AWS SDK for Go v2. 
--- internal/service/sts/caller_identity_data_source.go | 6 +++--- internal/service/sts/find.go | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/service/sts/caller_identity_data_source.go b/internal/service/sts/caller_identity_data_source.go index f19339cc9e0..88cf07d2524 100644 --- a/internal/service/sts/caller_identity_data_source.go +++ b/internal/service/sts/caller_identity_data_source.go @@ -6,7 +6,7 @@ package sts import ( "context" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/types" @@ -64,7 +64,7 @@ func (d *dataSourceCallerIdentity) Read(ctx context.Context, request datasource. return } - conn := d.Meta().STSConn(ctx) + conn := d.Meta().STSClient(ctx) output, err := FindCallerIdentity(ctx, conn) @@ -74,7 +74,7 @@ func (d *dataSourceCallerIdentity) Read(ctx context.Context, request datasource. 
return } - accountID := aws.StringValue(output.Account) + accountID := aws.ToString(output.Account) data.AccountID = types.StringValue(accountID) data.ARN = flex.StringToFrameworkLegacy(ctx, output.Arn) data.ID = types.StringValue(accountID) diff --git a/internal/service/sts/find.go b/internal/service/sts/find.go index 7c66d2fac90..c067813ddc1 100644 --- a/internal/service/sts/find.go +++ b/internal/service/sts/find.go @@ -6,14 +6,14 @@ package sts import ( "context" - "github.com/aws/aws-sdk-go/service/sts" + "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func FindCallerIdentity(ctx context.Context, conn *sts.STS) (*sts.GetCallerIdentityOutput, error) { +func FindCallerIdentity(ctx context.Context, conn *sts.Client) (*sts.GetCallerIdentityOutput, error) { input := &sts.GetCallerIdentityInput{} - output, err := conn.GetCallerIdentityWithContext(ctx, input) + output, err := conn.GetCallerIdentity(ctx, input) if err != nil { return nil, err From 9e18a70277349964eeef61a01ca6e5924cc0c43b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 09:04:30 -0500 Subject: [PATCH 19/46] acctest: sts uses AWS SDK for Go v2. 
--- internal/acctest/acctest.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/acctest/acctest.go b/internal/acctest/acctest.go index 11ea3b3bf88..18bd55e122d 100644 --- a/internal/acctest/acctest.go +++ b/internal/acctest/acctest.go @@ -979,7 +979,7 @@ func PreCheckOrganizationManagementAccount(ctx context.Context, t *testing.T) { t.Fatalf("describing AWS Organization: %s", err) } - callerIdentity, err := tfsts.FindCallerIdentity(ctx, Provider.Meta().(*conns.AWSClient).STSConn(ctx)) + callerIdentity, err := tfsts.FindCallerIdentity(ctx, Provider.Meta().(*conns.AWSClient).STSClient(ctx)) if err != nil { t.Fatalf("getting current identity: %s", err) @@ -997,7 +997,7 @@ func PreCheckOrganizationMemberAccount(ctx context.Context, t *testing.T) { t.Fatalf("describing AWS Organization: %s", err) } - callerIdentity, err := tfsts.FindCallerIdentity(ctx, Provider.Meta().(*conns.AWSClient).STSConn(ctx)) + callerIdentity, err := tfsts.FindCallerIdentity(ctx, Provider.Meta().(*conns.AWSClient).STSClient(ctx)) if err != nil { t.Fatalf("getting current identity: %s", err) From fb979749e2684e6a5d8150c2a7b027a07464dfe8 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 09:05:16 -0500 Subject: [PATCH 20/46] Remove 'TestAccProvider_Region_stsRegion' as it can't be tested with AWS SDK for Go v2. 
--- internal/provider/provider_acc_test.go | 51 -------------------------- 1 file changed, 51 deletions(-) diff --git a/internal/provider/provider_acc_test.go b/internal/provider/provider_acc_test.go index 8c7ec0fecbc..1bf576e38ff 100644 --- a/internal/provider/provider_acc_test.go +++ b/internal/provider/provider_acc_test.go @@ -462,28 +462,6 @@ func TestAccProvider_Region_sc2s(t *testing.T) { }) } -func TestAccProvider_Region_stsRegion(t *testing.T) { - ctx := acctest.Context(t) - var provider *schema.Provider - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t), - ProtoV5ProviderFactories: testAccProtoV5ProviderFactoriesInternal(ctx, t, &provider), - CheckDestroy: nil, - Steps: []resource.TestStep{ - { - Config: testAccProviderConfig_stsRegion(endpoints.UsEast1RegionID, endpoints.UsWest2RegionID), - Check: resource.ComposeTestCheckFunc( - testAccCheckRegion(ctx, t, &provider, endpoints.UsEast1RegionID), - testAccCheckSTSRegion(ctx, t, &provider, endpoints.UsWest2RegionID), - ), - PlanOnly: true, - }, - }, - }) -} - func TestAccProvider_AssumeRole_empty(t *testing.T) { ctx := acctest.Context(t) resource.ParallelTest(t, resource.TestCase{ @@ -565,22 +543,6 @@ func testAccCheckRegion(ctx context.Context, t *testing.T, p **schema.Provider, } } -func testAccCheckSTSRegion(ctx context.Context, t *testing.T, p **schema.Provider, expectedRegion string) resource.TestCheckFunc { //nolint:unparam - return func(s *terraform.State) error { - if p == nil || *p == nil || (*p).Meta() == nil || (*p).Meta().(*conns.AWSClient) == nil { - return fmt.Errorf("provider not initialized") - } - - stsRegion := aws.StringValue((*p).Meta().(*conns.AWSClient).STSConn(ctx).Config.Region) - - if stsRegion != expectedRegion { - return fmt.Errorf("expected STS Region (%s), got: %s", expectedRegion, stsRegion) - } - - return nil - } -} - func testAccCheckReverseDNSPrefix(ctx context.Context, t *testing.T, p 
**schema.Provider, expectedReverseDnsPrefix string) resource.TestCheckFunc { //nolint:unparam return func(s *terraform.State) error { if p == nil || *p == nil || (*p).Meta() == nil || (*p).Meta().(*conns.AWSClient) == nil { @@ -1078,16 +1040,3 @@ provider "aws" { } `, region)) } - -func testAccProviderConfig_stsRegion(region, stsRegion string) string { - //lintignore:AT004 - return acctest.ConfigCompose(testAccProviderConfig_base, fmt.Sprintf(` -provider "aws" { - region = %[1]q - sts_region = %[2]q - skip_credentials_validation = true - skip_metadata_api_check = true - skip_requesting_account_id = true -} -`, region, stsRegion)) -} From badfc5e3ab3c18c226b4e90d8853270de6cf89ef Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 09:07:13 -0500 Subject: [PATCH 21/46] dms: Remove 'testAccPreCheckEKS' -- it's not needed. --- .../service/dms/event_subscription_test.go | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/internal/service/dms/event_subscription_test.go b/internal/service/dms/event_subscription_test.go index c6665a4c870..e222bd4068a 100644 --- a/internal/service/dms/event_subscription_test.go +++ b/internal/service/dms/event_subscription_test.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go/aws" dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/aws/aws-sdk-go/service/eks" "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -168,7 +167,7 @@ func TestAccDMSEventSubscription_tags(t *testing.T) { resourceName := "aws_dms_event_subscription.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckEKS(ctx, t) }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, dms.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, 
CheckDestroy: testAccCheckEventSubscriptionDestroy(ctx), @@ -387,19 +386,3 @@ resource "aws_dms_event_subscription" "test" { } `, rName, tagKey1, tagValue1, tagKey2, tagValue2)) } - -func testAccPreCheckEKS(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) - - input := &eks.ListClustersInput{} - - _, err := conn.ListClustersWithContext(ctx, input) - - if acctest.PreCheckSkipError(err) { - t.Skipf("skipping acceptance testing: %s", err) - } - - if err != nil { - t.Fatalf("unexpected PreCheck error: %s", err) - } -} From c719d86252f2d3ceefb4d2ff051c5eb8e06790ef Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 10:26:55 -0500 Subject: [PATCH 22/46] d/aws_caller_identity: Migrate acceptance tests to AWS SDK for Go v2. --- internal/service/sts/caller_identity_data_source_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/sts/caller_identity_data_source_test.go b/internal/service/sts/caller_identity_data_source_test.go index bafd5132283..f5c3bde810d 100644 --- a/internal/service/sts/caller_identity_data_source_test.go +++ b/internal/service/sts/caller_identity_data_source_test.go @@ -6,16 +6,16 @@ package sts_test import ( "testing" - "github.com/aws/aws-sdk-go/service/sts" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccSTSCallerIdentityDataSource_basic(t *testing.T) { ctx := acctest.Context(t) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, sts.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.STSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { From 7fb1084a52a7dbc4b02c6485f18d27832e0d4b33 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 10:28:08 -0500 Subject: 
[PATCH 23/46] Use AWS SDK for Go v1 & v2 for sts service. --- names/names_data.csv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/names/names_data.csv b/names/names_data.csv index 6b10cb1c6e3..dd98d306b4f 100644 --- a/names/names_data.csv +++ b/names/names_data.csv @@ -342,7 +342,7 @@ sso-admin,ssoadmin,ssoadmin,ssoadmin,,ssoadmin,,,SSOAdmin,SSOAdmin,,1,,,aws_ssoa identitystore,identitystore,identitystore,identitystore,,identitystore,,,IdentityStore,IdentityStore,,,2,,aws_identitystore_,,identitystore_,SSO Identity Store,AWS,,,,,, sso-oidc,ssooidc,ssooidc,ssooidc,,ssooidc,,,SSOOIDC,SSOOIDC,,1,,,aws_ssooidc_,,ssooidc_,SSO OIDC,AWS,,x,,,, storagegateway,storagegateway,storagegateway,storagegateway,,storagegateway,,,StorageGateway,StorageGateway,,1,,,aws_storagegateway_,,storagegateway_,Storage Gateway,AWS,,,,,, -sts,sts,sts,sts,,sts,,,STS,STS,x,,2,aws_caller_identity,aws_sts_,,caller_identity,STS (Security Token),AWS,,,,AWS_STS_ENDPOINT,TF_AWS_STS_ENDPOINT, +sts,sts,sts,sts,,sts,,,STS,STS,x,1,2,aws_caller_identity,aws_sts_,,caller_identity,STS (Security Token),AWS,,,,AWS_STS_ENDPOINT,TF_AWS_STS_ENDPOINT, ,,,,,,,,,,,,,,,,,Sumerian,Amazon,x,,,,,No SDK support support,support,support,support,,support,,,Support,Support,,1,,,aws_support_,,support_,Support,AWS,,x,,,, swf,swf,swf,swf,,swf,,,SWF,SWF,,,2,,aws_swf_,,swf_,SWF (Simple Workflow),Amazon,,,,,, From 36c898ff0e7905ae67574db723e42544f67310a5 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 10:28:44 -0500 Subject: [PATCH 24/46] Run 'make gen'. 
--- internal/conns/awsclient_gen.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index 381a41f5a38..deff95ce107 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -213,6 +213,7 @@ import ( ssm_sdkv1 "github.com/aws/aws-sdk-go/service/ssm" ssoadmin_sdkv1 "github.com/aws/aws-sdk-go/service/ssoadmin" storagegateway_sdkv1 "github.com/aws/aws-sdk-go/service/storagegateway" + sts_sdkv1 "github.com/aws/aws-sdk-go/service/sts" synthetics_sdkv1 "github.com/aws/aws-sdk-go/service/synthetics" transfer_sdkv1 "github.com/aws/aws-sdk-go/service/transfer" waf_sdkv1 "github.com/aws/aws-sdk-go/service/waf" @@ -971,6 +972,10 @@ func (c *AWSClient) SSOAdminConn(ctx context.Context) *ssoadmin_sdkv1.SSOAdmin { return errs.Must(conn[*ssoadmin_sdkv1.SSOAdmin](ctx, c, names.SSOAdmin)) } +func (c *AWSClient) STSConn(ctx context.Context) *sts_sdkv1.STS { + return errs.Must(conn[*sts_sdkv1.STS](ctx, c, names.STS)) +} + func (c *AWSClient) STSClient(ctx context.Context) *sts_sdkv2.Client { return errs.Must(client[*sts_sdkv2.Client](ctx, c, names.STS)) } From f8066d3d930dbf49955ea88f596e47cf384ebf52 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 10:32:58 -0500 Subject: [PATCH 25/46] Restore 'NewConn' for sts package. 
--- internal/service/sts/service_package.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/internal/service/sts/service_package.go b/internal/service/sts/service_package.go index 03f01047864..276f3084acf 100644 --- a/internal/service/sts/service_package.go +++ b/internal/service/sts/service_package.go @@ -8,8 +8,23 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" sts_sdkv2 "github.com/aws/aws-sdk-go-v2/service/sts" + aws_sdkv1 "github.com/aws/aws-sdk-go/aws" + session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" + sts_sdkv1 "github.com/aws/aws-sdk-go/service/sts" ) +// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. +func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*sts_sdkv1.STS, error) { + sess := config["session"].(*session_sdkv1.Session) + cfg := &aws_sdkv1.Config{Endpoint: aws_sdkv1.String(config["endpoint"].(string))} + + if stsRegion := config["sts_region"].(string); stsRegion != "" { + cfg.Region = aws_sdkv1.String(stsRegion) + } + + return sts_sdkv1.New(sess.Copy(cfg)), nil +} + // NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*sts_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) From 4675147324a24b66ea8e1fe4f66f51757967ede7 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 10:33:00 -0500 Subject: [PATCH 26/46] Revert "Remove 'TestAccProvider_Region_stsRegion' as it can't be tested with AWS SDK for Go v2." This reverts commit fb979749e2684e6a5d8150c2a7b027a07464dfe8. 
--- internal/provider/provider_acc_test.go | 51 ++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/internal/provider/provider_acc_test.go b/internal/provider/provider_acc_test.go index 1bf576e38ff..8c7ec0fecbc 100644 --- a/internal/provider/provider_acc_test.go +++ b/internal/provider/provider_acc_test.go @@ -462,6 +462,28 @@ func TestAccProvider_Region_sc2s(t *testing.T) { }) } +func TestAccProvider_Region_stsRegion(t *testing.T) { + ctx := acctest.Context(t) + var provider *schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t), + ProtoV5ProviderFactories: testAccProtoV5ProviderFactoriesInternal(ctx, t, &provider), + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccProviderConfig_stsRegion(endpoints.UsEast1RegionID, endpoints.UsWest2RegionID), + Check: resource.ComposeTestCheckFunc( + testAccCheckRegion(ctx, t, &provider, endpoints.UsEast1RegionID), + testAccCheckSTSRegion(ctx, t, &provider, endpoints.UsWest2RegionID), + ), + PlanOnly: true, + }, + }, + }) +} + func TestAccProvider_AssumeRole_empty(t *testing.T) { ctx := acctest.Context(t) resource.ParallelTest(t, resource.TestCase{ @@ -543,6 +565,22 @@ func testAccCheckRegion(ctx context.Context, t *testing.T, p **schema.Provider, } } +func testAccCheckSTSRegion(ctx context.Context, t *testing.T, p **schema.Provider, expectedRegion string) resource.TestCheckFunc { //nolint:unparam + return func(s *terraform.State) error { + if p == nil || *p == nil || (*p).Meta() == nil || (*p).Meta().(*conns.AWSClient) == nil { + return fmt.Errorf("provider not initialized") + } + + stsRegion := aws.StringValue((*p).Meta().(*conns.AWSClient).STSConn(ctx).Config.Region) + + if stsRegion != expectedRegion { + return fmt.Errorf("expected STS Region (%s), got: %s", expectedRegion, stsRegion) + } + + return nil + } +} + func testAccCheckReverseDNSPrefix(ctx context.Context, t *testing.T, p 
**schema.Provider, expectedReverseDnsPrefix string) resource.TestCheckFunc { //nolint:unparam return func(s *terraform.State) error { if p == nil || *p == nil || (*p).Meta() == nil || (*p).Meta().(*conns.AWSClient) == nil { @@ -1040,3 +1078,16 @@ provider "aws" { } `, region)) } + +func testAccProviderConfig_stsRegion(region, stsRegion string) string { + //lintignore:AT004 + return acctest.ConfigCompose(testAccProviderConfig_base, fmt.Sprintf(` +provider "aws" { + region = %[1]q + sts_region = %[2]q + skip_credentials_validation = true + skip_metadata_api_check = true + skip_requesting_account_id = true +} +`, region, stsRegion)) +} From 1ead577e15097faa7d78c50d8de9286688b352ef Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 10:34:12 -0500 Subject: [PATCH 27/46] d/aws_eks_cluster_auth: Migrate to AWS SDK for Go v2. --- internal/service/eks/cluster_auth_data_source.go | 12 ++++++------ internal/service/eks/service_package_gen.go | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/internal/service/eks/cluster_auth_data_source.go b/internal/service/eks/cluster_auth_data_source.go index c97a8092909..2d5bf515b57 100644 --- a/internal/service/eks/cluster_auth_data_source.go +++ b/internal/service/eks/cluster_auth_data_source.go @@ -14,7 +14,7 @@ import ( ) // @SDKDataSource("aws_eks_cluster_auth") -func DataSourceClusterAuth() *schema.Resource { +func dataSourceClusterAuth() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceClusterAuthRead, @@ -24,7 +24,6 @@ func DataSourceClusterAuth() *schema.Resource { Required: true, ValidateFunc: validation.NoZeroValues, }, - "token": { Type: schema.TypeString, Computed: true, @@ -37,18 +36,19 @@ func DataSourceClusterAuth() *schema.Resource { func dataSourceClusterAuthRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).STSConn(ctx) + name := d.Get("name").(string) generator, err := 
NewGenerator(false, false) if err != nil { - return sdkdiag.AppendErrorf(diags, "getting token generator: %s", err) + return sdkdiag.AppendFromErr(diags, err) } - toke, err := generator.GetWithSTS(ctx, name, conn) + token, err := generator.GetWithSTS(ctx, name, conn) if err != nil { - return sdkdiag.AppendErrorf(diags, "getting token: %s", err) + return sdkdiag.AppendErrorf(diags, "reading EKS Cluster (%s) Authentication Token: %s", name, err) } d.SetId(name) - d.Set("token", toke.Token) + d.Set("token", token.Token) return diags } diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index 463ebd08f37..7a2af7aae5f 100644 --- a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -37,7 +37,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac TypeName: "aws_eks_cluster", }, { - Factory: DataSourceClusterAuth, + Factory: dataSourceClusterAuth, TypeName: "aws_eks_cluster_auth", }, { From 28f927c41fd0ccff16f671cd4442f989cc778b88 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 10:45:59 -0500 Subject: [PATCH 28/46] r/aws_eks_identity_provider_config: Migrate to AWS SDK for Go v2. 
--- internal/service/eks/find.go | 32 ---- .../service/eks/identity_provider_config.go | 150 +++++++++++++----- internal/service/eks/service_package_gen.go | 2 +- internal/service/eks/status.go | 16 -- internal/service/eks/wait.go | 34 ---- 5 files changed, 111 insertions(+), 123 deletions(-) diff --git a/internal/service/eks/find.go b/internal/service/eks/find.go index cc345d3236c..457cd8d4da1 100644 --- a/internal/service/eks/find.go +++ b/internal/service/eks/find.go @@ -99,35 +99,3 @@ func FindNodegroupUpdateByClusterNameNodegroupNameAndID(ctx context.Context, con return output.Update, nil } - -func FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx context.Context, conn *eks.EKS, clusterName, configName string) (*eks.OidcIdentityProviderConfig, error) { - input := &eks.DescribeIdentityProviderConfigInput{ - ClusterName: aws.String(clusterName), - IdentityProviderConfig: &eks.IdentityProviderConfig{ - Name: aws.String(configName), - Type: aws.String(IdentityProviderConfigTypeOIDC), - }, - } - - output, err := conn.DescribeIdentityProviderConfigWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.IdentityProviderConfig == nil || output.IdentityProviderConfig.Oidc == nil { - return nil, &retry.NotFoundError{ - Message: "Empty result", - LastRequest: input, - } - } - - return output.IdentityProviderConfig.Oidc, nil -} diff --git a/internal/service/eks/identity_provider_config.go b/internal/service/eks/identity_provider_config.go index 7bd35298854..ce971245886 100644 --- a/internal/service/eks/identity_provider_config.go +++ b/internal/service/eks/identity_provider_config.go @@ -8,14 +8,17 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + 
"github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -25,7 +28,7 @@ import ( // @SDKResource("aws_eks_identity_provider_config", name="Identity Provider Config") // @Tags(identifierAttribute="arn") -func ResourceIdentityProviderConfig() *schema.Resource { +func resourceIdentityProviderConfig() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceIdentityProviderConfigCreate, ReadWithoutTimeout: resourceIdentityProviderConfigRead, @@ -48,14 +51,12 @@ func ResourceIdentityProviderConfig() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "cluster_name": { Type: schema.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.NoZeroValues, }, - "oidc": { Type: schema.TypeList, Required: true, @@ -118,12 +119,10 @@ func ResourceIdentityProviderConfig() *schema.Resource { }, }, }, - "status": { Type: schema.TypeString, Computed: true, }, - names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), }, @@ -131,7 +130,7 @@ func ResourceIdentityProviderConfig() *schema.Resource { } func resourceIdentityProviderConfigCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := 
meta.(*conns.AWSClient).EKSClient(ctx) clusterName := d.Get("cluster_name").(string) configName, oidc := expandOIDCIdentityProviderConfigRequest(d.Get("oidc").([]interface{})[0].(map[string]interface{})) @@ -143,7 +142,7 @@ func resourceIdentityProviderConfigCreate(ctx context.Context, d *schema.Resourc Tags: getTagsIn(ctx), } - _, err := conn.AssociateIdentityProviderConfigWithContext(ctx, input) + _, err := conn.AssociateIdentityProviderConfig(ctx, input) if err != nil { return diag.Errorf("associating EKS Identity Provider Config (%s): %s", idpID, err) @@ -151,25 +150,22 @@ func resourceIdentityProviderConfigCreate(ctx context.Context, d *schema.Resourc d.SetId(idpID) - _, err = waitOIDCIdentityProviderConfigCreated(ctx, conn, clusterName, configName, d.Timeout(schema.TimeoutCreate)) - - if err != nil { - return diag.Errorf("waiting for EKS Identity Provider Config (%s) association: %s", d.Id(), err) + if _, err := waitOIDCIdentityProviderConfigCreated(ctx, conn, clusterName, configName, d.Timeout(schema.TimeoutCreate)); err != nil { + return diag.Errorf("waiting for EKS Identity Provider Config (%s) create: %s", d.Id(), err) } return resourceIdentityProviderConfigRead(ctx, d, meta) } func resourceIdentityProviderConfigRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, configName, err := IdentityProviderConfigParseResourceID(d.Id()) - if err != nil { return diag.FromErr(err) } - oidc, err := FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, conn, clusterName, configName) + oidc, err := findOIDCIdentityProviderConfigByTwoPartKey(ctx, conn, clusterName, configName) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EKS Identity Provider Config (%s) not found, removing from state", d.Id()) @@ -183,11 +179,9 @@ func resourceIdentityProviderConfigRead(ctx context.Context, d *schema.ResourceD 
d.Set("arn", oidc.IdentityProviderConfigArn) d.Set("cluster_name", oidc.ClusterName) - if err := d.Set("oidc", []interface{}{flattenOIDCIdentityProviderConfig(oidc)}); err != nil { return diag.Errorf("setting oidc: %s", err) } - d.Set("status", oidc.Status) setTagsOut(ctx, oidc.Tags) @@ -201,28 +195,27 @@ func resourceIdentityProviderConfigUpdate(ctx context.Context, d *schema.Resourc } func resourceIdentityProviderConfigDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, configName, err := IdentityProviderConfigParseResourceID(d.Id()) - if err != nil { return diag.FromErr(err) } log.Printf("[DEBUG] Disassociating EKS Identity Provider Config: %s", d.Id()) - _, err = conn.DisassociateIdentityProviderConfigWithContext(ctx, &eks.DisassociateIdentityProviderConfigInput{ + _, err = conn.DisassociateIdentityProviderConfig(ctx, &eks.DisassociateIdentityProviderConfigInput{ ClusterName: aws.String(clusterName), - IdentityProviderConfig: &eks.IdentityProviderConfig{ + IdentityProviderConfig: &types.IdentityProviderConfig{ Name: aws.String(configName), Type: aws.String(IdentityProviderConfigTypeOIDC), }, }) - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + if errs.IsA[*types.ResourceNotFoundException](err) { return nil } - if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidRequestException, "Identity provider config is not associated with cluster") { + if errs.IsAErrorMessageContains[*types.InvalidRequestException](err, "Identity provider config is not associated with cluster") { return nil } @@ -230,21 +223,98 @@ func resourceIdentityProviderConfigDelete(ctx context.Context, d *schema.Resourc return diag.Errorf("disassociating EKS Identity Provider Config (%s): %s", d.Id(), err) } - _, err = waitOIDCIdentityProviderConfigDeleted(ctx, conn, clusterName, configName, d.Timeout(schema.TimeoutDelete)) + if 
_, err := waitOIDCIdentityProviderConfigDeleted(ctx, conn, clusterName, configName, d.Timeout(schema.TimeoutDelete)); err != nil { + return diag.Errorf("waiting for EKS Identity Provider Config (%s) delete: %s", d.Id(), err) + } + + return nil +} + +func findOIDCIdentityProviderConfigByTwoPartKey(ctx context.Context, conn *eks.Client, clusterName, configName string) (*types.OidcIdentityProviderConfig, error) { + input := &eks.DescribeIdentityProviderConfigInput{ + ClusterName: aws.String(clusterName), + IdentityProviderConfig: &types.IdentityProviderConfig{ + Name: aws.String(configName), + Type: aws.String(IdentityProviderConfigTypeOIDC), + }, + } + + output, err := conn.DescribeIdentityProviderConfig(ctx, input) + + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } if err != nil { - return diag.Errorf("waiting for EKS Identity Provider Config (%s) disassociation: %s", d.Id(), err) + return nil, err } - return nil + if output == nil || output.IdentityProviderConfig == nil || output.IdentityProviderConfig.Oidc == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.IdentityProviderConfig.Oidc, nil +} + +func statusOIDCIdentityProviderConfig(ctx context.Context, conn *eks.Client, clusterName, configName string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findOIDCIdentityProviderConfigByTwoPartKey(ctx, conn, clusterName, configName) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil + } +} + +func waitOIDCIdentityProviderConfigCreated(ctx context.Context, conn *eks.Client, clusterName, configName string, timeout time.Duration) (*types.OidcIdentityProviderConfig, error) { + stateConf := retry.StateChangeConf{ + Pending: enum.Slice(types.ConfigStatusCreating), + Target: enum.Slice(types.ConfigStatusActive), + 
Refresh: statusOIDCIdentityProviderConfig(ctx, conn, clusterName, configName), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.OidcIdentityProviderConfig); ok { + return output, err + } + + return nil, err +} + +func waitOIDCIdentityProviderConfigDeleted(ctx context.Context, conn *eks.Client, clusterName, configName string, timeout time.Duration) (*types.OidcIdentityProviderConfig, error) { + stateConf := retry.StateChangeConf{ + Pending: enum.Slice(types.ConfigStatusActive, types.ConfigStatusDeleting), + Target: []string{}, + Refresh: statusOIDCIdentityProviderConfig(ctx, conn, clusterName, configName), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.OidcIdentityProviderConfig); ok { + return output, err + } + + return nil, err } -func expandOIDCIdentityProviderConfigRequest(tfMap map[string]interface{}) (string, *eks.OidcIdentityProviderConfigRequest) { +func expandOIDCIdentityProviderConfigRequest(tfMap map[string]interface{}) (string, *types.OidcIdentityProviderConfigRequest) { if tfMap == nil { return "", nil } - apiObject := &eks.OidcIdentityProviderConfigRequest{} + apiObject := &types.OidcIdentityProviderConfigRequest{} if v, ok := tfMap["client_id"].(string); ok && v != "" { apiObject.ClientId = aws.String(v) @@ -269,7 +339,7 @@ func expandOIDCIdentityProviderConfigRequest(tfMap map[string]interface{}) (stri } if v, ok := tfMap["required_claims"].(map[string]interface{}); ok && len(v) > 0 { - apiObject.RequiredClaims = flex.ExpandStringMap(v) + apiObject.RequiredClaims = flex.ExpandStringValueMap(v) } if v, ok := tfMap["username_claim"].(string); ok && v != "" { @@ -283,7 +353,7 @@ func expandOIDCIdentityProviderConfigRequest(tfMap map[string]interface{}) (stri return identityProviderConfigName, apiObject } -func flattenOIDCIdentityProviderConfig(apiObject *eks.OidcIdentityProviderConfig) map[string]interface{} { 
+func flattenOIDCIdentityProviderConfig(apiObject *types.OidcIdentityProviderConfig) map[string]interface{} { if apiObject == nil { return nil } @@ -291,35 +361,35 @@ func flattenOIDCIdentityProviderConfig(apiObject *eks.OidcIdentityProviderConfig tfMap := map[string]interface{}{} if v := apiObject.ClientId; v != nil { - tfMap["client_id"] = aws.StringValue(v) + tfMap["client_id"] = aws.ToString(v) } if v := apiObject.GroupsClaim; v != nil { - tfMap["groups_claim"] = aws.StringValue(v) + tfMap["groups_claim"] = aws.ToString(v) } if v := apiObject.GroupsPrefix; v != nil { - tfMap["groups_prefix"] = aws.StringValue(v) + tfMap["groups_prefix"] = aws.ToString(v) } if v := apiObject.IdentityProviderConfigName; v != nil { - tfMap["identity_provider_config_name"] = aws.StringValue(v) + tfMap["identity_provider_config_name"] = aws.ToString(v) } if v := apiObject.IssuerUrl; v != nil { - tfMap["issuer_url"] = aws.StringValue(v) + tfMap["issuer_url"] = aws.ToString(v) } if v := apiObject.RequiredClaims; v != nil { - tfMap["required_claims"] = aws.StringValueMap(v) + tfMap["required_claims"] = v } if v := apiObject.UsernameClaim; v != nil { - tfMap["username_claim"] = aws.StringValue(v) + tfMap["username_claim"] = aws.ToString(v) } if v := apiObject.UsernamePrefix; v != nil { - tfMap["username_prefix"] = aws.StringValue(v) + tfMap["username_prefix"] = aws.ToString(v) } return tfMap diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index 7a2af7aae5f..56ca92c5684 100644 --- a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -82,7 +82,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceIdentityProviderConfig, + Factory: resourceIdentityProviderConfig, TypeName: "aws_eks_identity_provider_config", Name: "Identity Provider Config", Tags: &types.ServicePackageResourceTags{ diff --git a/internal/service/eks/status.go 
b/internal/service/eks/status.go index 40207672696..fd3c09033f1 100644 --- a/internal/service/eks/status.go +++ b/internal/service/eks/status.go @@ -59,19 +59,3 @@ func statusNodegroupUpdate(ctx context.Context, conn *eks.EKS, clusterName, node return output, aws.StringValue(output.Status), nil } } - -func statusOIDCIdentityProviderConfig(ctx context.Context, conn *eks.EKS, clusterName, configName string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, conn, clusterName, configName) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Status), nil - } -} diff --git a/internal/service/eks/wait.go b/internal/service/eks/wait.go index dbf970eaad0..fce4627c7ab 100644 --- a/internal/service/eks/wait.go +++ b/internal/service/eks/wait.go @@ -113,37 +113,3 @@ func waitNodegroupUpdateSuccessful(ctx context.Context, conn *eks.EKS, clusterNa return nil, err } - -func waitOIDCIdentityProviderConfigCreated(ctx context.Context, conn *eks.EKS, clusterName, configName string, timeout time.Duration) (*eks.OidcIdentityProviderConfig, error) { - stateConf := retry.StateChangeConf{ - Pending: []string{eks.ConfigStatusCreating}, - Target: []string{eks.ConfigStatusActive}, - Refresh: statusOIDCIdentityProviderConfig(ctx, conn, clusterName, configName), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*eks.OidcIdentityProviderConfig); ok { - return output, err - } - - return nil, err -} - -func waitOIDCIdentityProviderConfigDeleted(ctx context.Context, conn *eks.EKS, clusterName, configName string, timeout time.Duration) (*eks.OidcIdentityProviderConfig, error) { - stateConf := retry.StateChangeConf{ - Pending: []string{eks.ConfigStatusActive, eks.ConfigStatusDeleting}, - Target: []string{}, - Refresh: 
statusOIDCIdentityProviderConfig(ctx, conn, clusterName, configName), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*eks.OidcIdentityProviderConfig); ok { - return output, err - } - - return nil, err -} From 06f41279a730f67ba8d3b58bef37701cbaaacbf9 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 11:54:04 -0500 Subject: [PATCH 29/46] d/aws_eks_node_groups: Migrate to AWS SDK for Go v2. --- .../service/eks/node_groups_data_source.go | 31 +++++++------------ internal/service/eks/service_package_gen.go | 2 +- 2 files changed, 13 insertions(+), 20 deletions(-) diff --git a/internal/service/eks/node_groups_data_source.go b/internal/service/eks/node_groups_data_source.go index 4f3eb7bc7dc..1e451b2408b 100644 --- a/internal/service/eks/node_groups_data_source.go +++ b/internal/service/eks/node_groups_data_source.go @@ -6,8 +6,8 @@ package eks import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -16,7 +16,7 @@ import ( ) // @SDKDataSource("aws_eks_node_groups") -func DataSourceNodeGroups() *schema.Resource { +func dataSourceNodeGroups() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceNodeGroupsRead, @@ -37,34 +37,27 @@ func DataSourceNodeGroups() *schema.Resource { func dataSourceNodeGroupsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) clusterName := d.Get("cluster_name").(string) - input := &eks.ListNodegroupsInput{ ClusterName: aws.String(clusterName), } + var nodeGroups []string + pages := 
eks.NewListNodegroupsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - var nodegroups []*string - - err := conn.ListNodegroupsPagesWithContext(ctx, input, func(page *eks.ListNodegroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + if err != nil { + return sdkdiag.AppendErrorf(diags, "listing EKS Node Groups: %s", err) } - nodegroups = append(nodegroups, page.Nodegroups...) - - return !lastPage - }) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing EKS Node Groups: %s", err) + nodeGroups = append(nodeGroups, page.Nodegroups...) } d.SetId(clusterName) - d.Set("cluster_name", clusterName) - d.Set("names", aws.StringValueSlice(nodegroups)) + d.Set("names", nodeGroups) return diags } diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index 56ca92c5684..709e26087ea 100644 --- a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -49,7 +49,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac TypeName: "aws_eks_node_group", }, { - Factory: DataSourceNodeGroups, + Factory: dataSourceNodeGroups, TypeName: "aws_eks_node_groups", }, } From 491792aa75cee2011a8eed9538c36477e09b5d5e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 11:58:23 -0500 Subject: [PATCH 30/46] d/aws_eks_clusters: Migrate to AWS SDK for Go v2. 
--- internal/service/eks/clusters_data_source.go | 27 ++++++++------------ internal/service/eks/service_package_gen.go | 2 +- 2 files changed, 12 insertions(+), 17 deletions(-) diff --git a/internal/service/eks/clusters_data_source.go b/internal/service/eks/clusters_data_source.go index ffdd46b1a90..53662b344ee 100644 --- a/internal/service/eks/clusters_data_source.go +++ b/internal/service/eks/clusters_data_source.go @@ -6,8 +6,7 @@ package eks import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -15,7 +14,7 @@ import ( ) // @SDKDataSource("aws_eks_clusters") -func DataSourceClusters() *schema.Resource { +func dataSourceClusters() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceClustersRead, @@ -31,27 +30,23 @@ func DataSourceClusters() *schema.Resource { func dataSourceClustersRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) - var clusters []*string + input := &eks.ListClustersInput{} + var clusters []string + pages := eks.NewListClustersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - err := conn.ListClustersPagesWithContext(ctx, &eks.ListClustersInput{}, func(page *eks.ListClustersOutput, lastPage bool) bool { - if page == nil { - return !lastPage + if err != nil { + return sdkdiag.AppendErrorf(diags, "listing EKS Clusters: %s", err) } clusters = append(clusters, page.Clusters...) 
- - return !lastPage - }) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing EKS Clusters: %s", err) } d.SetId(meta.(*conns.AWSClient).Region) - - d.Set("names", aws.StringValueSlice(clusters)) + d.Set("names", clusters) return diags } diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index 709e26087ea..336b8e608cd 100644 --- a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -41,7 +41,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac TypeName: "aws_eks_cluster_auth", }, { - Factory: DataSourceClusters, + Factory: dataSourceClusters, TypeName: "aws_eks_clusters", }, { From fba818bbb80a69624504710dd550ff8f2c6f8f73 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 12:54:33 -0500 Subject: [PATCH 31/46] r/aws_eks_fargate_profile: Migrate to AWS SDK for Go v2. --- internal/service/eks/fargate_profile.go | 164 +++++++++++++------- internal/service/eks/find.go | 29 ---- internal/service/eks/service_package_gen.go | 2 +- internal/service/eks/status.go | 16 -- internal/service/eks/wait.go | 34 ---- 5 files changed, 109 insertions(+), 136 deletions(-) diff --git a/internal/service/eks/fargate_profile.go b/internal/service/eks/fargate_profile.go index 55a4954086d..747c5cb0af0 100644 --- a/internal/service/eks/fargate_profile.go +++ b/internal/service/eks/fargate_profile.go @@ -9,15 +9,17 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -28,12 +30,13 @@ import ( // @SDKResource("aws_eks_fargate_profile", name="Fargate Profile") // @Tags(identifierAttribute="arn") -func ResourceFargateProfile() *schema.Resource { +func resourceFargateProfile() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceFargateProfileCreate, ReadWithoutTimeout: resourceFargateProfileRead, UpdateWithoutTimeout: resourceFargateProfileUpdate, DeleteWithoutTimeout: resourceFargateProfileDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -109,7 +112,7 @@ func ResourceFargateProfile() *schema.Resource { func resourceFargateProfileCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) clusterName := d.Get("cluster_name").(string) fargateProfileName := d.Get("fargate_profile_name").(string) @@ -120,7 +123,7 @@ func resourceFargateProfileCreate(ctx context.Context, d *schema.ResourceData, m FargateProfileName: aws.String(fargateProfileName), PodExecutionRoleArn: aws.String(d.Get("pod_execution_role_arn").(string)), Selectors: expandFargateProfileSelectors(d.Get("selector").(*schema.Set).List()), - Subnets: flex.ExpandStringSet(d.Get("subnet_ids").(*schema.Set)), + Subnets: flex.ExpandStringValueSet(d.Get("subnet_ids").(*schema.Set)), Tags: getTagsIn(ctx), } @@ -129,25 +132,11 @@ func resourceFargateProfileCreate(ctx context.Context, d *schema.ResourceData, m 
conns.GlobalMutexKV.Lock(mutexKey) defer conns.GlobalMutexKV.Unlock(mutexKey) - err := retry.RetryContext(ctx, propagationTimeout, func() *retry.RetryError { - _, err := conn.CreateFargateProfileWithContext(ctx, input) - - // Retry for IAM eventual consistency on error: - // InvalidParameterException: Misconfigured PodExecutionRole Trust Policy; Please add the eks-fargate-pods.amazonaws.com Service Principal - if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "Misconfigured PodExecutionRole Trust Policy") { - return retry.RetryableError(err) - } - - if err != nil { - return retry.NonRetryableError(err) - } - - return nil - }) - - if tfresource.TimedOut(err) { - _, err = conn.CreateFargateProfileWithContext(ctx, input) - } + // Retry for IAM eventual consistency on error: + // InvalidParameterException: Misconfigured PodExecutionRole Trust Policy; Please add the eks-fargate-pods.amazonaws.com Service Principal + _, err := tfresource.RetryWhenIsAErrorMessageContains[*types.InvalidParameterException](ctx, propagationTimeout, func() (interface{}, error) { + return conn.CreateFargateProfile(ctx, input) + }, "Misconfigured PodExecutionRole Trust Policy") if err != nil { return sdkdiag.AppendErrorf(diags, "creating EKS Fargate Profile (%s): %s", profileID, err) @@ -155,10 +144,8 @@ func resourceFargateProfileCreate(ctx context.Context, d *schema.ResourceData, m d.SetId(profileID) - _, err = waitFargateProfileCreated(ctx, conn, clusterName, fargateProfileName, d.Timeout(schema.TimeoutCreate)) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for EKS Fargate Profile (%s) to create: %s", d.Id(), err) + if _, err := waitFargateProfileCreated(ctx, conn, clusterName, fargateProfileName, d.Timeout(schema.TimeoutCreate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for EKS Fargate Profile (%s) create: %s", d.Id(), err) } return append(diags, resourceFargateProfileRead(ctx, d, meta)...) 
@@ -166,15 +153,14 @@ func resourceFargateProfileCreate(ctx context.Context, d *schema.ResourceData, m func resourceFargateProfileRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, fargateProfileName, err := FargateProfileParseResourceID(d.Id()) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EKS Fargate Profile (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - fargateProfile, err := FindFargateProfileByClusterNameAndFargateProfileName(ctx, conn, clusterName, fargateProfileName) + fargateProfile, err := findFargateProfileByTwoPartKey(ctx, conn, clusterName, fargateProfileName) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EKS Fargate Profile (%s) not found, removing from state", d.Id()) @@ -190,16 +176,11 @@ func resourceFargateProfileRead(ctx context.Context, d *schema.ResourceData, met d.Set("cluster_name", fargateProfile.ClusterName) d.Set("fargate_profile_name", fargateProfile.FargateProfileName) d.Set("pod_execution_role_arn", fargateProfile.PodExecutionRoleArn) - if err := d.Set("selector", flattenFargateProfileSelectors(fargateProfile.Selectors)); err != nil { return sdkdiag.AppendErrorf(diags, "setting selector: %s", err) } - d.Set("status", fargateProfile.Status) - - if err := d.Set("subnet_ids", aws.StringValueSlice(fargateProfile.Subnets)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting subnet_ids: %s", err) - } + d.Set("subnet_ids", fargateProfile.Subnets) setTagsOut(ctx, fargateProfile.Tags) @@ -208,20 +189,17 @@ func resourceFargateProfileRead(ctx context.Context, d *schema.ResourceData, met func resourceFargateProfileUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - // Tags only. 
- return append(diags, resourceFargateProfileRead(ctx, d, meta)...) } func resourceFargateProfileDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, fargateProfileName, err := FargateProfileParseResourceID(d.Id()) - if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting EKS Fargate Profile (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } // mutex lock for creation/deletion serialization @@ -230,12 +208,12 @@ func resourceFargateProfileDelete(ctx context.Context, d *schema.ResourceData, m defer conns.GlobalMutexKV.Unlock(mutexKey) log.Printf("[DEBUG] Deleting EKS Fargate Profile: %s", d.Id()) - _, err = conn.DeleteFargateProfileWithContext(ctx, &eks.DeleteFargateProfileInput{ + _, err = conn.DeleteFargateProfile(ctx, &eks.DeleteFargateProfileInput{ ClusterName: aws.String(clusterName), FargateProfileName: aws.String(fargateProfileName), }) - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + if errs.IsA[*types.ResourceNotFoundException](err) { return diags } @@ -243,21 +221,95 @@ func resourceFargateProfileDelete(ctx context.Context, d *schema.ResourceData, m return sdkdiag.AppendErrorf(diags, "deleting EKS Fargate Profile (%s): %s", d.Id(), err) } - _, err = waitFargateProfileDeleted(ctx, conn, clusterName, fargateProfileName, d.Timeout(schema.TimeoutDelete)) + if _, err := waitFargateProfileDeleted(ctx, conn, clusterName, fargateProfileName, d.Timeout(schema.TimeoutDelete)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for EKS Fargate Profile (%s) delete: %s", d.Id(), err) + } + + return diags +} + +func findFargateProfileByTwoPartKey(ctx context.Context, conn *eks.Client, clusterName, fargateProfileName string) (*types.FargateProfile, error) { + input := &eks.DescribeFargateProfileInput{ + ClusterName: aws.String(clusterName), + 
FargateProfileName: aws.String(fargateProfileName), + } + + output, err := conn.DescribeFargateProfile(ctx, input) + + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting EKS Fargate Profile (%s): waiting for completion: %s", d.Id(), err) + return nil, err } - return diags + if output == nil || output.FargateProfile == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.FargateProfile, nil +} + +func statusFargateProfile(ctx context.Context, conn *eks.Client, clusterName, fargateProfileName string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findFargateProfileByTwoPartKey(ctx, conn, clusterName, fargateProfileName) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil + } +} + +func waitFargateProfileCreated(ctx context.Context, conn *eks.Client, clusterName, fargateProfileName string, timeout time.Duration) (*types.FargateProfile, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.FargateProfileStatusCreating), + Target: enum.Slice(types.FargateProfileStatusActive), + Refresh: statusFargateProfile(ctx, conn, clusterName, fargateProfileName), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.FargateProfile); ok { + return output, err + } + + return nil, err +} + +func waitFargateProfileDeleted(ctx context.Context, conn *eks.Client, clusterName, fargateProfileName string, timeout time.Duration) (*types.FargateProfile, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.FargateProfileStatusActive, types.FargateProfileStatusDeleting), + Target: []string{}, + Refresh: statusFargateProfile(ctx, conn, clusterName, fargateProfileName), + 
Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.FargateProfile); ok { + return output, err + } + + return nil, err } -func expandFargateProfileSelectors(l []interface{}) []*eks.FargateProfileSelector { +func expandFargateProfileSelectors(l []interface{}) []types.FargateProfileSelector { if len(l) == 0 { return nil } - fargateProfileSelectors := make([]*eks.FargateProfileSelector, 0, len(l)) + fargateProfileSelectors := make([]types.FargateProfileSelector, 0, len(l)) for _, mRaw := range l { m, ok := mRaw.(map[string]interface{}) @@ -266,10 +318,10 @@ func expandFargateProfileSelectors(l []interface{}) []*eks.FargateProfileSelecto continue } - fargateProfileSelector := &eks.FargateProfileSelector{} + fargateProfileSelector := types.FargateProfileSelector{} if v, ok := m["labels"].(map[string]interface{}); ok && len(v) > 0 { - fargateProfileSelector.Labels = flex.ExpandStringMap(v) + fargateProfileSelector.Labels = flex.ExpandStringValueMap(v) } if v, ok := m["namespace"].(string); ok && v != "" { @@ -282,7 +334,7 @@ func expandFargateProfileSelectors(l []interface{}) []*eks.FargateProfileSelecto return fargateProfileSelectors } -func flattenFargateProfileSelectors(fargateProfileSelectors []*eks.FargateProfileSelector) []map[string]interface{} { +func flattenFargateProfileSelectors(fargateProfileSelectors []types.FargateProfileSelector) []map[string]interface{} { if len(fargateProfileSelectors) == 0 { return []map[string]interface{}{} } @@ -291,8 +343,8 @@ func flattenFargateProfileSelectors(fargateProfileSelectors []*eks.FargateProfil for _, fargateProfileSelector := range fargateProfileSelectors { m := map[string]interface{}{ - "labels": aws.StringValueMap(fargateProfileSelector.Labels), - "namespace": aws.StringValue(fargateProfileSelector.Namespace), + "labels": fargateProfileSelector.Labels, + "namespace": aws.ToString(fargateProfileSelector.Namespace), } l = append(l, m) diff --git 
a/internal/service/eks/find.go b/internal/service/eks/find.go index 457cd8d4da1..7ab037191ff 100644 --- a/internal/service/eks/find.go +++ b/internal/service/eks/find.go @@ -12,35 +12,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" ) -func FindFargateProfileByClusterNameAndFargateProfileName(ctx context.Context, conn *eks.EKS, clusterName, fargateProfileName string) (*eks.FargateProfile, error) { - input := &eks.DescribeFargateProfileInput{ - ClusterName: aws.String(clusterName), - FargateProfileName: aws.String(fargateProfileName), - } - - output, err := conn.DescribeFargateProfileWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.FargateProfile == nil { - return nil, &retry.NotFoundError{ - Message: "Empty result", - LastRequest: input, - } - } - - return output.FargateProfile, nil -} - func FindNodegroupByClusterNameAndNodegroupName(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName string) (*eks.Nodegroup, error) { input := &eks.DescribeNodegroupInput{ ClusterName: aws.String(clusterName), diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index 336b8e608cd..6b46d89ce16 100644 --- a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -74,7 +74,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceFargateProfile, + Factory: resourceFargateProfile, TypeName: "aws_eks_fargate_profile", Name: "Fargate Profile", Tags: &types.ServicePackageResourceTags{ diff --git a/internal/service/eks/status.go b/internal/service/eks/status.go index fd3c09033f1..2d0fe6c2c89 100644 --- a/internal/service/eks/status.go +++ b/internal/service/eks/status.go @@ -12,22 +12,6 @@ import ( 
"github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func statusFargateProfile(ctx context.Context, conn *eks.EKS, clusterName, fargateProfileName string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindFargateProfileByClusterNameAndFargateProfileName(ctx, conn, clusterName, fargateProfileName) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Status), nil - } -} - func statusNodegroup(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := FindNodegroupByClusterNameAndNodegroupName(ctx, conn, clusterName, nodeGroupName) diff --git a/internal/service/eks/wait.go b/internal/service/eks/wait.go index fce4627c7ab..87621066385 100644 --- a/internal/service/eks/wait.go +++ b/internal/service/eks/wait.go @@ -17,40 +17,6 @@ const ( clusterDeleteRetryTimeout = 60 * time.Minute ) -func waitFargateProfileCreated(ctx context.Context, conn *eks.EKS, clusterName, fargateProfileName string, timeout time.Duration) (*eks.FargateProfile, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{eks.FargateProfileStatusCreating}, - Target: []string{eks.FargateProfileStatusActive}, - Refresh: statusFargateProfile(ctx, conn, clusterName, fargateProfileName), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*eks.FargateProfile); ok { - return output, err - } - - return nil, err -} - -func waitFargateProfileDeleted(ctx context.Context, conn *eks.EKS, clusterName, fargateProfileName string, timeout time.Duration) (*eks.FargateProfile, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{eks.FargateProfileStatusActive, eks.FargateProfileStatusDeleting}, - Target: []string{}, - Refresh: statusFargateProfile(ctx, conn, clusterName, 
fargateProfileName), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*eks.FargateProfile); ok { - return output, err - } - - return nil, err -} - func waitNodegroupCreated(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName string, timeout time.Duration) (*eks.Nodegroup, error) { stateConf := &retry.StateChangeConf{ Pending: []string{eks.NodegroupStatusCreating}, From a9e5e2db76a3b95813f75257837d9881debc5146 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 15:03:46 -0500 Subject: [PATCH 32/46] r/aws_eks_node_group: Migrate to AWS SDK for Go v2. --- internal/service/eks/cluster.go | 5 +- internal/service/eks/errors.go | 66 ---- internal/service/eks/find.go | 72 ---- internal/service/eks/node_group.go | 414 +++++++++++++------- internal/service/eks/service_package_gen.go | 2 +- internal/service/eks/status.go | 45 --- internal/service/eks/wait.go | 81 ---- 7 files changed, 274 insertions(+), 411 deletions(-) delete mode 100644 internal/service/eks/errors.go delete mode 100644 internal/service/eks/find.go delete mode 100644 internal/service/eks/status.go delete mode 100644 internal/service/eks/wait.go diff --git a/internal/service/eks/cluster.go b/internal/service/eks/cluster.go index fe8b88b8364..edbf9f9caec 100644 --- a/internal/service/eks/cluster.go +++ b/internal/service/eks/cluster.go @@ -529,7 +529,10 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta int // If a cluster is scaling up due to load a delete request will fail // This is a temporary workaround until EKS supports multiple parallel mutating operations - err := tfresource.Retry(ctx, clusterDeleteRetryTimeout, func() *retry.RetryError { + const ( + timeout = 60 * time.Minute + ) + err := tfresource.Retry(ctx, timeout, func() *retry.RetryError { var err error _, err = conn.DeleteClusterWithContext(ctx, input) diff --git a/internal/service/eks/errors.go b/internal/service/eks/errors.go 
deleted file mode 100644 index 3a992d1c240..00000000000 --- a/internal/service/eks/errors.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package eks - -import ( - "fmt" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/eks" - multierror "github.com/hashicorp/go-multierror" -) - -func ErrorDetailError(apiObject *eks.ErrorDetail) error { - if apiObject == nil { - return nil - } - - return awserr.New(aws.StringValue(apiObject.ErrorCode), aws.StringValue(apiObject.ErrorMessage), nil) -} - -func ErrorDetailsError(apiObjects []*eks.ErrorDetail) error { - var errors *multierror.Error - - for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - - err := ErrorDetailError(apiObject) - - if err != nil { - errors = multierror.Append(errors, fmt.Errorf("%s: %w", strings.Join(aws.StringValueSlice(apiObject.ResourceIds), ", "), err)) - } - } - - return errors.ErrorOrNil() -} - -func IssueError(apiObject *eks.Issue) error { - if apiObject == nil { - return nil - } - - return awserr.New(aws.StringValue(apiObject.Code), aws.StringValue(apiObject.Message), nil) -} - -func IssuesError(apiObjects []*eks.Issue) error { - var errors *multierror.Error - - for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - - err := IssueError(apiObject) - - if err != nil { - errors = multierror.Append(errors, fmt.Errorf("%s: %w", strings.Join(aws.StringValueSlice(apiObject.ResourceIds), ", "), err)) - } - } - - return errors.ErrorOrNil() -} diff --git a/internal/service/eks/find.go b/internal/service/eks/find.go deleted file mode 100644 index 7ab037191ff..00000000000 --- a/internal/service/eks/find.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package eks - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" -) - -func FindNodegroupByClusterNameAndNodegroupName(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName string) (*eks.Nodegroup, error) { - input := &eks.DescribeNodegroupInput{ - ClusterName: aws.String(clusterName), - NodegroupName: aws.String(nodeGroupName), - } - - output, err := conn.DescribeNodegroupWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.Nodegroup == nil { - return nil, &retry.NotFoundError{ - Message: "Empty result", - LastRequest: input, - } - } - - return output.Nodegroup, nil -} - -func FindNodegroupUpdateByClusterNameNodegroupNameAndID(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName, id string) (*eks.Update, error) { - input := &eks.DescribeUpdateInput{ - Name: aws.String(clusterName), - NodegroupName: aws.String(nodeGroupName), - UpdateId: aws.String(id), - } - - output, err := conn.DescribeUpdateWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.Update == nil { - return nil, &retry.NotFoundError{ - Message: "Empty result", - LastRequest: input, - } - } - - return output.Update, nil -} diff --git a/internal/service/eks/node_group.go b/internal/service/eks/node_group.go index ad1a8af448b..c092185a9b3 100644 --- a/internal/service/eks/node_group.go +++ b/internal/service/eks/node_group.go @@ -5,19 +5,25 @@ package eks import ( "context" 
+ "errors" + "fmt" "log" "reflect" + "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -27,12 +33,13 @@ import ( // @SDKResource("aws_eks_node_group", name="Node Group") // @Tags(identifierAttribute="arn") -func ResourceNodeGroup() *schema.Resource { +func resourceNodeGroup() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceNodeGroupCreate, ReadWithoutTimeout: resourceNodeGroupRead, UpdateWithoutTimeout: resourceNodeGroupUpdate, DeleteWithoutTimeout: resourceNodeGroupDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -47,22 +54,22 @@ func ResourceNodeGroup() *schema.Resource { Schema: map[string]*schema.Schema{ "ami_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(eks.AMITypes_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.AMITypes](), }, "arn": { Type: 
schema.TypeString, Computed: true, }, "capacity_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(eks.CapacityTypes_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.CapacityTypes](), }, "cluster_name": { Type: schema.TypeString, @@ -248,9 +255,9 @@ func ResourceNodeGroup() *schema.Resource { ValidateFunc: validation.StringLenBetween(0, 63), }, "effect": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(eks.TaintEffect_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.TaintEffect](), }, }, }, @@ -293,7 +300,7 @@ func ResourceNodeGroup() *schema.Resource { } func resourceNodeGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) clusterName := d.Get("cluster_name").(string) nodeGroupName := create.Name(d.Get("node_group_name").(string), d.Get("node_group_name_prefix").(string)) @@ -303,28 +310,28 @@ func resourceNodeGroupCreate(ctx context.Context, d *schema.ResourceData, meta i ClusterName: aws.String(clusterName), NodegroupName: aws.String(nodeGroupName), NodeRole: aws.String(d.Get("node_role_arn").(string)), - Subnets: flex.ExpandStringSet(d.Get("subnet_ids").(*schema.Set)), + Subnets: flex.ExpandStringValueSet(d.Get("subnet_ids").(*schema.Set)), Tags: getTagsIn(ctx), } if v, ok := d.GetOk("ami_type"); ok { - input.AmiType = aws.String(v.(string)) + input.AmiType = types.AMITypes(v.(string)) } if v, ok := d.GetOk("capacity_type"); ok { - input.CapacityType = aws.String(v.(string)) + input.CapacityType = types.CapacityTypes(v.(string)) } if v, ok := d.GetOk("disk_size"); ok { - input.DiskSize = aws.Int64(int64(v.(int))) + input.DiskSize = aws.Int32(int32(v.(int))) } if v, ok 
:= d.GetOk("instance_types"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - input.InstanceTypes = flex.ExpandStringList(v.([]interface{})) + input.InstanceTypes = flex.ExpandStringValueList(v.([]interface{})) } if v := d.Get("labels").(map[string]interface{}); len(v) > 0 { - input.Labels = flex.ExpandStringMap(v) + input.Labels = flex.ExpandStringValueMap(v) } if v := d.Get("launch_template").([]interface{}); len(v) > 0 { @@ -355,7 +362,7 @@ func resourceNodeGroupCreate(ctx context.Context, d *schema.ResourceData, meta i input.Version = aws.String(v.(string)) } - _, err := conn.CreateNodegroupWithContext(ctx, input) + _, err := conn.CreateNodegroup(ctx, input) if err != nil { return diag.Errorf("creating EKS Node Group (%s): %s", groupID, err) @@ -363,25 +370,22 @@ func resourceNodeGroupCreate(ctx context.Context, d *schema.ResourceData, meta i d.SetId(groupID) - _, err = waitNodegroupCreated(ctx, conn, clusterName, nodeGroupName, d.Timeout(schema.TimeoutCreate)) - - if err != nil { - return diag.Errorf("waiting for EKS Node Group (%s) to create: %s", d.Id(), err) + if _, err := waitNodegroupCreated(ctx, conn, clusterName, nodeGroupName, d.Timeout(schema.TimeoutCreate)); err != nil { + return diag.Errorf("waiting for EKS Node Group (%s) create: %s", d.Id(), err) } return resourceNodeGroupRead(ctx, d, meta) } func resourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, nodeGroupName, err := NodeGroupParseResourceID(d.Id()) - if err != nil { return diag.FromErr(err) } - nodeGroup, err := FindNodegroupByClusterNameAndNodegroupName(ctx, conn, clusterName, nodeGroupName) + nodeGroup, err := findNodegroupByTwoPartKey(ctx, conn, clusterName, nodeGroupName) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EKS Node Group (%s) not found, removing from state", d.Id()) @@ -398,32 
+402,21 @@ func resourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta int d.Set("capacity_type", nodeGroup.CapacityType) d.Set("cluster_name", nodeGroup.ClusterName) d.Set("disk_size", nodeGroup.DiskSize) - - if err := d.Set("instance_types", aws.StringValueSlice(nodeGroup.InstanceTypes)); err != nil { - return diag.Errorf("setting instance_types: %s", err) - } - - if err := d.Set("labels", aws.StringValueMap(nodeGroup.Labels)); err != nil { - return diag.Errorf("setting labels: %s", err) - } - + d.Set("instance_types", nodeGroup.InstanceTypes) + d.Set("labels", nodeGroup.Labels) if err := d.Set("launch_template", flattenLaunchTemplateSpecification(nodeGroup.LaunchTemplate)); err != nil { return diag.Errorf("setting launch_template: %s", err) } - d.Set("node_group_name", nodeGroup.NodegroupName) - d.Set("node_group_name_prefix", create.NamePrefixFromName(aws.StringValue(nodeGroup.NodegroupName))) + d.Set("node_group_name_prefix", create.NamePrefixFromName(aws.ToString(nodeGroup.NodegroupName))) d.Set("node_role_arn", nodeGroup.NodeRole) d.Set("release_version", nodeGroup.ReleaseVersion) - if err := d.Set("remote_access", flattenRemoteAccessConfig(nodeGroup.RemoteAccess)); err != nil { return diag.Errorf("setting remote_access: %s", err) } - if err := d.Set("resources", flattenNodeGroupResources(nodeGroup.Resources)); err != nil { return diag.Errorf("setting resources: %s", err) } - if nodeGroup.ScalingConfig != nil { if err := d.Set("scaling_config", []interface{}{flattenNodeGroupScalingConfig(nodeGroup.ScalingConfig)}); err != nil { return diag.Errorf("setting scaling_config: %s", err) @@ -431,17 +424,11 @@ func resourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta int } else { d.Set("scaling_config", nil) } - d.Set("status", nodeGroup.Status) - - if err := d.Set("subnet_ids", aws.StringValueSlice(nodeGroup.Subnets)); err != nil { - return diag.Errorf("setting subnets: %s", err) - } - + d.Set("subnet_ids", nodeGroup.Subnets) if 
err := d.Set("taint", flattenTaints(nodeGroup.Taints)); err != nil { return diag.Errorf("setting taint: %s", err) } - if nodeGroup.UpdateConfig != nil { if err := d.Set("update_config", []interface{}{flattenNodeGroupUpdateConfig(nodeGroup.UpdateConfig)}); err != nil { return diag.Errorf("setting update_config: %s", err) @@ -449,7 +436,6 @@ func resourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta int } else { d.Set("update_config", nil) } - d.Set("version", nodeGroup.Version) setTagsOut(ctx, nodeGroup.Tags) @@ -458,10 +444,9 @@ func resourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta int } func resourceNodeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, nodeGroupName, err := NodeGroupParseResourceID(d.Id()) - if err != nil { return diag.FromErr(err) } @@ -471,7 +456,7 @@ func resourceNodeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i input := &eks.UpdateNodegroupVersionInput{ ClientRequestToken: aws.String(id.UniqueId()), ClusterName: aws.String(clusterName), - Force: aws.Bool(d.Get("force_update_version").(bool)), + Force: d.Get("force_update_version").(bool), NodegroupName: aws.String(nodeGroupName), } @@ -503,17 +488,15 @@ func resourceNodeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i input.Version = aws.String(v.(string)) } - output, err := conn.UpdateNodegroupVersionWithContext(ctx, input) + output, err := conn.UpdateNodegroupVersion(ctx, input) if err != nil { return diag.Errorf("updating EKS Node Group (%s) version: %s", d.Id(), err) } - updateID := aws.StringValue(output.Update.Id) - - _, err = waitNodegroupUpdateSuccessful(ctx, conn, clusterName, nodeGroupName, updateID, d.Timeout(schema.TimeoutUpdate)) + updateID := aws.ToString(output.Update.Id) - if err != nil { + if _, err := waitNodegroupUpdateSuccessful(ctx, conn, 
clusterName, nodeGroupName, updateID, d.Timeout(schema.TimeoutUpdate)); err != nil { return diag.Errorf("waiting for EKS Node Group (%s) version update (%s): %s", d.Id(), updateID, err) } } @@ -542,17 +525,15 @@ func resourceNodeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i } } - output, err := conn.UpdateNodegroupConfigWithContext(ctx, input) + output, err := conn.UpdateNodegroupConfig(ctx, input) if err != nil { return diag.Errorf("updating EKS Node Group (%s) config: %s", d.Id(), err) } - updateID := aws.StringValue(output.Update.Id) - - _, err = waitNodegroupUpdateSuccessful(ctx, conn, clusterName, nodeGroupName, updateID, d.Timeout(schema.TimeoutUpdate)) + updateID := aws.ToString(output.Update.Id) - if err != nil { + if _, err := waitNodegroupUpdateSuccessful(ctx, conn, clusterName, nodeGroupName, updateID, d.Timeout(schema.TimeoutUpdate)); err != nil { return diag.Errorf("waiting for EKS Node Group (%s) config update (%s): %s", d.Id(), updateID, err) } } @@ -561,21 +542,20 @@ func resourceNodeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i } func resourceNodeGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) clusterName, nodeGroupName, err := NodeGroupParseResourceID(d.Id()) - if err != nil { return diag.FromErr(err) } log.Printf("[DEBUG] Deleting EKS Node Group: %s", d.Id()) - _, err = conn.DeleteNodegroupWithContext(ctx, &eks.DeleteNodegroupInput{ + _, err = conn.DeleteNodegroup(ctx, &eks.DeleteNodegroupInput{ ClusterName: aws.String(clusterName), NodegroupName: aws.String(nodeGroupName), }) - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + if errs.IsA[*types.ResourceNotFoundException](err) { return nil } @@ -583,23 +563,187 @@ func resourceNodeGroupDelete(ctx context.Context, d *schema.ResourceData, meta i return diag.Errorf("deleting EKS Node Group (%s): %s", 
d.Id(), err) } - _, err = waitNodegroupDeleted(ctx, conn, clusterName, nodeGroupName, d.Timeout(schema.TimeoutDelete)) + if _, err := waitNodegroupDeleted(ctx, conn, clusterName, nodeGroupName, d.Timeout(schema.TimeoutDelete)); err != nil { + return diag.Errorf("waiting for EKS Node Group (%s) delete: %s", d.Id(), err) + } + + return nil +} + +func findNodegroupByTwoPartKey(ctx context.Context, conn *eks.Client, clusterName, nodeGroupName string) (*types.Nodegroup, error) { + input := &eks.DescribeNodegroupInput{ + ClusterName: aws.String(clusterName), + NodegroupName: aws.String(nodeGroupName), + } + + output, err := conn.DescribeNodegroup(ctx, input) + + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } if err != nil { - return diag.Errorf("waiting for EKS Node Group (%s) to delete: %s", d.Id(), err) + return nil, err } - return nil + if output == nil || output.Nodegroup == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.Nodegroup, nil +} + +func findNodegroupUpdateByThreePartKey(ctx context.Context, conn *eks.Client, clusterName, nodeGroupName, id string) (*types.Update, error) { + input := &eks.DescribeUpdateInput{ + Name: aws.String(clusterName), + NodegroupName: aws.String(nodeGroupName), + UpdateId: aws.String(id), + } + + output, err := conn.DescribeUpdate(ctx, input) + + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.Update == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.Update, nil } -func expandLaunchTemplateSpecification(l []interface{}) *eks.LaunchTemplateSpecification { +func statusNodegroup(ctx context.Context, conn *eks.Client, clusterName, nodeGroupName string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + 
output, err := findNodegroupByTwoPartKey(ctx, conn, clusterName, nodeGroupName) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil + } +} + +func statusNodegroupUpdate(ctx context.Context, conn *eks.Client, clusterName, nodeGroupName, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findNodegroupUpdateByThreePartKey(ctx, conn, clusterName, nodeGroupName, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil + } +} + +func waitNodegroupCreated(ctx context.Context, conn *eks.Client, clusterName, nodeGroupName string, timeout time.Duration) (*types.Nodegroup, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.NodegroupStatusCreating), + Target: enum.Slice(types.NodegroupStatusActive), + Refresh: statusNodegroup(ctx, conn, clusterName, nodeGroupName), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.Nodegroup); ok { + if status, health := output.Status, output.Health; status == types.NodegroupStatusCreateFailed && health != nil { + tfresource.SetLastError(err, issuesError(health.Issues)) + } + + return output, err + } + + return nil, err +} + +func waitNodegroupDeleted(ctx context.Context, conn *eks.Client, clusterName, nodeGroupName string, timeout time.Duration) (*types.Nodegroup, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.NodegroupStatusActive, types.NodegroupStatusDeleting), + Target: []string{}, + Refresh: statusNodegroup(ctx, conn, clusterName, nodeGroupName), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.Nodegroup); ok { + if status, health := output.Status, output.Health; status == 
types.NodegroupStatusDeleteFailed && health != nil { + tfresource.SetLastError(err, issuesError(health.Issues)) + } + + return output, err + } + + return nil, err +} + +func waitNodegroupUpdateSuccessful(ctx context.Context, conn *eks.Client, clusterName, nodeGroupName, id string, timeout time.Duration) (*types.Update, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.UpdateStatusInProgress), + Target: enum.Slice(types.UpdateStatusSuccessful), + Refresh: statusNodegroupUpdate(ctx, conn, clusterName, nodeGroupName, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.Update); ok { + if status := output.Status; status == types.UpdateStatusCancelled || status == types.UpdateStatusFailed { + tfresource.SetLastError(err, errorDetailsError(output.Errors)) + } + + return output, err + } + + return nil, err +} + +func issueError(apiObject types.Issue) error { + return fmt.Errorf("%s: %s", apiObject.Code, aws.ToString(apiObject.Message)) +} + +func issuesError(apiObjects []types.Issue) error { + var errs []error + + for _, apiObject := range apiObjects { + err := issueError(apiObject) + + if err != nil { + errs = append(errs, fmt.Errorf("%s: %w", strings.Join(apiObject.ResourceIds, ", "), err)) + } + } + + return errors.Join(errs...) 
+} + +func expandLaunchTemplateSpecification(l []interface{}) *types.LaunchTemplateSpecification { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - config := &eks.LaunchTemplateSpecification{} + config := &types.LaunchTemplateSpecification{} if v, ok := m["id"].(string); ok && v != "" { config.Id = aws.String(v) @@ -616,34 +760,34 @@ func expandLaunchTemplateSpecification(l []interface{}) *eks.LaunchTemplateSpeci return config } -func expandNodegroupScalingConfig(tfMap map[string]interface{}) *eks.NodegroupScalingConfig { +func expandNodegroupScalingConfig(tfMap map[string]interface{}) *types.NodegroupScalingConfig { if tfMap == nil { return nil } - apiObject := &eks.NodegroupScalingConfig{} + apiObject := &types.NodegroupScalingConfig{} if v, ok := tfMap["desired_size"].(int); ok { - apiObject.DesiredSize = aws.Int64(int64(v)) + apiObject.DesiredSize = aws.Int32(int32(v)) } if v, ok := tfMap["max_size"].(int); ok && v != 0 { - apiObject.MaxSize = aws.Int64(int64(v)) + apiObject.MaxSize = aws.Int32(int32(v)) } if v, ok := tfMap["min_size"].(int); ok { - apiObject.MinSize = aws.Int64(int64(v)) + apiObject.MinSize = aws.Int32(int32(v)) } return apiObject } -func expandTaints(l []interface{}) []*eks.Taint { +func expandTaints(l []interface{}) []types.Taint { if len(l) == 0 { return nil } - var taints []*eks.Taint + var taints []types.Taint for _, raw := range l { t, ok := raw.(map[string]interface{}) @@ -652,7 +796,7 @@ func expandTaints(l []interface{}) []*eks.Taint { continue } - taint := &eks.Taint{} + taint := types.Taint{} if k, ok := t["key"].(string); ok { taint.Key = aws.String(k) @@ -663,7 +807,7 @@ func expandTaints(l []interface{}) []*eks.Taint { } if e, ok := t["effect"].(string); ok { - taint.Effect = aws.String(e) + taint.Effect = types.TaintEffect(e) } taints = append(taints, taint) @@ -672,25 +816,16 @@ func expandTaints(l []interface{}) []*eks.Taint { return taints } -func expandUpdateTaintsPayload(oldTaintsRaw, 
newTaintsRaw []interface{}) *eks.UpdateTaintsPayload { +func expandUpdateTaintsPayload(oldTaintsRaw, newTaintsRaw []interface{}) *types.UpdateTaintsPayload { oldTaints := expandTaints(oldTaintsRaw) newTaints := expandTaints(newTaintsRaw) - var removedTaints []*eks.Taint + var removedTaints []types.Taint for _, ot := range oldTaints { - if ot == nil { - continue - } - removed := true for _, nt := range newTaints { - if nt == nil { - continue - } - - // if both taint.key and taint.effect are the same, we don't need to remove it. - if aws.StringValue(nt.Key) == aws.StringValue(ot.Key) && - aws.StringValue(nt.Effect) == aws.StringValue(ot.Effect) { + // If both taint.key and taint.effect are the same, we don't need to remove it. + if aws.ToString(nt.Key) == aws.ToString(ot.Key) && nt.Effect == ot.Effect { removed = false break } @@ -701,23 +836,16 @@ func expandUpdateTaintsPayload(oldTaintsRaw, newTaintsRaw []interface{}) *eks.Up } } - var updatedTaints []*eks.Taint + var updatedTaints []types.Taint for _, nt := range newTaints { - if nt == nil { - continue - } - updated := true for _, ot := range oldTaints { - if nt == nil { - continue - } - if reflect.DeepEqual(nt, ot) { updated = false break } } + if updated { updatedTaints = append(updatedTaints, nt) } @@ -727,7 +855,7 @@ func expandUpdateTaintsPayload(oldTaintsRaw, newTaintsRaw []interface{}) *eks.Up return nil } - updateTaintsPayload := &eks.UpdateTaintsPayload{} + updateTaintsPayload := &types.UpdateTaintsPayload{} if len(removedTaints) > 0 { updateTaintsPayload.RemoveTaints = removedTaints @@ -740,45 +868,45 @@ func expandUpdateTaintsPayload(oldTaintsRaw, newTaintsRaw []interface{}) *eks.Up return updateTaintsPayload } -func expandRemoteAccessConfig(l []interface{}) *eks.RemoteAccessConfig { +func expandRemoteAccessConfig(l []interface{}) *types.RemoteAccessConfig { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - config := &eks.RemoteAccessConfig{} + config := 
&types.RemoteAccessConfig{} if v, ok := m["ec2_ssh_key"].(string); ok && v != "" { config.Ec2SshKey = aws.String(v) } if v, ok := m["source_security_group_ids"].(*schema.Set); ok && v.Len() > 0 { - config.SourceSecurityGroups = flex.ExpandStringSet(v) + config.SourceSecurityGroups = flex.ExpandStringValueSet(v) } return config } -func expandNodegroupUpdateConfig(tfMap map[string]interface{}) *eks.NodegroupUpdateConfig { +func expandNodegroupUpdateConfig(tfMap map[string]interface{}) *types.NodegroupUpdateConfig { if tfMap == nil { return nil } - apiObject := &eks.NodegroupUpdateConfig{} + apiObject := &types.NodegroupUpdateConfig{} if v, ok := tfMap["max_unavailable"].(int); ok && v != 0 { - apiObject.MaxUnavailable = aws.Int64(int64(v)) + apiObject.MaxUnavailable = aws.Int32(int32(v)) } if v, ok := tfMap["max_unavailable_percentage"].(int); ok && v != 0 { - apiObject.MaxUnavailablePercentage = aws.Int64(int64(v)) + apiObject.MaxUnavailablePercentage = aws.Int32(int32(v)) } return apiObject } -func expandUpdateLabelsPayload(ctx context.Context, oldLabelsMap, newLabelsMap interface{}) *eks.UpdateLabelsPayload { +func expandUpdateLabelsPayload(ctx context.Context, oldLabelsMap, newLabelsMap interface{}) *types.UpdateLabelsPayload { // EKS Labels operate similarly to keyvaluetags oldLabels := tftags.New(ctx, oldLabelsMap) newLabels := tftags.New(ctx, newLabelsMap) @@ -790,20 +918,20 @@ func expandUpdateLabelsPayload(ctx context.Context, oldLabelsMap, newLabelsMap i return nil } - updateLabelsPayload := &eks.UpdateLabelsPayload{} + updateLabelsPayload := &types.UpdateLabelsPayload{} if len(removedLabels) > 0 { - updateLabelsPayload.RemoveLabels = aws.StringSlice(removedLabels.Keys()) + updateLabelsPayload.RemoveLabels = removedLabels.Keys() } if len(updatedLabels) > 0 { - updateLabelsPayload.AddOrUpdateLabels = aws.StringMap(updatedLabels.Map()) + updateLabelsPayload.AddOrUpdateLabels = updatedLabels.Map() } return updateLabelsPayload } -func 
flattenAutoScalingGroups(autoScalingGroups []*eks.AutoScalingGroup) []map[string]interface{} { +func flattenAutoScalingGroups(autoScalingGroups []types.AutoScalingGroup) []map[string]interface{} { if len(autoScalingGroups) == 0 { return []map[string]interface{}{} } @@ -812,7 +940,7 @@ func flattenAutoScalingGroups(autoScalingGroups []*eks.AutoScalingGroup) []map[s for _, autoScalingGroup := range autoScalingGroups { m := map[string]interface{}{ - "name": aws.StringValue(autoScalingGroup.Name), + "name": aws.ToString(autoScalingGroup.Name), } l = append(l, m) @@ -821,7 +949,7 @@ func flattenAutoScalingGroups(autoScalingGroups []*eks.AutoScalingGroup) []map[s return l } -func flattenLaunchTemplateSpecification(config *eks.LaunchTemplateSpecification) []map[string]interface{} { +func flattenLaunchTemplateSpecification(config *types.LaunchTemplateSpecification) []map[string]interface{} { if config == nil { return nil } @@ -829,34 +957,34 @@ func flattenLaunchTemplateSpecification(config *eks.LaunchTemplateSpecification) m := map[string]interface{}{} if v := config.Id; v != nil { - m["id"] = aws.StringValue(v) + m["id"] = aws.ToString(v) } if v := config.Name; v != nil { - m["name"] = aws.StringValue(v) + m["name"] = aws.ToString(v) } if v := config.Version; v != nil { - m["version"] = aws.StringValue(v) + m["version"] = aws.ToString(v) } return []map[string]interface{}{m} } -func flattenNodeGroupResources(resources *eks.NodegroupResources) []map[string]interface{} { +func flattenNodeGroupResources(resources *types.NodegroupResources) []map[string]interface{} { if resources == nil { return []map[string]interface{}{} } m := map[string]interface{}{ "autoscaling_groups": flattenAutoScalingGroups(resources.AutoScalingGroups), - "remote_access_security_group_id": aws.StringValue(resources.RemoteAccessSecurityGroup), + "remote_access_security_group_id": aws.ToString(resources.RemoteAccessSecurityGroup), } return []map[string]interface{}{m} } -func 
flattenNodeGroupScalingConfig(apiObject *eks.NodegroupScalingConfig) map[string]interface{} { +func flattenNodeGroupScalingConfig(apiObject *types.NodegroupScalingConfig) map[string]interface{} { if apiObject == nil { return nil } @@ -864,21 +992,21 @@ func flattenNodeGroupScalingConfig(apiObject *eks.NodegroupScalingConfig) map[st tfMap := map[string]interface{}{} if v := apiObject.DesiredSize; v != nil { - tfMap["desired_size"] = aws.Int64Value(v) + tfMap["desired_size"] = aws.ToInt32(v) } if v := apiObject.MaxSize; v != nil { - tfMap["max_size"] = aws.Int64Value(v) + tfMap["max_size"] = aws.ToInt32(v) } if v := apiObject.MinSize; v != nil { - tfMap["min_size"] = aws.Int64Value(v) + tfMap["min_size"] = aws.ToInt32(v) } return tfMap } -func flattenNodeGroupUpdateConfig(apiObject *eks.NodegroupUpdateConfig) map[string]interface{} { +func flattenNodeGroupUpdateConfig(apiObject *types.NodegroupUpdateConfig) map[string]interface{} { if apiObject == nil { return nil } @@ -886,30 +1014,30 @@ func flattenNodeGroupUpdateConfig(apiObject *eks.NodegroupUpdateConfig) map[stri tfMap := map[string]interface{}{} if v := apiObject.MaxUnavailable; v != nil { - tfMap["max_unavailable"] = aws.Int64Value(v) + tfMap["max_unavailable"] = aws.ToInt32(v) } if v := apiObject.MaxUnavailablePercentage; v != nil { - tfMap["max_unavailable_percentage"] = aws.Int64Value(v) + tfMap["max_unavailable_percentage"] = aws.ToInt32(v) } return tfMap } -func flattenRemoteAccessConfig(config *eks.RemoteAccessConfig) []map[string]interface{} { +func flattenRemoteAccessConfig(config *types.RemoteAccessConfig) []map[string]interface{} { if config == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "ec2_ssh_key": aws.StringValue(config.Ec2SshKey), - "source_security_group_ids": aws.StringValueSlice(config.SourceSecurityGroups), + "ec2_ssh_key": aws.ToString(config.Ec2SshKey), + "source_security_group_ids": config.SourceSecurityGroups, } return []map[string]interface{}{m} } -func 
flattenTaints(taints []*eks.Taint) []interface{} { +func flattenTaints(taints []types.Taint) []interface{} { if len(taints) == 0 { return nil } @@ -917,14 +1045,10 @@ func flattenTaints(taints []*eks.Taint) []interface{} { var results []interface{} for _, taint := range taints { - if taint == nil { - continue - } - t := make(map[string]interface{}) - t["key"] = aws.StringValue(taint.Key) - t["value"] = aws.StringValue(taint.Value) - t["effect"] = aws.StringValue(taint.Effect) + t["key"] = aws.ToString(taint.Key) + t["value"] = aws.ToString(taint.Value) + t["effect"] = taint.Effect results = append(results, t) } diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index 6b46d89ce16..2bd7bc6771a 100644 --- a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -90,7 +90,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceNodeGroup, + Factory: resourceNodeGroup, TypeName: "aws_eks_node_group", Name: "Node Group", Tags: &types.ServicePackageResourceTags{ diff --git a/internal/service/eks/status.go b/internal/service/eks/status.go deleted file mode 100644 index 2d0fe6c2c89..00000000000 --- a/internal/service/eks/status.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package eks - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func statusNodegroup(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindNodegroupByClusterNameAndNodegroupName(ctx, conn, clusterName, nodeGroupName) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Status), nil - } -} - -func statusNodegroupUpdate(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindNodegroupUpdateByClusterNameNodegroupNameAndID(ctx, conn, clusterName, nodeGroupName, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Status), nil - } -} diff --git a/internal/service/eks/wait.go b/internal/service/eks/wait.go deleted file mode 100644 index 87621066385..00000000000 --- a/internal/service/eks/wait.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package eks - -import ( - "context" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -const ( - clusterDeleteRetryTimeout = 60 * time.Minute -) - -func waitNodegroupCreated(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName string, timeout time.Duration) (*eks.Nodegroup, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{eks.NodegroupStatusCreating}, - Target: []string{eks.NodegroupStatusActive}, - Refresh: statusNodegroup(ctx, conn, clusterName, nodeGroupName), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*eks.Nodegroup); ok { - if status, health := aws.StringValue(output.Status), output.Health; status == eks.NodegroupStatusCreateFailed && health != nil { - tfresource.SetLastError(err, IssuesError(health.Issues)) - } - - return output, err - } - - return nil, err -} - -func waitNodegroupDeleted(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName string, timeout time.Duration) (*eks.Nodegroup, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{eks.NodegroupStatusActive, eks.NodegroupStatusDeleting}, - Target: []string{}, - Refresh: statusNodegroup(ctx, conn, clusterName, nodeGroupName), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*eks.Nodegroup); ok { - if status, health := aws.StringValue(output.Status), output.Health; status == eks.NodegroupStatusDeleteFailed && health != nil { - tfresource.SetLastError(err, IssuesError(health.Issues)) - } - - return output, err - } - - return nil, err -} - -func waitNodegroupUpdateSuccessful(ctx context.Context, conn *eks.EKS, clusterName, nodeGroupName, id string, timeout time.Duration) (*eks.Update, error) { //nolint:unparam - 
stateConf := &retry.StateChangeConf{ - Pending: []string{eks.UpdateStatusInProgress}, - Target: []string{eks.UpdateStatusSuccessful}, - Refresh: statusNodegroupUpdate(ctx, conn, clusterName, nodeGroupName, id), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*eks.Update); ok { - if status := aws.StringValue(output.Status); status == eks.UpdateStatusCancelled || status == eks.UpdateStatusFailed { - tfresource.SetLastError(err, ErrorDetailsError(output.Errors)) - } - - return output, err - } - - return nil, err -} From 6ce7e1eb6e01b2fc8dc2be6b712e3f656b6319d9 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 15:08:29 -0500 Subject: [PATCH 33/46] d/aws_eks_node_group: Migrate to AWS SDK for Go v2. --- internal/service/eks/node_group_data_source.go | 10 ++++------ internal/service/eks/service_package_gen.go | 2 +- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/internal/service/eks/node_group_data_source.go b/internal/service/eks/node_group_data_source.go index 32cbdf4af4c..613b5d8557f 100644 --- a/internal/service/eks/node_group_data_source.go +++ b/internal/service/eks/node_group_data_source.go @@ -6,7 +6,6 @@ package eks import ( "context" - "github.com/aws/aws-sdk-go/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -15,7 +14,7 @@ import ( ) // @SDKDataSource("aws_eks_node_group") -func DataSourceNodeGroup() *schema.Resource { +func dataSourceNodeGroup() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceNodeGroupRead, @@ -184,20 +183,19 @@ func DataSourceNodeGroup() *schema.Resource { } func dataSourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) ignoreTagsConfig := 
meta.(*conns.AWSClient).IgnoreTagsConfig clusterName := d.Get("cluster_name").(string) nodeGroupName := d.Get("node_group_name").(string) id := NodeGroupCreateResourceID(clusterName, nodeGroupName) - nodeGroup, err := FindNodegroupByClusterNameAndNodegroupName(ctx, conn, clusterName, nodeGroupName) + nodeGroup, err := findNodegroupByTwoPartKey(ctx, conn, clusterName, nodeGroupName) if err != nil { return diag.Errorf("reading EKS Node Group (%s): %s", id, err) } d.SetId(id) - d.Set("ami_type", nodeGroup.AmiType) d.Set("arn", nodeGroup.NodegroupArn) d.Set("capacity_type", nodeGroup.CapacityType) @@ -225,7 +223,7 @@ func dataSourceNodeGroupRead(ctx context.Context, d *schema.ResourceData, meta i d.Set("scaling_config", nil) } d.Set("status", nodeGroup.Status) - d.Set("subnet_ids", aws.StringValueSlice(nodeGroup.Subnets)) + d.Set("subnet_ids", nodeGroup.Subnets) if err := d.Set("taints", flattenTaints(nodeGroup.Taints)); err != nil { return diag.Errorf("setting taints: %s", err) } diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index 2bd7bc6771a..f3617cdbf7e 100644 --- a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -45,7 +45,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac TypeName: "aws_eks_clusters", }, { - Factory: DataSourceNodeGroup, + Factory: dataSourceNodeGroup, TypeName: "aws_eks_node_group", }, { From 4df1e762eda7b57185b4da2467952f175806f659 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 15:32:56 -0500 Subject: [PATCH 34/46] r/aws_eks_cluster: Migrate to AWS SDK for Go v2. 
--- internal/service/eks/cluster.go | 252 ++++++++++---------- internal/service/eks/service_package_gen.go | 2 +- 2 files changed, 127 insertions(+), 127 deletions(-) diff --git a/internal/service/eks/cluster.go b/internal/service/eks/cluster.go index edbf9f9caec..5b087e13a2c 100644 --- a/internal/service/eks/cluster.go +++ b/internal/service/eks/cluster.go @@ -10,15 +10,17 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -28,7 +30,7 @@ import ( // @SDKResource("aws_eks_cluster", name="Cluster") // @Tags(identifierAttribute="arn") -func ResourceCluster() *schema.Resource { +func resourceCluster() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceClusterCreate, ReadWithoutTimeout: resourceClusterRead, @@ -82,10 +84,9 @@ func ResourceCluster() *schema.Resource { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(eks.LogType_Values(), true), + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[types.LogType](), }, - 
Set: schema.HashString, }, "encryption_config": { Type: schema.TypeList, @@ -151,11 +152,11 @@ func ResourceCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "ip_family": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(eks.IpFamily_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.IpFamily](), }, "service_ipv4_cidr": { Type: schema.TypeString, @@ -291,7 +292,7 @@ func ResourceCluster() *schema.Resource { } func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) name := d.Get("name").(string) input := &eks.CreateClusterInput{ @@ -317,30 +318,30 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, func() (interface{}, error) { - return conn.CreateClusterWithContext(ctx, input) + return conn.CreateCluster(ctx, input) }, func(err error) (bool, error) { // InvalidParameterException: roleArn, arn:aws:iam::123456789012:role/XXX, does not exist - if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "does not exist") { + if errs.IsAErrorMessageContains[*types.InvalidParameterException](err, "does not exist") { return true, err } // InvalidParameterException: Error in role params - if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "Error in role params") { + if errs.IsAErrorMessageContains[*types.InvalidParameterException](err, "Error in role params") { return true, err } - if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "Role could not be assumed because the trusted entity is not correct") { + if errs.IsAErrorMessageContains[*types.InvalidParameterException](err, 
"Role could not be assumed because the trusted entity is not correct") { return true, err } // InvalidParameterException: The provided role doesn't have the Amazon EKS Managed Policies associated with it. Please ensure the following policy is attached: arn:aws:iam::aws:policy/AmazonEKSClusterPolicy - if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "The provided role doesn't have the Amazon EKS Managed Policies associated with it") { + if errs.IsAErrorMessageContains[*types.InvalidParameterException](err, "The provided role doesn't have the Amazon EKS Managed Policies associated with it") { return true, err } // InvalidParameterException: IAM role's policy must include the `ec2:DescribeSubnets` action - if tfawserr.ErrMessageContains(err, eks.ErrCodeInvalidParameterException, "IAM role's policy must include") { + if errs.IsAErrorMessageContains[*types.InvalidParameterException](err, "IAM role's policy must include") { return true, err } @@ -352,7 +353,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int return diag.Errorf("creating EKS Cluster (%s): %s", name, err) } - d.SetId(aws.StringValue(outputRaw.(*eks.CreateClusterOutput).Cluster.Name)) + d.SetId(aws.ToString(outputRaw.(*eks.CreateClusterOutput).Cluster.Name)) if _, err := waitClusterCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return diag.Errorf("waiting for EKS Cluster (%s) create: %s", d.Id(), err) @@ -362,9 +363,9 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int } func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) - cluster, err := FindClusterByName(ctx, conn, d.Id()) + cluster, err := findClusterByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EKS Cluster (%s) not found, removing from 
state", d.Id()) @@ -384,7 +385,7 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter if cluster.OutpostConfig != nil { d.Set("cluster_id", cluster.Id) } - d.Set("created_at", aws.TimeValue(cluster.CreatedAt).String()) + d.Set("created_at", aws.ToTime(cluster.CreatedAt).String()) if err := d.Set("enabled_cluster_log_types", flattenLogging(cluster.Logging)); err != nil { return diag.Errorf("setting enabled_cluster_log_types: %s", err) } @@ -416,7 +417,7 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter } func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) // Do any version update first. if d.HasChange("version") { @@ -425,13 +426,13 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int Version: aws.String(d.Get("version").(string)), } - output, err := conn.UpdateClusterVersionWithContext(ctx, input) + output, err := conn.UpdateClusterVersion(ctx, input) if err != nil { return diag.Errorf("updating EKS Cluster (%s) version: %s", d.Id(), err) } - updateID := aws.StringValue(output.Update.Id) + updateID := aws.ToString(output.Update.Id) if _, err := waitClusterUpdateSuccessful(ctx, conn, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)); err != nil { return diag.Errorf("waiting for EKS Cluster (%s) version update (%s): %s", d.Id(), updateID, err) @@ -447,13 +448,13 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int EncryptionConfig: expandEncryptionConfig(d.Get("encryption_config").([]interface{})), } - output, err := conn.AssociateEncryptionConfigWithContext(ctx, input) + output, err := conn.AssociateEncryptionConfig(ctx, input) if err != nil { return diag.Errorf("associating EKS Cluster (%s) encryption config: %s", d.Id(), err) } - updateID := aws.StringValue(output.Update.Id) + updateID := 
aws.ToString(output.Update.Id) if _, err := waitClusterUpdateSuccessful(ctx, conn, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)); err != nil { return diag.Errorf("waiting for EKS Cluster (%s) encryption config association (%s): %s", d.Id(), updateID, err) @@ -467,26 +468,26 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int Name: aws.String(d.Id()), } - output, err := conn.UpdateClusterConfigWithContext(ctx, input) + output, err := conn.UpdateClusterConfig(ctx, input) if err != nil { return diag.Errorf("updating EKS Cluster (%s) logging: %s", d.Id(), err) } - updateID := aws.StringValue(output.Update.Id) + updateID := aws.ToString(output.Update.Id) if _, err := waitClusterUpdateSuccessful(ctx, conn, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)); err != nil { return diag.Errorf("waiting for EKS Cluster (%s) logging update (%s): %s", d.Id(), updateID, err) } } if d.HasChanges("vpc_config.0.endpoint_private_access", "vpc_config.0.endpoint_public_access", "vpc_config.0.public_access_cidrs") { - config := &eks.VpcConfigRequest{ + config := &types.VpcConfigRequest{ EndpointPrivateAccess: aws.Bool(d.Get("vpc_config.0.endpoint_private_access").(bool)), EndpointPublicAccess: aws.Bool(d.Get("vpc_config.0.endpoint_public_access").(bool)), } if v, ok := d.GetOk("vpc_config.0.public_access_cidrs"); ok && v.(*schema.Set).Len() > 0 { - config.PublicAccessCidrs = flex.ExpandStringSet(v.(*schema.Set)) + config.PublicAccessCidrs = flex.ExpandStringValueSet(v.(*schema.Set)) } if err := updateVPCConfig(ctx, conn, d.Id(), config, d.Timeout(schema.TimeoutUpdate)); err != nil { @@ -496,8 +497,8 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int // API only allows one type of update at at time. 
if d.HasChange("vpc_config.0.subnet_ids") { - config := &eks.VpcConfigRequest{ - SubnetIds: flex.ExpandStringSet(d.Get("vpc_config.0.subnet_ids").(*schema.Set)), + config := &types.VpcConfigRequest{ + SubnetIds: flex.ExpandStringValueSet(d.Get("vpc_config.0.subnet_ids").(*schema.Set)), } if err := updateVPCConfig(ctx, conn, d.Id(), config, d.Timeout(schema.TimeoutUpdate)); err != nil { @@ -506,8 +507,8 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int } if d.HasChange("vpc_config.0.security_group_ids") { - config := &eks.VpcConfigRequest{ - SecurityGroupIds: flex.ExpandStringSet(d.Get("vpc_config.0.security_group_ids").(*schema.Set)), + config := &types.VpcConfigRequest{ + SecurityGroupIds: flex.ExpandStringValueSet(d.Get("vpc_config.0.security_group_ids").(*schema.Set)), } if err := updateVPCConfig(ctx, conn, d.Id(), config, d.Timeout(schema.TimeoutUpdate)); err != nil { @@ -519,9 +520,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int } func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) - - log.Printf("[DEBUG] Deleting EKS Cluster: %s", d.Id()) + conn := meta.(*conns.AWSClient).EKSClient(ctx) input := &eks.DeleteClusterInput{ Name: aws.String(d.Id()), @@ -532,12 +531,13 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta int const ( timeout = 60 * time.Minute ) + log.Printf("[DEBUG] Deleting EKS Cluster: %s", d.Id()) err := tfresource.Retry(ctx, timeout, func() *retry.RetryError { var err error - _, err = conn.DeleteClusterWithContext(ctx, input) + _, err = conn.DeleteCluster(ctx, input) - if tfawserr.ErrMessageContains(err, eks.ErrCodeResourceInUseException, "in progress") { + if errs.IsAErrorMessageContains[*types.ResourceInUseException](err, "in progress") { return retry.RetryableError(err) } @@ -549,16 +549,16 @@ func resourceClusterDelete(ctx context.Context, 
d *schema.ResourceData, meta int }, tfresource.WithDelayRand(1*time.Minute), tfresource.WithPollInterval(30*time.Second)) if tfresource.TimedOut(err) { - _, err = conn.DeleteClusterWithContext(ctx, input) + _, err = conn.DeleteCluster(ctx, input) } - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + if errs.IsA[*types.ResourceNotFoundException](err) { return nil } // Sometimes the EKS API returns the ResourceNotFound error in this form: // ClientException: No cluster found for name: tf-acc-test-0o1f8 - if tfawserr.ErrMessageContains(err, eks.ErrCodeClientException, "No cluster found for name:") { + if errs.IsAErrorMessageContains[*types.ClientException](err, "No cluster found for name:") { return nil } @@ -573,16 +573,16 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta int return nil } -func FindClusterByName(ctx context.Context, conn *eks.EKS, name string) (*eks.Cluster, error) { +func findClusterByName(ctx context.Context, conn *eks.Client, name string) (*types.Cluster, error) { input := &eks.DescribeClusterInput{ Name: aws.String(name), } - output, err := conn.DescribeClusterWithContext(ctx, input) + output, err := conn.DescribeCluster(ctx, input) // Sometimes the EKS API returns the ResourceNotFound error in this form: // ClientException: No cluster found for name: tf-acc-test-0o1f8 - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) || tfawserr.ErrMessageContains(err, eks.ErrCodeClientException, "No cluster found for name:") { + if errs.IsA[*types.ResourceNotFoundException](err) || errs.IsAErrorMessageContains[*types.ClientException](err, "No cluster found for name:") { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -600,19 +600,19 @@ func FindClusterByName(ctx context.Context, conn *eks.EKS, name string) (*eks.Cl return output.Cluster, nil } -func updateVPCConfig(ctx context.Context, conn *eks.EKS, name string, config *eks.VpcConfigRequest, timeout 
time.Duration) error { +func updateVPCConfig(ctx context.Context, conn *eks.Client, name string, config *types.VpcConfigRequest, timeout time.Duration) error { input := &eks.UpdateClusterConfigInput{ Name: aws.String(name), ResourcesVpcConfig: config, } - output, err := conn.UpdateClusterConfigWithContext(ctx, input) + output, err := conn.UpdateClusterConfig(ctx, input) if err != nil { return fmt.Errorf("updating EKS Cluster (%s) VPC config: %s", name, err) } - updateID := aws.StringValue(output.Update.Id) + updateID := aws.ToString(output.Update.Id) if _, err := waitClusterUpdateSuccessful(ctx, conn, name, updateID, timeout); err != nil { return fmt.Errorf("waiting for EKS Cluster (%s) VPC config update (%s): %s", name, updateID, err) @@ -621,15 +621,15 @@ func updateVPCConfig(ctx context.Context, conn *eks.EKS, name string, config *ek return nil } -func findClusterUpdateByTwoPartKey(ctx context.Context, conn *eks.EKS, name, id string) (*eks.Update, error) { +func findClusterUpdateByTwoPartKey(ctx context.Context, conn *eks.Client, name, id string) (*types.Update, error) { input := &eks.DescribeUpdateInput{ Name: aws.String(name), UpdateId: aws.String(id), } - output, err := conn.DescribeUpdateWithContext(ctx, input) + output, err := conn.DescribeUpdate(ctx, input) - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { + if errs.IsA[*types.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -647,9 +647,9 @@ func findClusterUpdateByTwoPartKey(ctx context.Context, conn *eks.EKS, name, id return output.Update, nil } -func statusCluster(ctx context.Context, conn *eks.EKS, name string) retry.StateRefreshFunc { +func statusCluster(ctx context.Context, conn *eks.Client, name string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindClusterByName(ctx, conn, name) + output, err := findClusterByName(ctx, conn, name) if tfresource.NotFound(err) { return nil, "", 
nil @@ -659,11 +659,11 @@ func statusCluster(ctx context.Context, conn *eks.EKS, name string) retry.StateR return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, string(output.Status), nil } } -func statusClusterUpdate(ctx context.Context, conn *eks.EKS, name, id string) retry.StateRefreshFunc { +func statusClusterUpdate(ctx context.Context, conn *eks.Client, name, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findClusterUpdateByTwoPartKey(ctx, conn, name, id) @@ -675,30 +675,30 @@ func statusClusterUpdate(ctx context.Context, conn *eks.EKS, name, id string) re return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, string(output.Status), nil } } -func waitClusterCreated(ctx context.Context, conn *eks.EKS, name string, timeout time.Duration) (*eks.Cluster, error) { +func waitClusterCreated(ctx context.Context, conn *eks.Client, name string, timeout time.Duration) (*types.Cluster, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{eks.ClusterStatusPending, eks.ClusterStatusCreating}, - Target: []string{eks.ClusterStatusActive}, + Pending: enum.Slice(types.ClusterStatusPending, types.ClusterStatusCreating), + Target: enum.Slice(types.ClusterStatusActive), Refresh: statusCluster(ctx, conn, name), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*eks.Cluster); ok { + if output, ok := outputRaw.(*types.Cluster); ok { return output, err } return nil, err } -func waitClusterDeleted(ctx context.Context, conn *eks.EKS, name string, timeout time.Duration) (*eks.Cluster, error) { +func waitClusterDeleted(ctx context.Context, conn *eks.Client, name string, timeout time.Duration) (*types.Cluster, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{eks.ClusterStatusActive, eks.ClusterStatusDeleting}, + Pending: enum.Slice(types.ClusterStatusActive, 
types.ClusterStatusDeleting), Target: []string{}, Refresh: statusCluster(ctx, conn, name), Timeout: timeout, @@ -706,26 +706,26 @@ func waitClusterDeleted(ctx context.Context, conn *eks.EKS, name string, timeout outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*eks.Cluster); ok { + if output, ok := outputRaw.(*types.Cluster); ok { return output, err } return nil, err } -func waitClusterUpdateSuccessful(ctx context.Context, conn *eks.EKS, name, id string, timeout time.Duration) (*eks.Update, error) { //nolint:unparam +func waitClusterUpdateSuccessful(ctx context.Context, conn *eks.Client, name, id string, timeout time.Duration) (*types.Update, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ - Pending: []string{eks.UpdateStatusInProgress}, - Target: []string{eks.UpdateStatusSuccessful}, + Pending: enum.Slice(types.UpdateStatusInProgress), + Target: enum.Slice(types.UpdateStatusSuccessful), Refresh: statusClusterUpdate(ctx, conn, name, id), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*eks.Update); ok { - if status := aws.StringValue(output.Status); status == eks.UpdateStatusCancelled || status == eks.UpdateStatusFailed { - tfresource.SetLastError(err, ErrorDetailsError(output.Errors)) + if output, ok := outputRaw.(*types.Update); ok { + if status := output.Status; status == types.UpdateStatusCancelled || status == types.UpdateStatusFailed { + tfresource.SetLastError(err, errorDetailsError(output.Errors)) } return output, err @@ -734,12 +734,12 @@ func waitClusterUpdateSuccessful(ctx context.Context, conn *eks.EKS, name, id st return nil, err } -func expandEncryptionConfig(tfList []interface{}) []*eks.EncryptionConfig { +func expandEncryptionConfig(tfList []interface{}) []types.EncryptionConfig { if len(tfList) == 0 { return nil } - var apiObjects []*eks.EncryptionConfig + var apiObjects []types.EncryptionConfig for _, tfMapRaw := range tfList { tfMap, ok := 
tfMapRaw.(map[string]interface{}) @@ -748,12 +748,12 @@ func expandEncryptionConfig(tfList []interface{}) []*eks.EncryptionConfig { continue } - apiObject := &eks.EncryptionConfig{ + apiObject := types.EncryptionConfig{ Provider: expandProvider(tfMap["provider"].([]interface{})), } if v, ok := tfMap["resources"].(*schema.Set); ok && v.Len() > 0 { - apiObject.Resources = flex.ExpandStringSet(v) + apiObject.Resources = flex.ExpandStringValueSet(v) } apiObjects = append(apiObjects, apiObject) @@ -762,14 +762,14 @@ func expandEncryptionConfig(tfList []interface{}) []*eks.EncryptionConfig { return apiObjects } -func expandProvider(tfList []interface{}) *eks.Provider { +func expandProvider(tfList []interface{}) *types.Provider { tfMap, ok := tfList[0].(map[string]interface{}) if !ok { return nil } - apiObject := &eks.Provider{} + apiObject := &types.Provider{} if v, ok := tfMap["key_arn"].(string); ok && v != "" { apiObject.KeyArn = aws.String(v) @@ -778,14 +778,14 @@ func expandProvider(tfList []interface{}) *eks.Provider { return apiObject } -func expandOutpostConfigRequest(l []interface{}) *eks.OutpostConfigRequest { +func expandOutpostConfigRequest(l []interface{}) *types.OutpostConfigRequest { tfMap, ok := l[0].(map[string]interface{}) if !ok { return nil } - outpostConfigRequest := &eks.OutpostConfigRequest{} + outpostConfigRequest := &types.OutpostConfigRequest{} if v, ok := tfMap["control_plane_instance_type"].(string); ok && v != "" { outpostConfigRequest.ControlPlaneInstanceType = aws.String(v) @@ -796,13 +796,13 @@ func expandOutpostConfigRequest(l []interface{}) *eks.OutpostConfigRequest { } if v, ok := tfMap["outpost_arns"].(*schema.Set); ok && v.Len() > 0 { - outpostConfigRequest.OutpostArns = flex.ExpandStringSet(v) + outpostConfigRequest.OutpostArns = flex.ExpandStringValueSet(v) } return outpostConfigRequest } -func expandControlPlanePlacement(tfList []interface{}) *eks.ControlPlanePlacementRequest { +func expandControlPlanePlacement(tfList 
[]interface{}) *types.ControlPlanePlacementRequest { if len(tfList) == 0 { return nil } @@ -813,7 +813,7 @@ func expandControlPlanePlacement(tfList []interface{}) *eks.ControlPlanePlacemen return nil } - apiObject := &eks.ControlPlanePlacementRequest{} + apiObject := &types.ControlPlanePlacementRequest{} if v, ok := tfMap["group_name"].(string); ok && v != "" { apiObject.GroupName = aws.String(v) @@ -822,81 +822,81 @@ func expandControlPlanePlacement(tfList []interface{}) *eks.ControlPlanePlacemen return apiObject } -func expandVPCConfigRequestForCreate(l []interface{}) *eks.VpcConfigRequest { +func expandVPCConfigRequestForCreate(l []interface{}) *types.VpcConfigRequest { if len(l) == 0 { return nil } m := l[0].(map[string]interface{}) - vpcConfigRequest := &eks.VpcConfigRequest{ + vpcConfigRequest := &types.VpcConfigRequest{ EndpointPrivateAccess: aws.Bool(m["endpoint_private_access"].(bool)), EndpointPublicAccess: aws.Bool(m["endpoint_public_access"].(bool)), - SecurityGroupIds: flex.ExpandStringSet(m["security_group_ids"].(*schema.Set)), - SubnetIds: flex.ExpandStringSet(m["subnet_ids"].(*schema.Set)), + SecurityGroupIds: flex.ExpandStringValueSet(m["security_group_ids"].(*schema.Set)), + SubnetIds: flex.ExpandStringValueSet(m["subnet_ids"].(*schema.Set)), } if v, ok := m["public_access_cidrs"].(*schema.Set); ok && v.Len() > 0 { - vpcConfigRequest.PublicAccessCidrs = flex.ExpandStringSet(v) + vpcConfigRequest.PublicAccessCidrs = flex.ExpandStringValueSet(v) } return vpcConfigRequest } -func expandKubernetesNetworkConfigRequest(tfList []interface{}) *eks.KubernetesNetworkConfigRequest { +func expandKubernetesNetworkConfigRequest(tfList []interface{}) *types.KubernetesNetworkConfigRequest { tfMap, ok := tfList[0].(map[string]interface{}) if !ok { return nil } - apiObject := &eks.KubernetesNetworkConfigRequest{} + apiObject := &types.KubernetesNetworkConfigRequest{} if v, ok := tfMap["service_ipv4_cidr"].(string); ok && v != "" { apiObject.ServiceIpv4Cidr = 
aws.String(v) } if v, ok := tfMap["ip_family"].(string); ok && v != "" { - apiObject.IpFamily = aws.String(v) + apiObject.IpFamily = types.IpFamily(v) } return apiObject } -func expandLogging(vEnabledLogTypes *schema.Set) *eks.Logging { +func expandLogging(vEnabledLogTypes *schema.Set) *types.Logging { vEksLogTypes := []interface{}{} - for _, eksLogType := range eks.LogType_Values() { + for _, eksLogType := range enum.Values[types.LogType]() { vEksLogTypes = append(vEksLogTypes, eksLogType) } vAllLogTypes := schema.NewSet(schema.HashString, vEksLogTypes) - return &eks.Logging{ - ClusterLogging: []*eks.LogSetup{ + return &types.Logging{ + ClusterLogging: []types.LogSetup{ { Enabled: aws.Bool(true), - Types: flex.ExpandStringSet(vEnabledLogTypes), + Types: flex.ExpandStringyValueSet[types.LogType](vEnabledLogTypes), }, { Enabled: aws.Bool(false), - Types: flex.ExpandStringSet(vAllLogTypes.Difference(vEnabledLogTypes)), + Types: flex.ExpandStringyValueSet[types.LogType](vAllLogTypes.Difference(vEnabledLogTypes)), }, }, } } -func flattenCertificate(certificate *eks.Certificate) []map[string]interface{} { +func flattenCertificate(certificate *types.Certificate) []map[string]interface{} { if certificate == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "data": aws.StringValue(certificate.Data), + "data": aws.ToString(certificate.Data), } return []map[string]interface{}{m} } -func flattenIdentity(identity *eks.Identity) []map[string]interface{} { +func flattenIdentity(identity *types.Identity) []map[string]interface{} { if identity == nil { return []map[string]interface{}{} } @@ -908,19 +908,19 @@ func flattenIdentity(identity *eks.Identity) []map[string]interface{} { return []map[string]interface{}{m} } -func flattenOIDC(oidc *eks.OIDC) []map[string]interface{} { +func flattenOIDC(oidc *types.OIDC) []map[string]interface{} { if oidc == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "issuer": 
aws.StringValue(oidc.Issuer), + "issuer": aws.ToString(oidc.Issuer), } return []map[string]interface{}{m} } -func flattenEncryptionConfig(apiObjects []*eks.EncryptionConfig) []interface{} { +func flattenEncryptionConfig(apiObjects []types.EncryptionConfig) []interface{} { if len(apiObjects) == 0 { return nil } @@ -930,7 +930,7 @@ func flattenEncryptionConfig(apiObjects []*eks.EncryptionConfig) []interface{} { for _, apiObject := range apiObjects { tfMap := map[string]interface{}{ "provider": flattenProvider(apiObject.Provider), - "resources": aws.StringValueSlice(apiObject.Resources), + "resources": apiObject.Resources, } tfList = append(tfList, tfMap) @@ -939,43 +939,43 @@ func flattenEncryptionConfig(apiObjects []*eks.EncryptionConfig) []interface{} { return tfList } -func flattenProvider(apiObject *eks.Provider) []interface{} { +func flattenProvider(apiObject *types.Provider) []interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{ - "key_arn": aws.StringValue(apiObject.KeyArn), + "key_arn": aws.ToString(apiObject.KeyArn), } return []interface{}{tfMap} } -func flattenVPCConfigResponse(vpcConfig *eks.VpcConfigResponse) []map[string]interface{} { +func flattenVPCConfigResponse(vpcConfig *types.VpcConfigResponse) []map[string]interface{} { if vpcConfig == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "cluster_security_group_id": aws.StringValue(vpcConfig.ClusterSecurityGroupId), - "endpoint_private_access": aws.BoolValue(vpcConfig.EndpointPrivateAccess), - "endpoint_public_access": aws.BoolValue(vpcConfig.EndpointPublicAccess), - "security_group_ids": flex.FlattenStringSet(vpcConfig.SecurityGroupIds), - "subnet_ids": flex.FlattenStringSet(vpcConfig.SubnetIds), - "public_access_cidrs": flex.FlattenStringSet(vpcConfig.PublicAccessCidrs), - "vpc_id": aws.StringValue(vpcConfig.VpcId), + "cluster_security_group_id": aws.ToString(vpcConfig.ClusterSecurityGroupId), + "endpoint_private_access": 
vpcConfig.EndpointPrivateAccess, + "endpoint_public_access": vpcConfig.EndpointPublicAccess, + "security_group_ids": vpcConfig.SecurityGroupIds, + "subnet_ids": vpcConfig.SubnetIds, + "public_access_cidrs": vpcConfig.PublicAccessCidrs, + "vpc_id": aws.ToString(vpcConfig.VpcId), } return []map[string]interface{}{m} } -func flattenLogging(logging *eks.Logging) *schema.Set { - enabledLogTypes := []*string{} +func flattenLogging(logging *types.Logging) []string { + enabledLogTypes := []types.LogType{} if logging != nil { logSetups := logging.ClusterLogging for _, logSetup := range logSetups { - if logSetup == nil || !aws.BoolValue(logSetup.Enabled) { + if !aws.ToBool(logSetup.Enabled) { continue } @@ -983,44 +983,44 @@ func flattenLogging(logging *eks.Logging) *schema.Set { } } - return flex.FlattenStringSet(enabledLogTypes) + return enum.Slice(enabledLogTypes...) } -func flattenKubernetesNetworkConfigResponse(apiObject *eks.KubernetesNetworkConfigResponse) []interface{} { +func flattenKubernetesNetworkConfigResponse(apiObject *types.KubernetesNetworkConfigResponse) []interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{ - "service_ipv4_cidr": aws.StringValue(apiObject.ServiceIpv4Cidr), - "service_ipv6_cidr": aws.StringValue(apiObject.ServiceIpv6Cidr), - "ip_family": aws.StringValue(apiObject.IpFamily), + "service_ipv4_cidr": aws.ToString(apiObject.ServiceIpv4Cidr), + "service_ipv6_cidr": aws.ToString(apiObject.ServiceIpv6Cidr), + "ip_family": apiObject.IpFamily, } return []interface{}{tfMap} } -func flattenOutpostConfigResponse(apiObject *eks.OutpostConfigResponse) []interface{} { +func flattenOutpostConfigResponse(apiObject *types.OutpostConfigResponse) []interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{ - "control_plane_instance_type": aws.StringValue(apiObject.ControlPlaneInstanceType), + "control_plane_instance_type": aws.ToString(apiObject.ControlPlaneInstanceType), "control_plane_placement": 
flattenControlPlanePlacementResponse(apiObject.ControlPlanePlacement), - "outpost_arns": aws.StringValueSlice(apiObject.OutpostArns), + "outpost_arns": apiObject.OutpostArns, } return []interface{}{tfMap} } -func flattenControlPlanePlacementResponse(apiObject *eks.ControlPlanePlacementResponse) []interface{} { +func flattenControlPlanePlacementResponse(apiObject *types.ControlPlanePlacementResponse) []interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{ - "group_name": aws.StringValue(apiObject.GroupName), + "group_name": aws.ToString(apiObject.GroupName), } return []interface{}{tfMap} diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index f3617cdbf7e..63d874b3e44 100644 --- a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -66,7 +66,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceCluster, + Factory: resourceCluster, TypeName: "aws_eks_cluster", Name: "Cluster", Tags: &types.ServicePackageResourceTags{ From 8c561ce48c05c2dbcf871c92c0ab74fc48d23215 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 15:39:25 -0500 Subject: [PATCH 35/46] d/aws_eks_cluster: Migrate to AWS SDK for Go v2. 
--- internal/service/eks/cluster_data_source.go | 11 +++++------ internal/service/eks/service_package_gen.go | 2 +- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/internal/service/eks/cluster_data_source.go b/internal/service/eks/cluster_data_source.go index 18cc7eeeb9f..fa8917958ae 100644 --- a/internal/service/eks/cluster_data_source.go +++ b/internal/service/eks/cluster_data_source.go @@ -6,7 +6,7 @@ package eks import ( "context" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -14,7 +14,7 @@ import ( ) // @SDKDataSource("aws_eks_cluster") -func DataSourceCluster() *schema.Resource { +func dataSourceCluster() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceClusterRead, @@ -47,7 +47,6 @@ func DataSourceCluster() *schema.Resource { Type: schema.TypeSet, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, }, "endpoint": { Type: schema.TypeString, @@ -190,11 +189,11 @@ func DataSourceCluster() *schema.Resource { } func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).EKSConn(ctx) + conn := meta.(*conns.AWSClient).EKSClient(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig name := d.Get("name").(string) - cluster, err := FindClusterByName(ctx, conn, name) + cluster, err := findClusterByName(ctx, conn, name) if err != nil { return diag.Errorf("reading EKS Cluster (%s): %s", name, err) @@ -209,7 +208,7 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta int if cluster.OutpostConfig != nil { d.Set("cluster_id", cluster.Id) } - d.Set("created_at", aws.TimeValue(cluster.CreatedAt).String()) + d.Set("created_at", aws.ToTime(cluster.CreatedAt).String()) if err := 
d.Set("enabled_cluster_log_types", flattenLogging(cluster.Logging)); err != nil { return diag.Errorf("setting enabled_cluster_log_types: %s", err) } diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index 63d874b3e44..3f8d01c0243 100644 --- a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -33,7 +33,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac TypeName: "aws_eks_addon_version", }, { - Factory: DataSourceCluster, + Factory: dataSourceCluster, TypeName: "aws_eks_cluster", }, { From 55b77a8894a4d84324c087abf60c23826b611b4e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 16:17:49 -0500 Subject: [PATCH 36/46] eks: Migrate sweepers to AWS SDK for Go v2. --- internal/service/eks/sweep.go | 339 ++++++++++++++++------------------ 1 file changed, 161 insertions(+), 178 deletions(-) diff --git a/internal/service/eks/sweep.go b/internal/service/eks/sweep.go index b6338af1e91..354db406cce 100644 --- a/internal/service/eks/sweep.go +++ b/internal/service/eks/sweep.go @@ -7,13 +7,14 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -55,64 +56,59 @@ func sweepAddons(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - - conn := 
client.EKSConn(ctx) + conn := client.EKSClient(ctx) input := &eks.ListClustersInput{} var sweeperErrs *multierror.Error sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListClustersPagesWithContext(ctx, input, func(page *eks.ListClustersOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := eks.NewListClustersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping EKS Add-On sweep for %s: %s", region, err) + return nil + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) + break } for _, v := range page.Clusters { - clusterName := aws.StringValue(v) + clusterName := v input := &eks.ListAddonsInput{ ClusterName: aws.String(clusterName), } - err := conn.ListAddonsPagesWithContext(ctx, input, func(page *eks.ListAddonsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := eks.NewListAddonsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + continue + } + + // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. + // ¯\_(ツ)_/¯ + if errs.IsA[*types.ResourceNotFoundException](err) { + continue + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Add-Ons (%s): %w", region, err)) + break } for _, v := range page.Addons { - r := ResourceAddon() + r := resourceAddon() d := r.Data(nil) - d.SetId(AddonCreateResourceID(clusterName, aws.StringValue(v))) + d.SetId(AddonCreateResourceID(clusterName, v)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - continue - } - - // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. 
- // ¯\_(ツ)_/¯ - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { - continue - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Add-Ons (%s): %w", region, err)) } } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Print(fmt.Errorf("[WARN] Skipping EKS Add-Ons sweep for %s: %w", region, err)) - return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -130,33 +126,30 @@ func sweepClusters(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.EKSConn(ctx) + conn := client.EKSClient(ctx) input := &eks.ListClustersInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListClustersPagesWithContext(ctx, input, func(page *eks.ListClustersOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := eks.NewListClustersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping EKS Cluster sweep for %s: %s", region, err) + return nil } - for _, cluster := range page.Clusters { - r := ResourceCluster() + if err != nil { + return fmt.Errorf("error listing EKS Clusters (%s): %w", region, err) + } + + for _, v := range page.Clusters { + r := resourceCluster() d := r.Data(nil) - d.SetId(aws.StringValue(cluster)) + d.SetId(v) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping EKS Clusters sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing EKS Clusters (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ 
-174,62 +167,59 @@ func sweepFargateProfiles(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - conn := client.EKSConn(ctx) + conn := client.EKSClient(ctx) input := &eks.ListClustersInput{} var sweeperErrs *multierror.Error sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListClustersPagesWithContext(ctx, input, func(page *eks.ListClustersOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := eks.NewListClustersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping EKS Fargate Profile sweep for %s: %s", region, err) + return nil + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) + break } - for _, cluster := range page.Clusters { + for _, v := range page.Clusters { + clusterName := v input := &eks.ListFargateProfilesInput{ - ClusterName: cluster, + ClusterName: aws.String(clusterName), } - err := conn.ListFargateProfilesPagesWithContext(ctx, input, func(page *eks.ListFargateProfilesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, profile := range page.FargateProfileNames { - r := ResourceFargateProfile() - d := r.Data(nil) - d.SetId(FargateProfileCreateResourceID(aws.StringValue(cluster), aws.StringValue(profile))) + pages := eks.NewListFargateProfilesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + if awsv2.SkipSweepError(err) { + continue } - return !lastPage - }) + // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. 
+ // ¯\_(ツ)_/¯ + if errs.IsA[*types.ResourceNotFoundException](err) { + continue + } - if awsv1.SkipSweepError(err) { - continue - } + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Fargate Profiles (%s): %w", region, err)) + break + } - // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. - // ¯\_(ツ)_/¯ - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { - continue - } + for _, v := range page.FargateProfileNames { + r := resourceFargateProfile() + d := r.Data(nil) + d.SetId(FargateProfileCreateResourceID(clusterName, v)) - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Fargate Profiles (%s): %w", region, err)) + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + } } } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping EKS Fargate Profiles sweep for %s: %s", region, err) - return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -247,63 +237,59 @@ func sweepIdentityProvidersConfig(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - - conn := client.EKSConn(ctx) + conn := client.EKSClient(ctx) input := &eks.ListClustersInput{} var sweeperErrs *multierror.Error sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListClustersPagesWithContext(ctx, input, func(page *eks.ListClustersOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := eks.NewListClustersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping EKS Identity Provider Config sweep for %s: %s", region, err) + 
return nil } - for _, cluster := range page.Clusters { + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) + break + } + + for _, v := range page.Clusters { + clusterName := v input := &eks.ListIdentityProviderConfigsInput{ - ClusterName: cluster, + ClusterName: aws.String(clusterName), } - err := conn.ListIdentityProviderConfigsPagesWithContext(ctx, input, func(page *eks.ListIdentityProviderConfigsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, identityProviderConfig := range page.IdentityProviderConfigs { - r := ResourceIdentityProviderConfig() - d := r.Data(nil) - d.SetId(IdentityProviderConfigCreateResourceID(aws.StringValue(cluster), aws.StringValue(identityProviderConfig.Name))) + pages := eks.NewListIdentityProviderConfigsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + if awsv2.SkipSweepError(err) { + continue } - return !lastPage - }) + // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. + // ¯\_(ツ)_/¯ + if errs.IsA[*types.ResourceNotFoundException](err) { + continue + } - if awsv1.SkipSweepError(err) { - continue - } + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Identity Provider Configs (%s): %w", region, err)) + break + } - // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. 
- // ¯\_(ツ)_/¯ - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { - continue - } + for _, v := range page.IdentityProviderConfigs { + r := resourceIdentityProviderConfig() + d := r.Data(nil) + d.SetId(IdentityProviderConfigCreateResourceID(clusterName, aws.ToString(v.Name))) - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Identity Provider Configs (%s): %w", region, err)) + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + } } } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Print(fmt.Errorf("[WARN] Skipping EKS Identity Provider Configs sweep for %s: %w", region, err)) - return sweeperErrs // In case we have completed some pages, but had errors - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -321,62 +307,59 @@ func sweepNodeGroups(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - conn := client.EKSConn(ctx) + conn := client.EKSClient(ctx) input := &eks.ListClustersInput{} var sweeperErrs *multierror.Error sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListClustersPagesWithContext(ctx, input, func(page *eks.ListClustersOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := eks.NewListClustersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping EKS Node Group sweep for %s: %s", region, err) + return nil } - for _, cluster := range page.Clusters { + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) + break + } + + for _, v := range page.Clusters { + clusterName := v input := &eks.ListNodegroupsInput{ - ClusterName: cluster, + ClusterName: 
aws.String(clusterName), } - err := conn.ListNodegroupsPagesWithContext(ctx, input, func(page *eks.ListNodegroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, nodeGroup := range page.Nodegroups { - r := ResourceNodeGroup() - d := r.Data(nil) - d.SetId(NodeGroupCreateResourceID(aws.StringValue(cluster), aws.StringValue(nodeGroup))) + pages := eks.NewListNodegroupsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + if awsv2.SkipSweepError(err) { + continue } - return !lastPage - }) + // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. + // ¯\_(ツ)_/¯ + if errs.IsA[*types.ResourceNotFoundException](err) { + continue + } - if awsv1.SkipSweepError(err) { - continue - } + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Node Groups (%s): %w", region, err)) + break + } - // There are EKS clusters that are listed (and are in the AWS Console) but can't be found. 
- // ¯\_(ツ)_/¯ - if tfawserr.ErrCodeEquals(err, eks.ErrCodeResourceNotFoundException) { - continue - } + for _, v := range page.Nodegroups { + r := resourceNodeGroup() + d := r.Data(nil) + d.SetId(NodeGroupCreateResourceID(clusterName, v)) - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Node Groups (%s): %w", region, err)) + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + } } } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping EKS Node Groups sweep for %s: %s", region, err) - return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EKS Clusters (%s): %w", region, err)) } err = sweep.SweepOrchestrator(ctx, sweepResources) From b20f095e1b230f5d0edf9d15f972855de44d0a82 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 16:23:02 -0500 Subject: [PATCH 37/46] Add 'names.ChinaPartitionID'. --- names/names.go | 1 + 1 file changed, 1 insertion(+) diff --git a/names/names.go b/names/names.go index 21fadc9135f..36644d13484 100644 --- a/names/names.go +++ b/names/names.go @@ -81,6 +81,7 @@ const ( // These should move to aws-sdk-go-base. // See https://github.com/hashicorp/aws-sdk-go-base/issues/649. const ( + ChinaPartitionID = "aws-cn" // AWS China partition. StandardPartitionID = "aws" // AWS Standard partition. USGovCloudPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. ) From dd5c9285eb66833e6ba450d21286a997667859e7 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 16:23:25 -0500 Subject: [PATCH 38/46] eks: Migrate 'Canonicalize' to AWS SDK for Go v2. 
--- internal/service/eks/arn.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/service/eks/arn.go b/internal/service/eks/arn.go index a8ece1dada7..0144829fbfe 100644 --- a/internal/service/eks/arn.go +++ b/internal/service/eks/arn.go @@ -16,8 +16,8 @@ import ( "fmt" "strings" - awsarn "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/aws/endpoints" + awsarn "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/hashicorp/terraform-provider-aws/names" ) // Canonicalize validates IAM resources are appropriate for the authenticator @@ -71,9 +71,9 @@ func Canonicalize(arn string) (string, error) { func checkPartition(partition string) error { switch partition { - case endpoints.AwsPartitionID: - case endpoints.AwsCnPartitionID: - case endpoints.AwsUsGovPartitionID: + case names.StandardPartitionID: + case names.ChinaPartitionID: + case names.USGovCloudPartitionID: default: return fmt.Errorf("partion %q is not recognized", partition) } From 9d992011967802d57e26ee16faad2cf9728817ba Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 7 Nov 2023 16:47:47 -0500 Subject: [PATCH 39/46] eks: Migrate acceptance tests to AWS SDK for Go v2. 
--- internal/service/eks/addon_test.go | 102 ++++++----- .../eks/addon_version_data_source_test.go | 8 +- .../eks/cluster_auth_data_source_test.go | 4 +- .../service/eks/cluster_data_source_test.go | 6 +- internal/service/eks/cluster_test.go | 103 ++++++------ .../service/eks/clusters_data_source_test.go | 7 +- internal/service/eks/exports_test.go | 19 +++ internal/service/eks/fargate_profile_test.go | 101 +++++------ .../eks/identity_provider_config_test.go | 41 ++--- .../eks/node_group_data_source_test.go | 7 +- internal/service/eks/node_group_test.go | 159 +++++++++--------- .../eks/node_groups_data_source_test.go | 4 +- 12 files changed, 271 insertions(+), 290 deletions(-) create mode 100644 internal/service/eks/exports_test.go diff --git a/internal/service/eks/addon_test.go b/internal/service/eks/addon_test.go index 47cd54ac3ae..9796bc64085 100644 --- a/internal/service/eks/addon_test.go +++ b/internal/service/eks/addon_test.go @@ -9,7 +9,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -17,11 +18,12 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" tfeks "github.com/hashicorp/terraform-provider-aws/internal/service/eks" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccEKSAddon_basic(t *testing.T) { ctx := acctest.Context(t) - var addon eks.Addon + var addon types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) clusterResourceName := "aws_eks_cluster.test" addonResourceName := "aws_eks_addon.test" @@ -29,7 +31,7 @@ func TestAccEKSAddon_basic(t *testing.T) { resource.ParallelTest(t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ @@ -57,14 +59,14 @@ func TestAccEKSAddon_basic(t *testing.T) { func TestAccEKSAddon_disappears(t *testing.T) { ctx := acctest.Context(t) - var addon eks.Addon + var addon types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" addonName := "vpc-cni" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ @@ -82,7 +84,7 @@ func TestAccEKSAddon_disappears(t *testing.T) { func TestAccEKSAddon_Disappears_cluster(t *testing.T) { ctx := acctest.Context(t) - var addon eks.Addon + var addon types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" clusterResourceName := "aws_eks_cluster.test" @@ -90,7 +92,7 @@ func TestAccEKSAddon_Disappears_cluster(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ @@ -108,7 +110,7 @@ func TestAccEKSAddon_Disappears_cluster(t *testing.T) { func TestAccEKSAddon_addonVersion(t *testing.T) { ctx := 
acctest.Context(t) - var addon1, addon2 eks.Addon + var addon1, addon2 types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" addonName := "vpc-cni" @@ -117,7 +119,7 @@ func TestAccEKSAddon_addonVersion(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ @@ -147,14 +149,14 @@ func TestAccEKSAddon_addonVersion(t *testing.T) { func TestAccEKSAddon_preserve(t *testing.T) { ctx := acctest.Context(t) - var addon eks.Addon + var addon types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" addonName := "vpc-cni" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ @@ -177,22 +179,22 @@ func TestAccEKSAddon_preserve(t *testing.T) { func TestAccEKSAddon_deprecated(t *testing.T) { ctx := acctest.Context(t) - var addon1, addon2, addon3 eks.Addon + var addon1, addon2, addon3 types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" addonName := "vpc-cni" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccAddonConfig_deprecated(rName, addonName, eks.ResolveConflictsNone), + Config: testAccAddonConfig_deprecated(rName, addonName, string(types.ResolveConflictsNone)), Check: resource.ComposeTestCheckFunc( testAccCheckAddonExists(ctx, resourceName, &addon1), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts", eks.ResolveConflictsNone), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts", string(types.ResolveConflictsNone)), ), }, { @@ -202,17 +204,17 @@ func TestAccEKSAddon_deprecated(t *testing.T) { ImportStateVerifyIgnore: []string{"resolve_conflicts"}, }, { - Config: testAccAddonConfig_deprecated(rName, addonName, eks.ResolveConflictsOverwrite), + Config: testAccAddonConfig_deprecated(rName, addonName, string(types.ResolveConflictsOverwrite)), Check: resource.ComposeTestCheckFunc( testAccCheckAddonExists(ctx, resourceName, &addon2), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts", eks.ResolveConflictsOverwrite), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts", string(types.ResolveConflictsOverwrite)), ), }, { - Config: testAccAddonConfig_deprecated(rName, addonName, eks.ResolveConflictsPreserve), + Config: testAccAddonConfig_deprecated(rName, addonName, string(types.ResolveConflictsPreserve)), Check: resource.ComposeTestCheckFunc( testAccCheckAddonExists(ctx, resourceName, &addon3), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts", eks.ResolveConflictsPreserve), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts", string(types.ResolveConflictsPreserve)), ), }, }, @@ -221,23 +223,23 @@ func TestAccEKSAddon_deprecated(t *testing.T) { func TestAccEKSAddon_resolveConflicts(t *testing.T) { ctx := acctest.Context(t) - var addon1, addon2, addon3 eks.Addon + var addon1, addon2, addon3 types.Addon rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" addonName := "vpc-cni" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccAddonConfig_resolveConflicts(rName, addonName, eks.ResolveConflictsNone, eks.ResolveConflictsNone), + Config: testAccAddonConfig_resolveConflicts(rName, addonName, string(types.ResolveConflictsNone), string(types.ResolveConflictsNone)), Check: resource.ComposeTestCheckFunc( testAccCheckAddonExists(ctx, resourceName, &addon1), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_create", eks.ResolveConflictsNone), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_update", eks.ResolveConflictsNone), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_create", string(types.ResolveConflictsNone)), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_update", string(types.ResolveConflictsNone)), ), }, { @@ -247,19 +249,19 @@ func TestAccEKSAddon_resolveConflicts(t *testing.T) { ImportStateVerifyIgnore: []string{"resolve_conflicts_on_create", "resolve_conflicts_on_update"}, }, { - Config: testAccAddonConfig_resolveConflicts(rName, addonName, eks.ResolveConflictsOverwrite, eks.ResolveConflictsOverwrite), + Config: testAccAddonConfig_resolveConflicts(rName, addonName, string(types.ResolveConflictsOverwrite), string(types.ResolveConflictsOverwrite)), Check: resource.ComposeTestCheckFunc( testAccCheckAddonExists(ctx, resourceName, &addon2), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_create", eks.ResolveConflictsOverwrite), - resource.TestCheckResourceAttr(resourceName, 
"resolve_conflicts_on_update", eks.ResolveConflictsOverwrite), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_create", string(types.ResolveConflictsOverwrite)), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_update", string(types.ResolveConflictsOverwrite)), ), }, { - Config: testAccAddonConfig_resolveConflicts(rName, addonName, eks.ResolveConflictsOverwrite, eks.ResolveConflictsPreserve), + Config: testAccAddonConfig_resolveConflicts(rName, addonName, string(types.ResolveConflictsOverwrite), string(types.ResolveConflictsPreserve)), Check: resource.ComposeTestCheckFunc( testAccCheckAddonExists(ctx, resourceName, &addon3), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_create", eks.ResolveConflictsOverwrite), - resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_update", eks.ResolveConflictsPreserve), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_create", string(types.ResolveConflictsOverwrite)), + resource.TestCheckResourceAttr(resourceName, "resolve_conflicts_on_update", string(types.ResolveConflictsPreserve)), ), }, }, @@ -268,7 +270,7 @@ func TestAccEKSAddon_resolveConflicts(t *testing.T) { func TestAccEKSAddon_serviceAccountRoleARN(t *testing.T) { ctx := acctest.Context(t) - var addon eks.Addon + var addon types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" serviceRoleResourceName := "aws_iam_role.test-service-role" @@ -276,7 +278,7 @@ func TestAccEKSAddon_serviceAccountRoleARN(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ @@ -298,7 +300,7 @@ func 
TestAccEKSAddon_serviceAccountRoleARN(t *testing.T) { func TestAccEKSAddon_configurationValues(t *testing.T) { ctx := acctest.Context(t) - var addon eks.Addon + var addon types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" configurationValues := "{\"env\": {\"WARM_ENI_TARGET\":\"2\",\"ENABLE_POD_ENI\":\"true\"},\"resources\": {\"limits\":{\"cpu\":\"100m\",\"memory\":\"100Mi\"},\"requests\":{\"cpu\":\"100m\",\"memory\":\"100Mi\"}}}" @@ -310,12 +312,12 @@ func TestAccEKSAddon_configurationValues(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, configurationValues, eks.ResolveConflictsOverwrite), + Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, configurationValues, string(types.ResolveConflictsOverwrite)), Check: resource.ComposeTestCheckFunc( testAccCheckAddonExists(ctx, resourceName, &addon), resource.TestCheckResourceAttr(resourceName, "configuration_values", configurationValues), @@ -328,21 +330,21 @@ func TestAccEKSAddon_configurationValues(t *testing.T) { ImportStateVerifyIgnore: []string{"resolve_conflicts"}, }, { - Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, updateConfigurationValues, eks.ResolveConflictsOverwrite), + Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, updateConfigurationValues, string(types.ResolveConflictsOverwrite)), Check: resource.ComposeTestCheckFunc( testAccCheckAddonExists(ctx, resourceName, &addon), resource.TestCheckResourceAttr(resourceName, 
"configuration_values", updateConfigurationValues), ), }, { - Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, emptyConfigurationValues, eks.ResolveConflictsOverwrite), + Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, emptyConfigurationValues, string(types.ResolveConflictsOverwrite)), Check: resource.ComposeTestCheckFunc( testAccCheckAddonExists(ctx, resourceName, &addon), resource.TestCheckResourceAttr(resourceName, "configuration_values", emptyConfigurationValues), ), }, { - Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, invalidConfigurationValues, eks.ResolveConflictsOverwrite), + Config: testAccAddonConfig_configurationValues(rName, addonName, addonVersion, invalidConfigurationValues, string(types.ResolveConflictsOverwrite)), ExpectError: regexache.MustCompile(`InvalidParameterException: ConfigurationValue provided in request is not supported`), }, }, @@ -351,14 +353,14 @@ func TestAccEKSAddon_configurationValues(t *testing.T) { func TestAccEKSAddon_tags(t *testing.T) { ctx := acctest.Context(t) - var addon1, addon2, addon3 eks.Addon + var addon1, addon2, addon3 types.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" addonName := "vpc-cni" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ @@ -396,26 +398,21 @@ func TestAccEKSAddon_tags(t *testing.T) { }) } -func testAccCheckAddonExists(ctx context.Context, n string, v *eks.Addon) resource.TestCheckFunc { +func testAccCheckAddonExists(ctx context.Context, n string, v *types.Addon) resource.TestCheckFunc { return func(s 
*terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No EKS Add-On ID is set") - } - clusterName, addonName, err := tfeks.AddonParseResourceID(rs.Primary.ID) - if err != nil { return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) - output, err := tfeks.FindAddonByClusterNameAndAddonName(ctx, conn, clusterName, addonName) + output, err := tfeks.FindAddonByTwoPartKey(ctx, conn, clusterName, addonName) if err != nil { return err @@ -429,7 +426,7 @@ func testAccCheckAddonExists(ctx context.Context, n string, v *eks.Addon) resour func testAccCheckAddonDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_eks_addon" { @@ -437,12 +434,11 @@ func testAccCheckAddonDestroy(ctx context.Context) resource.TestCheckFunc { } clusterName, addonName, err := tfeks.AddonParseResourceID(rs.Primary.ID) - if err != nil { return err } - _, err = tfeks.FindAddonByClusterNameAndAddonName(ctx, conn, clusterName, addonName) + _, err = tfeks.FindAddonByTwoPartKey(ctx, conn, clusterName, addonName) if tfresource.NotFound(err) { continue @@ -460,11 +456,11 @@ func testAccCheckAddonDestroy(ctx context.Context) resource.TestCheckFunc { } func testAccPreCheckAddon(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) input := &eks.DescribeAddonVersionsInput{} - _, err := conn.DescribeAddonVersionsWithContext(ctx, input) + _, err := conn.DescribeAddonVersions(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", 
err) diff --git a/internal/service/eks/addon_version_data_source_test.go b/internal/service/eks/addon_version_data_source_test.go index 576fc022b33..d9ca1253395 100644 --- a/internal/service/eks/addon_version_data_source_test.go +++ b/internal/service/eks/addon_version_data_source_test.go @@ -7,15 +7,14 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccEKSAddonVersionDataSource_basic(t *testing.T) { ctx := acctest.Context(t) - var addon eks.Addon rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) versionDataSourceName := "data.aws_eks_addon_version.test" addonDataSourceName := "data.aws_eks_addon.test" @@ -23,14 +22,12 @@ func TestAccEKSAddonVersionDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckAddonDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAddonVersionDataSourceConfig_basic(rName, addonName, true), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, addonDataSourceName, &addon), resource.TestCheckResourceAttrPair(versionDataSourceName, "version", addonDataSourceName, "addon_version"), resource.TestCheckResourceAttrPair(versionDataSourceName, "addon_name", addonDataSourceName, "addon_name"), resource.TestCheckResourceAttr(versionDataSourceName, "most_recent", "true"), @@ -39,7 +36,6 @@ func TestAccEKSAddonVersionDataSource_basic(t *testing.T) { { Config: testAccAddonVersionDataSourceConfig_basic(rName, addonName, 
false), Check: resource.ComposeTestCheckFunc( - testAccCheckAddonExists(ctx, addonDataSourceName, &addon), resource.TestCheckResourceAttrPair(versionDataSourceName, "version", addonDataSourceName, "addon_version"), resource.TestCheckResourceAttrPair(versionDataSourceName, "addon_name", addonDataSourceName, "addon_name"), resource.TestCheckResourceAttr(versionDataSourceName, "most_recent", "false"), diff --git a/internal/service/eks/cluster_auth_data_source_test.go b/internal/service/eks/cluster_auth_data_source_test.go index 2e58cd74b3f..283ef858b9c 100644 --- a/internal/service/eks/cluster_auth_data_source_test.go +++ b/internal/service/eks/cluster_auth_data_source_test.go @@ -7,11 +7,11 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/eks" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" tfeks "github.com/hashicorp/terraform-provider-aws/internal/service/eks" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccEKSClusterAuthDataSource_basic(t *testing.T) { @@ -20,7 +20,7 @@ func TestAccEKSClusterAuthDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { diff --git a/internal/service/eks/cluster_data_source_test.go b/internal/service/eks/cluster_data_source_test.go index 4cab4dd9196..3f4a82aba5c 100644 --- a/internal/service/eks/cluster_data_source_test.go +++ b/internal/service/eks/cluster_data_source_test.go @@ -7,10 +7,10 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" 
"github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccEKSClusterDataSource_basic(t *testing.T) { @@ -21,7 +21,7 @@ func TestAccEKSClusterDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -70,7 +70,7 @@ func TestAccEKSClusterDataSource_outpost(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOutpostsOutposts(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ diff --git a/internal/service/eks/cluster_test.go b/internal/service/eks/cluster_test.go index cf5ce6b2eb7..9ff88b6bfc3 100644 --- a/internal/service/eks/cluster_test.go +++ b/internal/service/eks/cluster_test.go @@ -11,8 +11,9 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -20,6 +21,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" tfeks "github.com/hashicorp/terraform-provider-aws/internal/service/eks" 
"github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" ) const ( @@ -29,13 +31,13 @@ const ( func TestAccEKSCluster_basic(t *testing.T) { ctx := acctest.Context(t) - var cluster eks.Cluster + var cluster types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -57,7 +59,7 @@ func TestAccEKSCluster_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.ip_family", "ipv4"), resource.TestMatchResourceAttr(resourceName, "platform_version", regexache.MustCompile(`^eks\.\d+$`)), resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), - resource.TestCheckResourceAttr(resourceName, "status", eks.ClusterStatusActive), + resource.TestCheckResourceAttr(resourceName, "status", string(types.ClusterStatusActive)), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestMatchResourceAttr(resourceName, "version", regexache.MustCompile(`^\d+\.\d+$`)), resource.TestCheckResourceAttr(resourceName, "vpc_config.#", "1"), @@ -79,13 +81,13 @@ func TestAccEKSCluster_basic(t *testing.T) { func TestAccEKSCluster_disappears(t *testing.T) { ctx := acctest.Context(t) - var cluster eks.Cluster + var cluster types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, 
names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -103,14 +105,14 @@ func TestAccEKSCluster_disappears(t *testing.T) { func TestAccEKSCluster_Encryption_create(t *testing.T) { ctx := acctest.Context(t) - var cluster eks.Cluster + var cluster types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" kmsKeyResourceName := "aws_kms_key.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -135,14 +137,14 @@ func TestAccEKSCluster_Encryption_create(t *testing.T) { func TestAccEKSCluster_Encryption_update(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 eks.Cluster + var cluster1, cluster2 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" kmsKeyResourceName := "aws_kms_key.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -176,14 +178,14 @@ func TestAccEKSCluster_Encryption_update(t *testing.T) { // https://github.com/hashicorp/terraform-provider-aws/issues/19968. 
func TestAccEKSCluster_Encryption_versionUpdate(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 eks.Cluster + var cluster1, cluster2 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" kmsKeyResourceName := "aws_kms_key.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -221,13 +223,13 @@ func TestAccEKSCluster_Encryption_versionUpdate(t *testing.T) { func TestAccEKSCluster_version(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 eks.Cluster + var cluster1, cluster2 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -257,13 +259,13 @@ func TestAccEKSCluster_version(t *testing.T) { func TestAccEKSCluster_logging(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 eks.Cluster + var cluster1, cluster2 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: 
testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -305,13 +307,13 @@ func TestAccEKSCluster_logging(t *testing.T) { func TestAccEKSCluster_tags(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2, cluster3 eks.Cluster + var cluster1, cluster2, cluster3 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -351,13 +353,13 @@ func TestAccEKSCluster_tags(t *testing.T) { func TestAccEKSCluster_VPC_securityGroupIDs(t *testing.T) { ctx := acctest.Context(t) - var cluster eks.Cluster + var cluster types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -380,13 +382,13 @@ func TestAccEKSCluster_VPC_securityGroupIDs(t *testing.T) { func TestAccEKSCluster_VPC_securityGroupIDsAndSubnetIDs_update(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 eks.Cluster + var cluster1, cluster2 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, 
names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -420,13 +422,13 @@ func TestAccEKSCluster_VPC_securityGroupIDsAndSubnetIDs_update(t *testing.T) { func TestAccEKSCluster_VPC_endpointPrivateAccess(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2, cluster3 eks.Cluster + var cluster1, cluster2, cluster3 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -467,13 +469,13 @@ func TestAccEKSCluster_VPC_endpointPrivateAccess(t *testing.T) { func TestAccEKSCluster_VPC_endpointPublicAccess(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2, cluster3 eks.Cluster + var cluster1, cluster2, cluster3 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -514,13 +516,13 @@ func TestAccEKSCluster_VPC_endpointPublicAccess(t *testing.T) { func TestAccEKSCluster_VPC_publicAccessCIDRs(t *testing.T) { ctx := acctest.Context(t) - var cluster eks.Cluster + var cluster types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, 
resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -551,13 +553,13 @@ func TestAccEKSCluster_VPC_publicAccessCIDRs(t *testing.T) { func TestAccEKSCluster_Network_serviceIPv4CIDR(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 eks.Cluster + var cluster1, cluster2 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -614,13 +616,13 @@ func TestAccEKSCluster_Network_serviceIPv4CIDR(t *testing.T) { func TestAccEKSCluster_Network_ipFamily(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 eks.Cluster + var cluster1, cluster2 types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -665,14 +667,14 @@ func TestAccEKSCluster_Network_ipFamily(t *testing.T) { func TestAccEKSCluster_Outpost_create(t *testing.T) { ctx := acctest.Context(t) - var cluster eks.Cluster + var cluster types.Cluster rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" controlPlaneInstanceType := "m5d.large" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOutpostsOutposts(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -697,14 +699,14 @@ func TestAccEKSCluster_Outpost_create(t *testing.T) { func TestAccEKSCluster_Outpost_placement(t *testing.T) { ctx := acctest.Context(t) - var cluster eks.Cluster + var cluster types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" controlPlaneInstanceType := "m5d.large" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckOutpostsOutposts(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -728,17 +730,14 @@ func TestAccEKSCluster_Outpost_placement(t *testing.T) { }) } -func testAccCheckClusterExists(ctx context.Context, resourceName string, cluster *eks.Cluster) resource.TestCheckFunc { +func testAccCheckClusterExists(ctx context.Context, n string, v *types.Cluster) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", resourceName) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No EKS Cluster ID is set") + return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + conn := 
acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) output, err := tfeks.FindClusterByName(ctx, conn, rs.Primary.ID) @@ -746,7 +745,7 @@ func testAccCheckClusterExists(ctx context.Context, resourceName string, cluster return err } - *cluster = *output + *v = *output return nil } @@ -759,7 +758,7 @@ func testAccCheckClusterDestroy(ctx context.Context) resource.TestCheckFunc { continue } - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) _, err := tfeks.FindClusterByName(ctx, conn, rs.Primary.ID) @@ -778,9 +777,9 @@ func testAccCheckClusterDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckClusterRecreated(i, j *eks.Cluster) resource.TestCheckFunc { +func testAccCheckClusterRecreated(i, j *types.Cluster) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.TimeValue(i.CreatedAt).Equal(aws.TimeValue(j.CreatedAt)) { + if aws.ToTime(i.CreatedAt).Equal(aws.ToTime(j.CreatedAt)) { return errors.New("EKS Cluster was not recreated") } @@ -788,9 +787,9 @@ func testAccCheckClusterRecreated(i, j *eks.Cluster) resource.TestCheckFunc { } } -func testAccCheckClusterNotRecreated(i, j *eks.Cluster) resource.TestCheckFunc { +func testAccCheckClusterNotRecreated(i, j *types.Cluster) resource.TestCheckFunc { return func(s *terraform.State) error { - if !aws.TimeValue(i.CreatedAt).Equal(aws.TimeValue(j.CreatedAt)) { + if !aws.ToTime(i.CreatedAt).Equal(aws.ToTime(j.CreatedAt)) { return errors.New("EKS Cluster was recreated") } @@ -799,11 +798,11 @@ func testAccCheckClusterNotRecreated(i, j *eks.Cluster) resource.TestCheckFunc { } func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) input := &eks.ListClustersInput{} - _, err := conn.ListClustersWithContext(ctx, input) + _, err := conn.ListClusters(ctx, input) if 
acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) diff --git a/internal/service/eks/clusters_data_source_test.go b/internal/service/eks/clusters_data_source_test.go index a35a5c0f752..d4d02357276 100644 --- a/internal/service/eks/clusters_data_source_test.go +++ b/internal/service/eks/clusters_data_source_test.go @@ -6,10 +6,10 @@ package eks_test import ( "testing" - "github.com/aws/aws-sdk-go/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccEKSClustersDataSource_basic(t *testing.T) { @@ -19,7 +19,7 @@ func TestAccEKSClustersDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -34,8 +34,7 @@ func TestAccEKSClustersDataSource_basic(t *testing.T) { } func testAccClustersDataSourceConfig_basic(rName string) string { - return acctest.ConfigCompose( - testAccClusterConfig_required(rName), ` + return acctest.ConfigCompose(testAccClusterConfig_required(rName), ` data "aws_eks_clusters" "test" { depends_on = [aws_eks_cluster.test] } diff --git a/internal/service/eks/exports_test.go b/internal/service/eks/exports_test.go new file mode 100644 index 00000000000..3edfc733ce6 --- /dev/null +++ b/internal/service/eks/exports_test.go @@ -0,0 +1,19 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package eks + +// Exports for use in tests only. 
+var ( + ResourceAddon = resourceAddon + ResourceCluster = resourceCluster + ResourceFargateProfile = resourceFargateProfile + ResourceIdentityProviderConfig = resourceIdentityProviderConfig + ResourceNodeGroup = resourceNodeGroup + + FindAddonByTwoPartKey = findAddonByTwoPartKey + FindClusterByName = findClusterByName + FindFargateProfileByTwoPartKey = findFargateProfileByTwoPartKey + FindNodegroupByTwoPartKey = findNodegroupByTwoPartKey + FindOIDCIdentityProviderConfigByTwoPartKey = findOIDCIdentityProviderConfigByTwoPartKey +) diff --git a/internal/service/eks/fargate_profile_test.go b/internal/service/eks/fargate_profile_test.go index 7bfca00372f..ab6cd2e7327 100644 --- a/internal/service/eks/fargate_profile_test.go +++ b/internal/service/eks/fargate_profile_test.go @@ -9,8 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -18,19 +17,24 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" tfeks "github.com/hashicorp/terraform-provider-aws/internal/service/eks" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccEKSFargateProfile_basic(t *testing.T) { ctx := acctest.Context(t) - var fargateProfile eks.FargateProfile + var fargateProfile types.FargateProfile rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) eksClusterResourceName := "aws_eks_cluster.test" iamRoleResourceName := "aws_iam_role.pod" resourceName := "aws_eks_fargate_profile.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckFargateProfile(t) }, - ErrorCheck: 
acctest.ErrorCheck(t, eks.EndpointsID), + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartition(t, names.StandardPartitionID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFargateProfileDestroy(ctx), Steps: []resource.TestStep{ @@ -43,7 +47,7 @@ func TestAccEKSFargateProfile_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "fargate_profile_name", rName), resource.TestCheckResourceAttrPair(resourceName, "pod_execution_role_arn", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "selector.#", "1"), - resource.TestCheckResourceAttr(resourceName, "status", eks.FargateProfileStatusActive), + resource.TestCheckResourceAttr(resourceName, "status", string(types.FargateProfileStatusActive)), resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "2"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), @@ -59,13 +63,17 @@ func TestAccEKSFargateProfile_basic(t *testing.T) { func TestAccEKSFargateProfile_disappears(t *testing.T) { ctx := acctest.Context(t) - var fargateProfile eks.FargateProfile + var fargateProfile types.FargateProfile rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_fargate_profile.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckFargateProfile(t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartition(t, names.StandardPartitionID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFargateProfileDestroy(ctx), Steps: []resource.TestStep{ @@ -83,14 +91,18 @@ func TestAccEKSFargateProfile_disappears(t *testing.T) { func 
TestAccEKSFargateProfile_Multi_profile(t *testing.T) { ctx := acctest.Context(t) - var fargateProfile eks.FargateProfile + var fargateProfile types.FargateProfile rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName1 := "aws_eks_fargate_profile.test.0" resourceName2 := "aws_eks_fargate_profile.test.1" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckFargateProfile(t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartition(t, names.StandardPartitionID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFargateProfileDestroy(ctx), Steps: []resource.TestStep{ @@ -107,13 +119,17 @@ func TestAccEKSFargateProfile_Multi_profile(t *testing.T) { func TestAccEKSFargateProfile_Selector_labels(t *testing.T) { ctx := acctest.Context(t) - var fargateProfile1 eks.FargateProfile + var fargateProfile1 types.FargateProfile rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_fargate_profile.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckFargateProfile(t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartition(t, names.StandardPartitionID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFargateProfileDestroy(ctx), Steps: []resource.TestStep{ @@ -134,13 +150,17 @@ func TestAccEKSFargateProfile_Selector_labels(t *testing.T) { func TestAccEKSFargateProfile_tags(t *testing.T) { ctx := acctest.Context(t) - var fargateProfile1, fargateProfile2, fargateProfile3 
eks.FargateProfile + var fargateProfile1, fargateProfile2, fargateProfile3 types.FargateProfile rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_fargate_profile.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckFargateProfile(t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartition(t, names.StandardPartitionID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFargateProfileDestroy(ctx), Steps: []resource.TestStep{ @@ -178,7 +198,7 @@ func TestAccEKSFargateProfile_tags(t *testing.T) { }) } -func testAccCheckFargateProfileExists(ctx context.Context, n string, v *eks.FargateProfile) resource.TestCheckFunc { +func testAccCheckFargateProfileExists(ctx context.Context, n string, v *types.FargateProfile) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -190,14 +210,13 @@ func testAccCheckFargateProfileExists(ctx context.Context, n string, v *eks.Farg } clusterName, fargateProfileName, err := tfeks.FargateProfileParseResourceID(rs.Primary.ID) - if err != nil { return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) - output, err := tfeks.FindFargateProfileByClusterNameAndFargateProfileName(ctx, conn, clusterName, fargateProfileName) + output, err := tfeks.FindFargateProfileByTwoPartKey(ctx, conn, clusterName, fargateProfileName) if err != nil { return err @@ -211,7 +230,7 @@ func testAccCheckFargateProfileExists(ctx context.Context, n string, v *eks.Farg func testAccCheckFargateProfileDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := 
acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_eks_fargate_profile" { @@ -219,12 +238,11 @@ func testAccCheckFargateProfileDestroy(ctx context.Context) resource.TestCheckFu } clusterName, fargateProfileName, err := tfeks.FargateProfileParseResourceID(rs.Primary.ID) - if err != nil { return err } - _, err = tfeks.FindFargateProfileByClusterNameAndFargateProfileName(ctx, conn, clusterName, fargateProfileName) + _, err = tfeks.FindFargateProfileByTwoPartKey(ctx, conn, clusterName, fargateProfileName) if tfresource.NotFound(err) { continue @@ -241,39 +259,6 @@ func testAccCheckFargateProfileDestroy(ctx context.Context) resource.TestCheckFu } } -func testAccPreCheckFargateProfile(t *testing.T) { - // Most PreCheck functions try to use a list or describe API call to - // determine service or functionality availability, however - // ListFargateProfiles requires a valid ClusterName and does not indicate - // that the functionality is unavailable in a region. The create API call - // fails with same "ResourceNotFoundException: No cluster found" before - // returning the definitive "InvalidRequestException: CreateFargateProfile - // is not supported for region" error. We do not want to wait 20 minutes to - // create and destroy an EKS Cluster just to find the real error, instead - // we take the least desirable approach of hardcoding allowed regions. 
- allowedRegions := []string{ - endpoints.ApEast1RegionID, - endpoints.ApNortheast1RegionID, - endpoints.ApNortheast2RegionID, - endpoints.ApSouth1RegionID, - endpoints.ApSoutheast1RegionID, - endpoints.ApSoutheast2RegionID, - endpoints.CaCentral1RegionID, - endpoints.EuCentral1RegionID, - endpoints.EuNorth1RegionID, - endpoints.EuWest1RegionID, - endpoints.EuWest2RegionID, - endpoints.EuWest3RegionID, - endpoints.MeSouth1RegionID, - endpoints.SaEast1RegionID, - endpoints.UsEast1RegionID, - endpoints.UsEast2RegionID, - endpoints.UsWest1RegionID, - endpoints.UsWest2RegionID, - } - acctest.PreCheckRegion(t, allowedRegions...) -} - func testAccFargateProfileConfig_base(rName string) string { return fmt.Sprintf(` data "aws_availability_zones" "available" { diff --git a/internal/service/eks/identity_provider_config_test.go b/internal/service/eks/identity_provider_config_test.go index e0fd7f1fa95..daf4fa0db5c 100644 --- a/internal/service/eks/identity_provider_config_test.go +++ b/internal/service/eks/identity_provider_config_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -17,18 +17,19 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" tfeks "github.com/hashicorp/terraform-provider-aws/internal/service/eks" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccEKSIdentityProviderConfig_basic(t *testing.T) { ctx := acctest.Context(t) - var config eks.OidcIdentityProviderConfig + var config types.OidcIdentityProviderConfig rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) eksClusterResourceName := "aws_eks_cluster.test" resourceName := 
"aws_eks_identity_provider_config.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckIdentityProviderConfigDestroy(ctx), Steps: []resource.TestStep{ @@ -65,13 +66,13 @@ func TestAccEKSIdentityProviderConfig_basic(t *testing.T) { func TestAccEKSIdentityProviderConfig_disappears(t *testing.T) { ctx := acctest.Context(t) - var config eks.OidcIdentityProviderConfig + var config types.OidcIdentityProviderConfig rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_identity_provider_config.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckIdentityProviderConfigDestroy(ctx), Steps: []resource.TestStep{ @@ -89,13 +90,13 @@ func TestAccEKSIdentityProviderConfig_disappears(t *testing.T) { func TestAccEKSIdentityProviderConfig_allOIDCOptions(t *testing.T) { ctx := acctest.Context(t) - var config eks.OidcIdentityProviderConfig + var config types.OidcIdentityProviderConfig rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_identity_provider_config.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckIdentityProviderConfigDestroy(ctx), Steps: []resource.TestStep{ @@ -127,13 +128,13 @@ func 
TestAccEKSIdentityProviderConfig_allOIDCOptions(t *testing.T) { func TestAccEKSIdentityProviderConfig_tags(t *testing.T) { ctx := acctest.Context(t) - var config eks.OidcIdentityProviderConfig + var config types.OidcIdentityProviderConfig rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_identity_provider_config.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckIdentityProviderConfigDestroy(ctx), Steps: []resource.TestStep{ @@ -171,32 +172,27 @@ func TestAccEKSIdentityProviderConfig_tags(t *testing.T) { }) } -func testAccCheckIdentityProviderExistsConfig(ctx context.Context, resourceName string, config *eks.OidcIdentityProviderConfig) resource.TestCheckFunc { +func testAccCheckIdentityProviderExistsConfig(ctx context.Context, n string, v *types.OidcIdentityProviderConfig) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", resourceName) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No EKS Identity Profile Config ID is set") + return fmt.Errorf("Not found: %s", n) } clusterName, configName, err := tfeks.IdentityProviderConfigParseResourceID(rs.Primary.ID) - if err != nil { return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) - output, err := tfeks.FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, conn, clusterName, configName) + output, err := tfeks.FindOIDCIdentityProviderConfigByTwoPartKey(ctx, conn, clusterName, configName) if err != nil { return err } - *config = *output + *v = *output return nil } @@ 
-204,7 +200,7 @@ func testAccCheckIdentityProviderExistsConfig(ctx context.Context, resourceName func testAccCheckIdentityProviderConfigDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_eks_identity_provider_config" { @@ -212,12 +208,11 @@ func testAccCheckIdentityProviderConfigDestroy(ctx context.Context) resource.Tes } clusterName, configName, err := tfeks.IdentityProviderConfigParseResourceID(rs.Primary.ID) - if err != nil { return err } - _, err = tfeks.FindOIDCIdentityProviderConfigByClusterNameAndConfigName(ctx, conn, clusterName, configName) + _, err = tfeks.FindOIDCIdentityProviderConfigByTwoPartKey(ctx, conn, clusterName, configName) if tfresource.NotFound(err) { continue diff --git a/internal/service/eks/node_group_data_source_test.go b/internal/service/eks/node_group_data_source_test.go index e44c6c34166..3eb8b08aa9b 100644 --- a/internal/service/eks/node_group_data_source_test.go +++ b/internal/service/eks/node_group_data_source_test.go @@ -7,24 +7,22 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccEKSNodeGroupDataSource_basic(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup eks.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) dataSourceResourceName := "data.aws_eks_node_group.test" resourceName := "aws_eks_node_group.test" resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: 
acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccNodeGroupConfig_dataSourceName(rName), @@ -33,7 +31,6 @@ func TestAccEKSNodeGroupDataSource_basic(t *testing.T) { { Config: testAccNodeGroupDataSourceConfig_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckNodeGroupExists(ctx, resourceName, &nodeGroup), resource.TestCheckResourceAttrPair(resourceName, "ami_type", dataSourceResourceName, "ami_type"), resource.TestCheckResourceAttrPair(resourceName, "arn", dataSourceResourceName, "arn"), resource.TestCheckResourceAttrPair(resourceName, "capacity_type", dataSourceResourceName, "capacity_type"), diff --git a/internal/service/eks/node_group_test.go b/internal/service/eks/node_group_test.go index 379a7ee0ab3..ebff80dd6c9 100644 --- a/internal/service/eks/node_group_test.go +++ b/internal/service/eks/node_group_test.go @@ -9,8 +9,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -18,15 +18,16 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" tfeks "github.com/hashicorp/terraform-provider-aws/internal/service/eks" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" ) func init() { - acctest.RegisterServiceErrorCheckFunc(eks.EndpointsID, testAccErrorCheckSkip) + acctest.RegisterServiceErrorCheckFunc(names.EKSEndpointID, testAccErrorCheckSkip) } func TestAccEKSNodeGroup_basic(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup eks.Nodegroup + var 
nodeGroup types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) eksClusterResourceName := "aws_eks_cluster.test" iamRoleResourceName := "aws_iam_role.node" @@ -34,7 +35,7 @@ func TestAccEKSNodeGroup_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -42,10 +43,10 @@ func TestAccEKSNodeGroup_basic(t *testing.T) { Config: testAccNodeGroupConfig_dataSourceName(rName), Check: resource.ComposeTestCheckFunc( testAccCheckNodeGroupExists(ctx, resourceName, &nodeGroup), - resource.TestCheckResourceAttr(resourceName, "ami_type", eks.AMITypesAl2X8664), + resource.TestCheckResourceAttr(resourceName, "ami_type", string(types.AMITypesAl2X8664)), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "eks", regexache.MustCompile(fmt.Sprintf("nodegroup/%[1]s/%[1]s/.+", rName))), resource.TestCheckResourceAttrPair(resourceName, "cluster_name", eksClusterResourceName, "name"), - resource.TestCheckResourceAttr(resourceName, "capacity_type", eks.CapacityTypesOnDemand), + resource.TestCheckResourceAttr(resourceName, "capacity_type", string(types.CapacityTypesOnDemand)), resource.TestCheckResourceAttr(resourceName, "disk_size", "20"), resource.TestCheckResourceAttr(resourceName, "instance_types.#", "1"), resource.TestCheckResourceAttr(resourceName, "labels.%", "0"), @@ -60,7 +61,7 @@ func TestAccEKSNodeGroup_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "scaling_config.0.desired_size", "1"), resource.TestCheckResourceAttr(resourceName, "scaling_config.0.max_size", "1"), resource.TestCheckResourceAttr(resourceName, "scaling_config.0.min_size", "1"), - resource.TestCheckResourceAttr(resourceName, "status", 
eks.NodegroupStatusActive), + resource.TestCheckResourceAttr(resourceName, "status", string(types.NodegroupStatusActive)), resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "2"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestCheckResourceAttr(resourceName, "taint.#", "0"), @@ -79,13 +80,13 @@ func TestAccEKSNodeGroup_basic(t *testing.T) { func TestAccEKSNodeGroup_Name_generated(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup eks.Nodegroup + var nodeGroup types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -108,13 +109,13 @@ func TestAccEKSNodeGroup_Name_generated(t *testing.T) { func TestAccEKSNodeGroup_namePrefix(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup eks.Nodegroup + var nodeGroup types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -137,13 +138,13 @@ func TestAccEKSNodeGroup_namePrefix(t *testing.T) { func TestAccEKSNodeGroup_disappears(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup eks.Nodegroup + var nodeGroup types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" 
resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -161,21 +162,21 @@ func TestAccEKSNodeGroup_disappears(t *testing.T) { func TestAccEKSNodeGroup_amiType(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccNodeGroupConfig_amiType(rName, eks.AMITypesAl2X8664Gpu), + Config: testAccNodeGroupConfig_amiType(rName, string(types.AMITypesAl2X8664Gpu)), Check: resource.ComposeTestCheckFunc( testAccCheckNodeGroupExists(ctx, resourceName, &nodeGroup1), - resource.TestCheckResourceAttr(resourceName, "ami_type", eks.AMITypesAl2X8664Gpu), + resource.TestCheckResourceAttr(resourceName, "ami_type", string(types.AMITypesAl2X8664Gpu)), ), }, { @@ -184,10 +185,10 @@ func TestAccEKSNodeGroup_amiType(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccNodeGroupConfig_amiType(rName, eks.AMITypesAl2Arm64), + Config: testAccNodeGroupConfig_amiType(rName, string(types.AMITypesAl2Arm64)), Check: resource.ComposeTestCheckFunc( testAccCheckNodeGroupExists(ctx, resourceName, &nodeGroup2), - resource.TestCheckResourceAttr(resourceName, "ami_type", eks.AMITypesAl2Arm64), + resource.TestCheckResourceAttr(resourceName, 
"ami_type", string(types.AMITypesAl2Arm64)), ), }, }, @@ -196,21 +197,21 @@ func TestAccEKSNodeGroup_amiType(t *testing.T) { func TestAccEKSNodeGroup_CapacityType_spot(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 eks.Nodegroup + var nodeGroup1 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccNodeGroupConfig_capacityType(rName, eks.CapacityTypesSpot), + Config: testAccNodeGroupConfig_capacityType(rName, string(types.CapacityTypesSpot)), Check: resource.ComposeTestCheckFunc( testAccCheckNodeGroupExists(ctx, resourceName, &nodeGroup1), - resource.TestCheckResourceAttr(resourceName, "capacity_type", eks.CapacityTypesSpot), + resource.TestCheckResourceAttr(resourceName, "capacity_type", string(types.CapacityTypesSpot)), ), }, { @@ -224,13 +225,13 @@ func TestAccEKSNodeGroup_CapacityType_spot(t *testing.T) { func TestAccEKSNodeGroup_diskSize(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 eks.Nodegroup + var nodeGroup1 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -252,13 +253,13 @@ func TestAccEKSNodeGroup_diskSize(t *testing.T) { func 
TestAccEKSNodeGroup_forceUpdateVersion(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 eks.Nodegroup + var nodeGroup1 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -288,14 +289,14 @@ func TestAccEKSNodeGroup_forceUpdateVersion(t *testing.T) { func TestAccEKSNodeGroup_InstanceTypes_multiple(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 eks.Nodegroup + var nodeGroup1 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" instanceTypes := fmt.Sprintf("%q, %q, %q, %q", "t2.medium", "t3.medium", "t2.large", "t3.large") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -321,13 +322,13 @@ func TestAccEKSNodeGroup_InstanceTypes_multiple(t *testing.T) { func TestAccEKSNodeGroup_InstanceTypes_single(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 eks.Nodegroup + var nodeGroup1 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -349,13 +350,13 @@ func TestAccEKSNodeGroup_InstanceTypes_single(t *testing.T) { func TestAccEKSNodeGroup_labels(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2, nodeGroup3 eks.Nodegroup + var nodeGroup1, nodeGroup2, nodeGroup3 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -395,7 +396,7 @@ func TestAccEKSNodeGroup_labels(t *testing.T) { func TestAccEKSNodeGroup_LaunchTemplate_id(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) launchTemplateResourceName1 := "aws_launch_template.test1" launchTemplateResourceName2 := "aws_launch_template.test2" @@ -403,7 +404,7 @@ func TestAccEKSNodeGroup_LaunchTemplate_id(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -435,7 +436,7 @@ func TestAccEKSNodeGroup_LaunchTemplate_id(t *testing.T) { func TestAccEKSNodeGroup_LaunchTemplate_name(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName 
:= sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) launchTemplateResourceName1 := "aws_launch_template.test1" launchTemplateResourceName2 := "aws_launch_template.test2" @@ -443,7 +444,7 @@ func TestAccEKSNodeGroup_LaunchTemplate_name(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -475,14 +476,14 @@ func TestAccEKSNodeGroup_LaunchTemplate_name(t *testing.T) { func TestAccEKSNodeGroup_LaunchTemplate_version(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) launchTemplateResourceName := "aws_launch_template.test" resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -514,14 +515,14 @@ func TestAccEKSNodeGroup_LaunchTemplate_version(t *testing.T) { func TestAccEKSNodeGroup_releaseVersion(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) ssmParameterDataSourceName := "data.aws_ssm_parameter.test" resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + 
ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -551,7 +552,7 @@ func TestAccEKSNodeGroup_releaseVersion(t *testing.T) { func TestAccEKSNodeGroup_RemoteAccess_ec2SSHKey(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 eks.Nodegroup + var nodeGroup1 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" @@ -562,7 +563,7 @@ func TestAccEKSNodeGroup_RemoteAccess_ec2SSHKey(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -585,7 +586,7 @@ func TestAccEKSNodeGroup_RemoteAccess_ec2SSHKey(t *testing.T) { func TestAccEKSNodeGroup_RemoteAccess_sourceSecurityGroupIDs(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 eks.Nodegroup + var nodeGroup1 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" @@ -596,7 +597,7 @@ func TestAccEKSNodeGroup_RemoteAccess_sourceSecurityGroupIDs(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -619,13 +620,13 @@ func TestAccEKSNodeGroup_RemoteAccess_sourceSecurityGroupIDs(t *testing.T) { func TestAccEKSNodeGroup_Scaling_desiredSize(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, 
nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -661,13 +662,13 @@ func TestAccEKSNodeGroup_Scaling_desiredSize(t *testing.T) { func TestAccEKSNodeGroup_Scaling_maxSize(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -703,13 +704,13 @@ func TestAccEKSNodeGroup_Scaling_maxSize(t *testing.T) { func TestAccEKSNodeGroup_Scaling_minSize(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -745,13 +746,13 @@ 
func TestAccEKSNodeGroup_Scaling_minSize(t *testing.T) { func TestAccEKSNodeGroup_ScalingZeroDesiredSize_minSize(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -797,13 +798,13 @@ func TestAccEKSNodeGroup_ScalingZeroDesiredSize_minSize(t *testing.T) { func TestAccEKSNodeGroup_tags(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2, nodeGroup3 eks.Nodegroup + var nodeGroup1, nodeGroup2, nodeGroup3 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -845,13 +846,13 @@ func TestAccEKSNodeGroup_tags(t *testing.T) { func TestAccEKSNodeGroup_taints(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 eks.Nodegroup + var nodeGroup1 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -909,13 +910,13 @@ func TestAccEKSNodeGroup_taints(t *testing.T) { func TestAccEKSNodeGroup_update(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1 eks.Nodegroup + var nodeGroup1 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -948,13 +949,13 @@ func TestAccEKSNodeGroup_update(t *testing.T) { func TestAccEKSNodeGroup_version(t *testing.T) { ctx := acctest.Context(t) - var nodeGroup1, nodeGroup2 eks.Nodegroup + var nodeGroup1, nodeGroup2 types.Nodegroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_node_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ @@ -988,32 +989,27 @@ func testAccErrorCheckSkip(t *testing.T) resource.ErrorCheckFunc { ) } -func testAccCheckNodeGroupExists(ctx context.Context, resourceName string, nodeGroup *eks.Nodegroup) resource.TestCheckFunc { +func testAccCheckNodeGroupExists(ctx context.Context, n string, v *types.Nodegroup) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return 
fmt.Errorf("Not found: %s", resourceName) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No EKS Node Group ID is set") + return fmt.Errorf("Not found: %s", n) } clusterName, nodeGroupName, err := tfeks.NodeGroupParseResourceID(rs.Primary.ID) - if err != nil { return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) - output, err := tfeks.FindNodegroupByClusterNameAndNodegroupName(ctx, conn, clusterName, nodeGroupName) + output, err := tfeks.FindNodegroupByTwoPartKey(ctx, conn, clusterName, nodeGroupName) if err != nil { return err } - *nodeGroup = *output + *v = *output return nil } @@ -1021,7 +1017,7 @@ func testAccCheckNodeGroupExists(ctx context.Context, resourceName string, nodeG func testAccCheckNodeGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EKSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EKSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_eks_node_group" { @@ -1029,12 +1025,11 @@ func testAccCheckNodeGroupDestroy(ctx context.Context) resource.TestCheckFunc { } clusterName, nodeGroupName, err := tfeks.NodeGroupParseResourceID(rs.Primary.ID) - if err != nil { return err } - _, err = tfeks.FindNodegroupByClusterNameAndNodegroupName(ctx, conn, clusterName, nodeGroupName) + _, err = tfeks.FindNodegroupByTwoPartKey(ctx, conn, clusterName, nodeGroupName) if tfresource.NotFound(err) { continue @@ -1051,20 +1046,20 @@ func testAccCheckNodeGroupDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckNodeGroupNotRecreated(i, j *eks.Nodegroup) resource.TestCheckFunc { +func testAccCheckNodeGroupNotRecreated(i, j *types.Nodegroup) resource.TestCheckFunc { return func(s *terraform.State) error { - if !aws.TimeValue(i.CreatedAt).Equal(aws.TimeValue(j.CreatedAt)) { - return fmt.Errorf("EKS Node Group (%s) 
was recreated", aws.StringValue(j.NodegroupName)) + if !aws.ToTime(i.CreatedAt).Equal(aws.ToTime(j.CreatedAt)) { + return fmt.Errorf("EKS Node Group (%s) was recreated", aws.ToString(j.NodegroupName)) } return nil } } -func testAccCheckNodeGroupRecreated(i, j *eks.Nodegroup) resource.TestCheckFunc { +func testAccCheckNodeGroupRecreated(i, j *types.Nodegroup) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.TimeValue(i.CreatedAt).Equal(aws.TimeValue(j.CreatedAt)) { - return fmt.Errorf("EKS Node Group (%s) was not recreated", aws.StringValue(j.NodegroupName)) + if aws.ToTime(i.CreatedAt).Equal(aws.ToTime(j.CreatedAt)) { + return fmt.Errorf("EKS Node Group (%s) was not recreated", aws.ToString(j.NodegroupName)) } return nil diff --git a/internal/service/eks/node_groups_data_source_test.go b/internal/service/eks/node_groups_data_source_test.go index 2dd1c1c8942..458952a4428 100644 --- a/internal/service/eks/node_groups_data_source_test.go +++ b/internal/service/eks/node_groups_data_source_test.go @@ -7,10 +7,10 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/eks" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccEKSNodeGroupsDataSource_basic(t *testing.T) { @@ -20,7 +20,7 @@ func TestAccEKSNodeGroupsDataSource_basic(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, eks.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.EKSEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ From 733736bc804bb2357462994ca8c6ff3dd610dc18 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 8 Nov 2023 07:49:34 -0500 Subject: 
[PATCH 40/46] Fix 'TestAccEKSCluster_Network_ipFamily'. --- internal/service/eks/cluster_test.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/internal/service/eks/cluster_test.go b/internal/service/eks/cluster_test.go index 9ff88b6bfc3..02b6fb2ce22 100644 --- a/internal/service/eks/cluster_test.go +++ b/internal/service/eks/cluster_test.go @@ -626,14 +626,6 @@ func TestAccEKSCluster_Network_ipFamily(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ - { - Config: testAccClusterConfig_networkIPFamily(rName, `"v6"`), - ExpectError: regexache.MustCompile(`expected .* to be one of \[ipv4 ipv6]`), - }, - { - Config: testAccClusterConfig_networkIPFamily(rName, `"IPv4"`), - ExpectError: regexache.MustCompile(`expected .* to be one of \[ipv4 ipv6]`), - }, { Config: testAccClusterConfig_networkIPFamily(rName, `"ipv6"`), Check: resource.ComposeTestCheckFunc( From d1d43ba916548ff4f3be8f429bab5bdefb727ba9 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 8 Nov 2023 08:08:37 -0500 Subject: [PATCH 41/46] internal/slices: 'RemoveAll' varargs. --- internal/slices/slices.go | 6 ++++-- internal/slices/slices_test.go | 28 +++++++++++++++++++++++++++- 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/internal/slices/slices.go b/internal/slices/slices.go index 3c0d5f8d2a6..0509594f5ee 100644 --- a/internal/slices/slices.go +++ b/internal/slices/slices.go @@ -18,11 +18,11 @@ func Reverse[S ~[]E, E any](s S) S { } // RemoveAll removes all occurrences of the specified value `r` from a slice `s`. 
-func RemoveAll[S ~[]E, E comparable](s S, r E) S { +func RemoveAll[S ~[]E, E comparable](s S, vs ...E) S { v := S(make([]E, 0, len(s))) for _, e := range s { - if e != r { + if !slices.Contains(vs, e) { v = append(v, e) } } @@ -115,6 +115,8 @@ func AppendUnique[S ~[]E, E comparable](s S, vs ...E) S { } // IndexOf returns the index of the first occurrence of `v` in `s`, or -1 if not present. +// This function is similar to the `Index` function in the Go standard `slices` package, +// the difference being that `s` is a slice of `any` and a runtime type check is made. func IndexOf[S ~[]any, E comparable](s S, v E) int { for i := range s { if e, ok := s[i].(E); ok && v == e { diff --git a/internal/slices/slices_test.go b/internal/slices/slices_test.go index 392c7d60671..919792a3ce8 100644 --- a/internal/slices/slices_test.go +++ b/internal/slices/slices_test.go @@ -55,29 +55,55 @@ func TestRemoveAll(t *testing.T) { type testCase struct { input []string + remove []string expected []string } tests := map[string]testCase{ "two occurrences": { input: []string{"one", "two", "one"}, + remove: []string{"one"}, expected: []string{"two"}, }, "one occurrences": { input: []string{"one", "two"}, + remove: []string{"one"}, expected: []string{"two"}, }, "only occurrence": { input: []string{"one"}, + remove: []string{"one"}, expected: []string{}, }, "no occurrences": { input: []string{"two", "three", "four"}, + remove: []string{"one"}, expected: []string{"two", "three", "four"}, }, "zero elements": { input: []string{}, + remove: []string{"one"}, expected: []string{}, }, + "duplicate remove": { + input: []string{"one", "two", "one"}, + remove: []string{"one", "one"}, + expected: []string{"two"}, + }, + "remove all": { + input: []string{"one", "two", "three", "two", "one"}, + remove: []string{"two", "one", "one", "three"}, + expected: []string{}, + }, + "remove none": { + input: []string{"two", "three", "four"}, + remove: []string{"six", "one"}, + expected: []string{"two", "three", 
"four"}, + }, + "remove two": { + input: []string{"one", "two", "three", "four", "five", "six"}, + remove: []string{"six", "one"}, + expected: []string{"two", "three", "four", "five"}, + }, } for name, test := range tests { @@ -85,7 +111,7 @@ func TestRemoveAll(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() - got := RemoveAll(test.input, "one") + got := RemoveAll(test.input, test.remove...) if diff := cmp.Diff(got, test.expected); diff != "" { t.Errorf("unexpected diff (+wanted, -got): %s", diff) From 8386ad0e51b02a676ff6ab1c101fea8cc6030b37 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 8 Nov 2023 08:17:08 -0500 Subject: [PATCH 42/46] internal/enum: Add 'EnumValues' function. --- internal/enum/values.go | 8 +++++--- internal/enum/values_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 3 deletions(-) create mode 100644 internal/enum/values_test.go diff --git a/internal/enum/values.go b/internal/enum/values.go index 51ecaa4a527..e0a02c98868 100644 --- a/internal/enum/values.go +++ b/internal/enum/values.go @@ -8,10 +8,12 @@ type Valueser[T ~string] interface { Values() []T } -func Values[T Valueser[T]]() []string { - l := T("").Values() +func EnumValues[T Valueser[T]]() []T { + return T("").Values() +} - return Slice(l...) +func Values[T Valueser[T]]() []string { + return Slice(EnumValues[T]()...) } func Slice[T Valueser[T]](l ...T) []string { diff --git a/internal/enum/values_test.go b/internal/enum/values_test.go new file mode 100644 index 00000000000..edc4d835b34 --- /dev/null +++ b/internal/enum/values_test.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package enum + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/accessanalyzer/types" + "github.com/google/go-cmp/cmp" +) + +func TestValues(t *testing.T) { + t.Parallel() + + want := []string{ + "READ", + "WRITE", + "READ_ACP", + "WRITE_ACP", + "FULL_CONTROL", + } + got := Values[types.AclPermission]() + + if diff := cmp.Diff(got, want); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } +} From e4e11d5d40b93071816b0bdfff1cc710e1730efb Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 8 Nov 2023 08:20:03 -0500 Subject: [PATCH 43/46] r/aws_eks_cluster: Correct 'expandLogging'. --- internal/service/eks/cluster.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/internal/service/eks/cluster.go b/internal/service/eks/cluster.go index 5b087e13a2c..0df982164b8 100644 --- a/internal/service/eks/cluster.go +++ b/internal/service/eks/cluster.go @@ -22,6 +22,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -864,21 +865,18 @@ func expandKubernetesNetworkConfigRequest(tfList []interface{}) *types.Kubernete } func expandLogging(vEnabledLogTypes *schema.Set) *types.Logging { - vEksLogTypes := []interface{}{} - for _, eksLogType := range enum.Values[types.LogType]() { - vEksLogTypes = append(vEksLogTypes, eksLogType) - } - vAllLogTypes := schema.NewSet(schema.HashString, vEksLogTypes) + allLogTypes := enum.EnumValues[types.LogType]() + enabledLogTypes := flex.ExpandStringyValueSet[types.LogType](vEnabledLogTypes) return &types.Logging{ ClusterLogging: 
[]types.LogSetup{ { Enabled: aws.Bool(true), - Types: flex.ExpandStringyValueSet[types.LogType](vEnabledLogTypes), + Types: enabledLogTypes, }, { Enabled: aws.Bool(false), - Types: flex.ExpandStringyValueSet[types.LogType](vAllLogTypes.Difference(vEnabledLogTypes)), + Types: tfslices.RemoveAll(allLogTypes, enabledLogTypes...), }, }, } From f5c4cc217bda20a2c5d12835b292be5242c1211f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 8 Nov 2023 08:23:32 -0500 Subject: [PATCH 44/46] r/aws_eks_cluster: Update Kubernetes versions for acceptance tests. --- internal/service/eks/cluster_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/eks/cluster_test.go b/internal/service/eks/cluster_test.go index 02b6fb2ce22..200bbb9ca2f 100644 --- a/internal/service/eks/cluster_test.go +++ b/internal/service/eks/cluster_test.go @@ -25,8 +25,8 @@ import ( ) const ( - clusterVersionUpgradeInitial = "1.21" - clusterVersionUpgradeUpdated = "1.22" + clusterVersionUpgradeInitial = "1.27" + clusterVersionUpgradeUpdated = "1.28" ) func TestAccEKSCluster_basic(t *testing.T) { From 2f55a5ce8e3c884a294b66a374ccaaf835d49f75 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 8 Nov 2023 12:01:48 -0500 Subject: [PATCH 45/46] Correct vpc-cni add-on versions in acceptance tests. 
--- internal/service/eks/addon_data_source_test.go | 2 +- internal/service/eks/addon_test.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/service/eks/addon_data_source_test.go b/internal/service/eks/addon_data_source_test.go index 9091f9676b8..5b262d770e9 100644 --- a/internal/service/eks/addon_data_source_test.go +++ b/internal/service/eks/addon_data_source_test.go @@ -48,7 +48,7 @@ func TestAccEKSAddonDataSource_configurationValues(t *testing.T) { dataSourceResourceName := "data.aws_eks_addon.test" resourceName := "aws_eks_addon.test" addonName := "vpc-cni" - addonVersion := "v1.10.4-eksbuild.1" + addonVersion := "v1.15.3-eksbuild.1" configurationValues := "{\"env\": {\"WARM_ENI_TARGET\":\"2\",\"ENABLE_POD_ENI\":\"true\"},\"resources\": {\"limits\":{\"cpu\":\"100m\",\"memory\":\"100Mi\"},\"requests\":{\"cpu\":\"100m\",\"memory\":\"100Mi\"}}}" resource.ParallelTest(t, resource.TestCase{ diff --git a/internal/service/eks/addon_test.go b/internal/service/eks/addon_test.go index 9796bc64085..a0de3c7a0fc 100644 --- a/internal/service/eks/addon_test.go +++ b/internal/service/eks/addon_test.go @@ -114,8 +114,8 @@ func TestAccEKSAddon_addonVersion(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_addon.test" addonName := "vpc-cni" - addonVersion1 := "v1.12.5-eksbuild.2" - addonVersion2 := "v1.12.6-eksbuild.1" + addonVersion1 := "v1.14.1-eksbuild.1" + addonVersion2 := "v1.15.3-eksbuild.1" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, @@ -308,7 +308,7 @@ func TestAccEKSAddon_configurationValues(t *testing.T) { emptyConfigurationValues := "{}" invalidConfigurationValues := "{\"env\": {\"INVALID_FIELD\":\"2\"}}" addonName := "vpc-cni" - addonVersion := "v1.12.6-eksbuild.1" + addonVersion := "v1.15.3-eksbuild.1" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t); testAccPreCheckAddon(ctx, t) }, From 1c2ec4714b53054a63eb40c15e07e3e4dc1ae880 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 8 Nov 2023 15:30:33 -0500 Subject: [PATCH 46/46] Fix 'TestAccEKSNodeGroup_releaseVersion'. --- internal/service/eks/node_group_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/eks/node_group_test.go b/internal/service/eks/node_group_test.go index ebff80dd6c9..0b107e793b7 100644 --- a/internal/service/eks/node_group_test.go +++ b/internal/service/eks/node_group_test.go @@ -527,7 +527,7 @@ func TestAccEKSNodeGroup_releaseVersion(t *testing.T) { CheckDestroy: testAccCheckNodeGroupDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccNodeGroupConfig_releaseVersion(rName, "1.17"), + Config: testAccNodeGroupConfig_releaseVersion(rName, "1.27"), Check: resource.ComposeTestCheckFunc( testAccCheckNodeGroupExists(ctx, resourceName, &nodeGroup1), resource.TestCheckResourceAttrPair(resourceName, "release_version", ssmParameterDataSourceName, "value"), @@ -539,7 +539,7 @@ func TestAccEKSNodeGroup_releaseVersion(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccNodeGroupConfig_releaseVersion(rName, "1.18"), + Config: testAccNodeGroupConfig_releaseVersion(rName, "1.28"), Check: resource.ComposeTestCheckFunc( testAccCheckNodeGroupExists(ctx, resourceName, &nodeGroup2), testAccCheckNodeGroupNotRecreated(&nodeGroup1, &nodeGroup2),