diff --git a/.changelog/31607.txt b/.changelog/31607.txt new file mode 100644 index 00000000000..7344c3e368c --- /dev/null +++ b/.changelog/31607.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/aws_pipes_pipe: Add `enrichment_parameters` argument +``` + +```release-note:enhancement +resource/aws_pipes_pipe: Add `activemq_broker_parameters`, `dynamodb_stream_parameters`, `kinesis_stream_parameters`, `managed_streaming_kafka_parameters`, `rabbitmq_broker_parameters`, `self_managed_kafka_parameters` and `sqs_queue_parameters` attributes to the `source_parameters` configuration block. NOTE: Because we cannot easily test all of this functionality, it is supported on a best-effort basis and we ask for community help in testing it +``` + +```release-note:enhancement +resource/aws_pipes_pipe: Add `batch_job_parameters`, `cloudwatch_logs_parameters`, `ecs_task_parameters`, `eventbridge_event_bus_parameters`, `http_parameters`, `kinesis_stream_parameters`, `lambda_function_parameters`, `redshift_data_parameters`, `sagemaker_pipeline_parameters`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block. NOTE: Because we cannot easily test all of this functionality, it is supported on a best-effort basis and we ask for community help in testing it +``` \ No newline at end of file diff --git a/internal/service/pipes/enrichment_parameters.go b/internal/service/pipes/enrichment_parameters.go new file mode 100644 index 00000000000..a97da43288a --- /dev/null +++ b/internal/service/pipes/enrichment_parameters.go @@ -0,0 +1,133 @@ +package pipes + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/pipes/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/flex" +) + +func enrichmentParametersSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header_parameters": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "path_parameter_values": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "query_string_parameters": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "input_template": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 8192), + }, + }, + }, + } +} + +func expandPipeEnrichmentParameters(tfMap map[string]interface{}) *types.PipeEnrichmentParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeEnrichmentParameters{} + + if v, ok := tfMap["http_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.HttpParameters = expandPipeEnrichmentHTTPParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["input_template"].(string); ok && v != "" { + apiObject.InputTemplate = aws.String(v) + } + + return apiObject +} + +func expandPipeEnrichmentHTTPParameters(tfMap map[string]interface{}) *types.PipeEnrichmentHttpParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeEnrichmentHttpParameters{} + + if v, ok := 
tfMap["header_parameters"].(map[string]interface{}); ok && len(v) > 0 { + apiObject.HeaderParameters = flex.ExpandStringValueMap(v) + } + + if v, ok := tfMap["path_parameter_values"].([]interface{}); ok && len(v) > 0 { + apiObject.PathParameterValues = flex.ExpandStringValueList(v) + } + + if v, ok := tfMap["query_string_parameters"].(map[string]interface{}); ok && len(v) > 0 { + apiObject.QueryStringParameters = flex.ExpandStringValueMap(v) + } + + return apiObject +} + +func flattenPipeEnrichmentParameters(apiObject *types.PipeEnrichmentParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.HttpParameters; v != nil { + tfMap["http_parameters"] = []interface{}{flattenPipeEnrichmentHTTPParameters(v)} + } + + if v := apiObject.InputTemplate; v != nil { + tfMap["input_template"] = aws.ToString(v) + } + + return tfMap +} + +func flattenPipeEnrichmentHTTPParameters(apiObject *types.PipeEnrichmentHttpParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.HeaderParameters; v != nil { + tfMap["header_parameters"] = v + } + + if v := apiObject.PathParameterValues; v != nil { + tfMap["path_parameter_values"] = v + } + + if v := apiObject.QueryStringParameters; v != nil { + tfMap["query_string_parameters"] = v + } + + return tfMap +} diff --git a/internal/service/pipes/exports_test.go b/internal/service/pipes/exports_test.go new file mode 100644 index 00000000000..9ed928204bd --- /dev/null +++ b/internal/service/pipes/exports_test.go @@ -0,0 +1,8 @@ +package pipes + +// Exports for use in tests only. +var ( + FindPipeByName = findPipeByName + + ResourcePipe = resourcePipe +) diff --git a/internal/service/pipes/find.go b/internal/service/pipes/find.go deleted file mode 100644 index 7a3e3a59539..00000000000 --- a/internal/service/pipes/find.go +++ /dev/null @@ -1,37 +0,0 @@ -package pipes - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/pipes" - "github.com/aws/aws-sdk-go-v2/service/pipes/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/errs" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func FindPipeByName(ctx context.Context, conn *pipes.Client, name string) (*pipes.DescribePipeOutput, error) { - input := &pipes.DescribePipeInput{ - Name: aws.String(name), - } - - output, err := conn.DescribePipe(ctx, input) - - if errs.IsA[*types.NotFoundException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.Arn == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} diff --git a/internal/service/pipes/flex.go b/internal/service/pipes/flex.go deleted file mode 100644 index b3e0de87060..00000000000 --- a/internal/service/pipes/flex.go +++ /dev/null @@ -1,146 +0,0 @@ -package pipes - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/pipes/types" -) - -func expandFilter(tfMap map[string]interface{}) *types.Filter { - if tfMap == nil { - return nil - } - - output := &types.Filter{} - - if v, ok := tfMap["pattern"].(string); ok && len(v) > 0 { - output.Pattern = aws.String(v) - } - - return output -} - -func flattenFilter(apiObject types.Filter) map[string]interface{} { - m := map[string]interface{}{} - - if v := 
apiObject.Pattern; v != nil { - m["pattern"] = aws.ToString(v) - } - - return m -} - -func expandFilters(tfList []interface{}) []types.Filter { - if len(tfList) == 0 { - return nil - } - - var s []types.Filter - - for _, v := range tfList { - a := expandFilter(v.(map[string]interface{})) - - if a == nil { - continue - } - - s = append(s, *a) - } - - return s -} - -func flattenFilters(apiObjects []types.Filter) []interface{} { - if len(apiObjects) == 0 { - return nil - } - - var l []interface{} - - for _, apiObject := range apiObjects { - l = append(l, flattenFilter(apiObject)) - } - - return l -} - -func expandFilterCriteria(tfMap map[string]interface{}) *types.FilterCriteria { - if tfMap == nil { - return nil - } - - output := &types.FilterCriteria{} - - if v, ok := tfMap["filter"].([]interface{}); ok && len(v) > 0 { - output.Filters = expandFilters(v) - } - - return output -} - -func flattenFilterCriteria(apiObject *types.FilterCriteria) map[string]interface{} { - if apiObject == nil { - return nil - } - - m := map[string]interface{}{} - - m["filter"] = flattenFilters(apiObject.Filters) - - return m -} - -func expandPipeSourceParameters(tfMap map[string]interface{}) *types.PipeSourceParameters { - if tfMap == nil { - return nil - } - - a := &types.PipeSourceParameters{} - - if v, ok := tfMap["filter_criteria"].([]interface{}); ok { - a.FilterCriteria = expandFilterCriteria(v[0].(map[string]interface{})) - } - - return a -} - -func flattenPipeSourceParameters(apiObject *types.PipeSourceParameters) map[string]interface{} { - if apiObject == nil { - return nil - } - - m := map[string]interface{}{} - - if v := apiObject.FilterCriteria; v != nil { - m["filter_criteria"] = []interface{}{flattenFilterCriteria(v)} - } - - return m -} - -func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetParameters { - if tfMap == nil { - return nil - } - - a := &types.PipeTargetParameters{} - - if v, ok := tfMap["input_template"].(string); ok { - a.InputTemplate = aws.String(v) - } - - return a -} - -func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[string]interface{} { - if apiObject == nil { - return nil - } - - m := map[string]interface{}{} - - if v := apiObject.InputTemplate; v != nil { - m["input_template"] = aws.ToString(v) - } - - return m -} diff --git a/internal/service/pipes/pipe.go b/internal/service/pipes/pipe.go index e81ed0fc290..65045acd5aa 100644 --- a/internal/service/pipes/pipe.go +++ b/internal/service/pipes/pipe.go @@ -4,13 +4,15 @@ import ( "context" "errors" "log" + "regexp" "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/pipes" - "github.com/aws/aws-sdk-go-v2/service/pipes/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/pipes/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -19,13 +21,14 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) // 
@SDKResource("aws_pipes_pipe", name="Pipe") // @Tags(identifierAttribute="arn") -func ResourcePipe() *schema.Resource { +func resourcePipe() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourcePipeCreate, ReadWithoutTimeout: resourcePipeRead, @@ -57,21 +60,25 @@ func ResourcePipe() *schema.Resource { "desired_state": { Type: schema.TypeString, Optional: true, - Default: string(types.RequestedPipeStateRunning), - ValidateDiagFunc: enum.Validate[types.RequestedPipeState](), + Default: string(awstypes.RequestedPipeStateRunning), + ValidateDiagFunc: enum.Validate[awstypes.RequestedPipeState](), }, "enrichment": { Type: schema.TypeString, Optional: true, - ValidateFunc: validation.StringLenBetween(1, 1600), + ValidateFunc: verify.ValidARN, }, + "enrichment_parameters": enrichmentParametersSchema(), "name": { Type: schema.TypeString, Optional: true, Computed: true, ForceNew: true, ConflictsWith: []string{"name_prefix"}, - ValidateFunc: validation.StringLenBetween(1, 64), + ValidateFunc: validation.All( + validation.StringLenBetween(1, 64), + validation.StringMatch(regexp.MustCompile(`^[\.\-_A-Za-z0-9]+`), ""), + ), }, "name_prefix": { Type: schema.TypeString, @@ -79,7 +86,10 @@ func ResourcePipe() *schema.Resource { Computed: true, ForceNew: true, ConflictsWith: []string{"name"}, - ValidateFunc: validation.StringLenBetween(1, 64-id.UniqueIDSuffixLength), + ValidateFunc: validation.All( + validation.StringLenBetween(1, 64-id.UniqueIDSuffixLength), + validation.StringMatch(regexp.MustCompile(`^[\.\-_A-Za-z0-9]+`), ""), + ), }, "role_arn": { Type: schema.TypeString, @@ -87,65 +97,23 @@ func ResourcePipe() *schema.Resource { ValidateFunc: verify.ValidARN, }, "source": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 1600), - }, - "source_parameters": { - Type: schema.TypeList, + Type: schema.TypeString, Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "filter_criteria": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: suppressEmptyConfigurationBlock("source_parameters.0.filter_criteria"), - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "filter": { - Type: schema.TypeList, - Optional: true, - MaxItems: 5, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pattern": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 4096), - }, - }, - }, - }, - }, - }, - }, - }, - }, + ForceNew: true, + ValidateFunc: validation.Any( + verify.ValidARN, + validation.StringMatch(regexp.MustCompile(`^smk://(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9]):[0-9]{1,5}|arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\-]+):([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\d{1})?:(\d{12})?:(.+)$`), ""), + ), }, - names.AttrTags: tftags.TagsSchema(), - names.AttrTagsAll: tftags.TagsSchemaComputed(), + "source_parameters": sourceParametersSchema(), "target": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringLenBetween(1, 1600), - }, - "target_parameters": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "input_template": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 8192), - }, - }, - }, + ValidateFunc: verify.ValidARN, }, + "target_parameters": targetParametersSchema(), + names.AttrTags: tftags.TagsSchema(), + 
names.AttrTagsAll: tftags.TagsSchemaComputed(), }, } } @@ -159,7 +127,7 @@ func resourcePipeCreate(ctx context.Context, d *schema.ResourceData, meta interf name := create.Name(d.Get("name").(string), d.Get("name_prefix").(string)) input := &pipes.CreatePipeInput{ - DesiredState: types.RequestedPipeState(d.Get("desired_state").(string)), + DesiredState: awstypes.RequestedPipeState(d.Get("desired_state").(string)), Name: aws.String(name), RoleArn: aws.String(d.Get("role_arn").(string)), Source: aws.String(d.Get("source").(string)), @@ -167,31 +135,32 @@ func resourcePipeCreate(ctx context.Context, d *schema.ResourceData, meta interf Target: aws.String(d.Get("target").(string)), } - if v, ok := d.Get("description").(string); ok { - input.Description = aws.String(v) + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("enrichment"); ok && v != "" { + input.Enrichment = aws.String(v.(string)) } - if v, ok := d.Get("enrichment").(string); ok && v != "" { - input.Enrichment = aws.String(v) + if v, ok := d.GetOk("enrichment_parameters"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.EnrichmentParameters = expandPipeEnrichmentParameters(v.([]interface{})[0].(map[string]interface{})) } - if v, ok := d.Get("source_parameters").([]interface{}); ok && len(v) > 0 && v[0] != nil { - input.SourceParameters = expandPipeSourceParameters(v[0].(map[string]interface{})) + if v, ok := d.GetOk("source_parameters"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.SourceParameters = expandPipeSourceParameters(v.([]interface{})[0].(map[string]interface{})) } - if v, ok := d.Get("target_parameters").([]interface{}); ok && len(v) > 0 && v[0] != nil { - input.TargetParameters = expandPipeTargetParameters(v[0].(map[string]interface{})) + if v, ok := d.GetOk("target_parameters"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.TargetParameters = expandPipeTargetParameters(v.([]interface{})[0].(map[string]interface{})) } output, err := conn.CreatePipe(ctx, input) + if err != nil { return create.DiagError(names.Pipes, create.ErrActionCreating, ResNamePipe, name, err) } - if output == nil || output.Arn == nil { - return create.DiagError(names.Pipes, create.ErrActionCreating, ResNamePipe, name, errors.New("empty output")) - } - d.SetId(aws.ToString(output.Name)) if _, err := waitPipeCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { @@ -204,7 +173,7 @@ func resourcePipeCreate(ctx context.Context, d *schema.ResourceData, meta interf func resourcePipeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { conn := meta.(*conns.AWSClient).PipesClient(ctx) - output, err := FindPipeByName(ctx, conn, d.Id()) + output, err := findPipeByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EventBridge Pipes Pipe (%s) not found, removing from state", d.Id()) @@ -220,23 +189,31 @@ func resourcePipeRead(ctx context.Context, d *schema.ResourceData, meta interfac d.Set("description", output.Description) d.Set("desired_state", output.DesiredState) d.Set("enrichment", output.Enrichment) + if v := output.EnrichmentParameters; !types.IsZero(v) { + if err := d.Set("enrichment_parameters", []interface{}{flattenPipeEnrichmentParameters(v)}); err != nil { + return diag.Errorf("setting enrichment_parameters: %s", err) + } + } else { + d.Set("enrichment_parameters", nil) + } d.Set("name", output.Name) 
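
resourcePipeCreate above repeatedly unwraps a `MaxItems: 1` list before expanding it. A hypothetical helper (not in this PR) that names the idiom, for readers new to the SDKv2 representation of single configuration blocks; it is a sketch meant to live in this file's package:

```go
// singleBlock returns the one map element of a MaxItems: 1 list attribute,
// or nil if the block is absent or empty. Hypothetical helper illustrating
// the `v.([]interface{})[0].(map[string]interface{})` pattern used above.
func singleBlock(v interface{}, ok bool) map[string]interface{} {
	if !ok {
		return nil
	}
	l, ok := v.([]interface{})
	if !ok || len(l) == 0 || l[0] == nil {
		return nil
	}
	return l[0].(map[string]interface{})
}

// Usage (equivalent to the expansion calls in resourcePipeCreate):
//
//	if m := singleBlock(d.GetOk("enrichment_parameters")); m != nil {
//		input.EnrichmentParameters = expandPipeEnrichmentParameters(m)
//	}
```
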
d.Set("name_prefix", create.NamePrefixFromName(aws.ToString(output.Name))) - - if v := output.SourceParameters; v != nil { + d.Set("role_arn", output.RoleArn) + d.Set("source", output.Source) + if v := output.SourceParameters; !types.IsZero(v) { if err := d.Set("source_parameters", []interface{}{flattenPipeSourceParameters(v)}); err != nil { - return create.DiagError(names.Pipes, create.ErrActionSetting, ResNamePipe, d.Id(), err) + return diag.Errorf("setting source_parameters: %s", err) } + } else { + d.Set("source_parameters", nil) } - - d.Set("role_arn", output.RoleArn) - d.Set("source", output.Source) d.Set("target", output.Target) - - if v := output.TargetParameters; v != nil { + if v := output.TargetParameters; !types.IsZero(v) { if err := d.Set("target_parameters", []interface{}{flattenPipeTargetParameters(v)}); err != nil { - return create.DiagError(names.Pipes, create.ErrActionSetting, ResNamePipe, d.Id(), err) + return diag.Errorf("setting target_parameters: %s", err) } + } else { + d.Set("target_parameters", nil) } return nil @@ -248,45 +225,32 @@ func resourcePipeUpdate(ctx context.Context, d *schema.ResourceData, meta interf if d.HasChangesExcept("tags", "tags_all") { input := &pipes.UpdatePipeInput{ Description: aws.String(d.Get("description").(string)), - DesiredState: types.RequestedPipeState(d.Get("desired_state").(string)), + DesiredState: awstypes.RequestedPipeState(d.Get("desired_state").(string)), Name: aws.String(d.Id()), RoleArn: aws.String(d.Get("role_arn").(string)), Target: aws.String(d.Get("target").(string)), - - // Omitting the SourceParameters entirely is interpreted as "no change". - SourceParameters: &types.UpdatePipeSourceParameters{}, - TargetParameters: &types.PipeTargetParameters{}, + // Reset state in case it's a deletion, have to set the input to an empty string otherwise it doesn't get overwritten. + TargetParameters: &awstypes.PipeTargetParameters{ + InputTemplate: aws.String(""), + }, } if d.HasChange("enrichment") { - // Reset state in case it's a deletion. - input.Enrichment = aws.String("") - } - - if v, ok := d.Get("enrichment").(string); ok && v != "" { - input.Enrichment = aws.String(v) - } - - if d.HasChange("source_parameters.0.filter_criteria") { - // To unset a parameter, it must be set to an empty object. Nulling a - // parameter will be interpreted as "no change". 
- input.SourceParameters.FilterCriteria = &types.FilterCriteria{} + input.Enrichment = aws.String(d.Get("enrichment").(string)) } - if v, ok := d.Get("source_parameters.0.filter_criteria").([]interface{}); ok && len(v) > 0 && v[0] != nil { - input.SourceParameters.FilterCriteria = expandFilterCriteria(v[0].(map[string]interface{})) + if v, ok := d.GetOk("enrichment_parameters"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.EnrichmentParameters = expandPipeEnrichmentParameters(v.([]interface{})[0].(map[string]interface{})) } - if d.HasChange("target_parameters.0.input_template") { - input.TargetParameters.InputTemplate = aws.String("") + if v, ok := d.GetOk("source_parameters"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.SourceParameters = expandUpdatePipeSourceParameters(v.([]interface{})[0].(map[string]interface{})) } - if v, ok := d.Get("target_parameters.0.input_template").(string); ok { - input.TargetParameters.InputTemplate = aws.String(v) + if v, ok := d.GetOk("target_parameters"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.TargetParameters = expandPipeTargetParameters(v.([]interface{})[0].(map[string]interface{})) } - log.Printf("[DEBUG] Updating EventBridge Pipes Pipe (%s): %#v", d.Id(), input) - output, err := conn.UpdatePipe(ctx, input) if err != nil { @@ -309,7 +273,7 @@ func resourcePipeDelete(ctx context.Context, d *schema.ResourceData, meta interf Name: aws.String(d.Id()), }) - if errs.IsA[*types.NotFoundException](err) { + if errs.IsA[*awstypes.NotFoundException](err) { return nil } @@ -324,6 +288,105 @@ func resourcePipeDelete(ctx context.Context, d *schema.ResourceData, meta interf return nil } +func findPipeByName(ctx context.Context, conn *pipes.Client, name string) (*pipes.DescribePipeOutput, error) { + input := &pipes.DescribePipeInput{ + Name: aws.String(name), + } + + output, err := conn.DescribePipe(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.Arn == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func statusPipe(ctx context.Context, conn *pipes.Client, name string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findPipeByName(ctx, conn, name) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.CurrentState), nil + } +} + +func waitPipeCreated(ctx context.Context, conn *pipes.Client, id string, timeout time.Duration) (*pipes.DescribePipeOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.PipeStateCreating), + Target: enum.Slice(awstypes.PipeStateRunning, awstypes.PipeStateStopped), + Refresh: statusPipe(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 1, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if output, ok := outputRaw.(*pipes.DescribePipeOutput); ok { + tfresource.SetLastError(err, errors.New(aws.ToString(output.StateReason))) + + return output, err + } + + return nil, err +} + +func waitPipeUpdated(ctx context.Context, conn *pipes.Client, id string, timeout time.Duration) (*pipes.DescribePipeOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.PipeStateUpdating), + Target: 
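
The update logic above leans on the EventBridge Pipes API convention that an omitted field means "no change" while an explicit empty value clears the setting. A fragment in the same style as this file (assuming its existing imports: context, pipes, awstypes, aws), showing an update that deliberately resets the target input template; names and ARNs are illustrative:

```go
// clearTargetInputTemplate sends an UpdatePipe call that explicitly resets
// the input template, illustrating the "empty value clears, nil means no
// change" semantics noted above.
func clearTargetInputTemplate(ctx context.Context, conn *pipes.Client, name, roleARN, targetARN string) error {
	_, err := conn.UpdatePipe(ctx, &pipes.UpdatePipeInput{
		Name:    aws.String(name),
		RoleArn: aws.String(roleARN),
		Target:  aws.String(targetARN),
		TargetParameters: &awstypes.PipeTargetParameters{
			InputTemplate: aws.String(""), // "" clears; nil would leave it unchanged
		},
	})
	return err
}
```
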
enum.Slice(awstypes.PipeStateRunning, awstypes.PipeStateStopped), + Refresh: statusPipe(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 1, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if output, ok := outputRaw.(*pipes.DescribePipeOutput); ok { + tfresource.SetLastError(err, errors.New(aws.ToString(output.StateReason))) + + return output, err + } + + return nil, err +} + +func waitPipeDeleted(ctx context.Context, conn *pipes.Client, id string, timeout time.Duration) (*pipes.DescribePipeOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.PipeStateDeleting), + Target: []string{}, + Refresh: statusPipe(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if output, ok := outputRaw.(*pipes.DescribePipeOutput); ok { + tfresource.SetLastError(err, errors.New(aws.ToString(output.StateReason))) + + return output, err + } + + return nil, err +} + func suppressEmptyConfigurationBlock(key string) schema.SchemaDiffSuppressFunc { return func(k, o, n string, d *schema.ResourceData) bool { if k != key+".#" { diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index 38fc2db22d9..c6a093fe1f0 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -20,13 +20,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccPipesPipe_basic(t *testing.T) { +func TestAccPipesPipe_basicSQS(t *testing.T) { ctx := acctest.Context(t) - - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" @@ -42,20 +37,31 @@ func TestAccPipesPipe_basic(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Config: testAccPipeConfig_basicSQS(rName), + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, 
"source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.0.batch_size", "10"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.0.maximum_batching_window_in_seconds", "0"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target", "arn"), - resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "0"), ), }, { @@ -69,10 +75,6 @@ func TestAccPipesPipe_basic(t *testing.T) { func TestAccPipesPipe_disappears(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" @@ -88,7 +90,7 @@ func TestAccPipesPipe_disappears(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_basic(rName), + Config: testAccPipeConfig_basicSQS(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfpipes.ResourcePipe(), resourceName), @@ -101,12 +103,8 @@ func TestAccPipesPipe_disappears(t *testing.T) { func TestAccPipesPipe_description(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput - name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -120,7 +118,7 @@ func TestAccPipesPipe_description(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_description(name, "Description 1"), + Config: testAccPipeConfig_description(rName, "Description 1"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "description", "Description 1"), @@ -132,53 +130,34 @@ func TestAccPipesPipe_description(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_description(name, "Description 2"), + Config: testAccPipeConfig_description(rName, "Description 2"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "description", "Description 2"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_description(name, ""), + Config: testAccPipeConfig_description(rName, ""), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "description", ""), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_basic(name), + Config: testAccPipeConfig_basicSQS(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), ), }, - { - ResourceName: resourceName, - ImportState: true, - 
ImportStateVerify: true, - }, }, }) } func TestAccPipesPipe_desiredState(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput - name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -192,7 +171,7 @@ func TestAccPipesPipe_desiredState(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_desiredState(name, "STOPPED"), + Config: testAccPipeConfig_desiredState(rName, "STOPPED"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "desired_state", "STOPPED"), @@ -204,53 +183,34 @@ func TestAccPipesPipe_desiredState(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_desiredState(name, "RUNNING"), + Config: testAccPipeConfig_desiredState(rName, "RUNNING"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_desiredState(name, "STOPPED"), + Config: testAccPipeConfig_desiredState(rName, "STOPPED"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "desired_state", "STOPPED"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_basic(name), + Config: testAccPipeConfig_basicSQS(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), ), }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, }, }) } func TestAccPipesPipe_enrichment(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput - name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -264,7 +224,7 @@ func TestAccPipesPipe_enrichment(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_enrichment(name, 0), + Config: testAccPipeConfig_enrichment(rName, 0), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttrPair(resourceName, "enrichment", "aws_cloudwatch_event_api_destination.test.0", "arn"), @@ -276,41 +236,82 @@ func TestAccPipesPipe_enrichment(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_enrichment(name, 1), + Config: testAccPipeConfig_enrichment(rName, 1), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttrPair(resourceName, "enrichment", "aws_cloudwatch_event_api_destination.test.1", "arn"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_basic(name), + Config: testAccPipeConfig_basicSQS(rName), Check: resource.ComposeTestCheckFunc( 
testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "enrichment", ""), ), }, + }, + }) +} + +func TestAccPipesPipe_enrichmentParameters(t *testing.T) { + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_enrichmentParameters(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "enrichment", "aws_cloudwatch_event_api_destination.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.header_parameters.%", "1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.header_parameters.X-Test-1", "Val1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.path_parameter_values.#", "1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.path_parameter_values.0", "p1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.query_string_parameters.%", "1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.query_string_parameters.q1", "abc"), + ), + }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, }, + { + Config: testAccPipeConfig_enrichmentParametersUpdated(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "enrichment", "aws_cloudwatch_event_api_destination.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.header_parameters.%", "2"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.header_parameters.X-Test-1", "Val1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.header_parameters.X-Test-2", "Val2"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.path_parameter_values.#", "1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.path_parameter_values.0", "p2"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.query_string_parameters.%", "0"), + ), + }, }, }) } func TestAccPipesPipe_sourceParameters_filterCriteria(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput - name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -324,9 +325,11 @@ func TestAccPipesPipe_sourceParameters_filterCriteria(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_sourceParameters_filterCriteria1(name, "test1"), + Config: testAccPipeConfig_sourceParameters_filterCriteria1(rName, "test1"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "1"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.#", "1"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.0.pattern", `{"source":["test1"]}`), ), @@ -337,37 +340,32 @@ func TestAccPipesPipe_sourceParameters_filterCriteria(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_sourceParameters_filterCriteria2(name, "test1", "test2"), + Config: testAccPipeConfig_sourceParameters_filterCriteria2(rName, "test1", "test2"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "1"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.#", "2"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.0.pattern", `{"source":["test1"]}`), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.1.pattern", `{"source":["test2"]}`), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_sourceParameters_filterCriteria1(name, "test2"), + Config: testAccPipeConfig_sourceParameters_filterCriteria1(rName, "test2"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "1"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.#", "1"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.0.pattern", `{"source":["test2"]}`), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_sourceParameters_filterCriteria0(name), + Config: testAccPipeConfig_sourceParameters_filterCriteria0(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), - resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), ), }, { @@ -376,41 +374,31 @@ func TestAccPipesPipe_sourceParameters_filterCriteria(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_sourceParameters_filterCriteria1(name, "test2"), + Config: testAccPipeConfig_sourceParameters_filterCriteria1(rName, "test2"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, 
&pipe), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "1"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.#", "1"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.0.pattern", `{"source":["test2"]}`), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_basic(name), + Config: testAccPipeConfig_basicSQS(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), - resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "1"), ), }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, }, }) } func TestAccPipesPipe_nameGenerated(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -424,7 +412,7 @@ func TestAccPipesPipe_nameGenerated(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_nameGenerated(), + Config: testAccPipeConfig_nameGenerated(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), acctest.CheckResourceAttrNameGenerated(resourceName, "name"), @@ -442,11 +430,8 @@ func TestAccPipesPipe_nameGenerated(t *testing.T) { func TestAccPipesPipe_namePrefix(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -460,7 +445,7 @@ func TestAccPipesPipe_namePrefix(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_namePrefix("tf-acc-test-prefix-"), + Config: testAccPipeConfig_namePrefix(rName, "tf-acc-test-prefix-"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), acctest.CheckResourceAttrNameFromPrefix(resourceName, "name", "tf-acc-test-prefix-"), @@ -478,12 +463,8 @@ func TestAccPipesPipe_namePrefix(t *testing.T) { func TestAccPipesPipe_roleARN(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput - name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -497,7 +478,7 @@ func TestAccPipesPipe_roleARN(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_basic(name), + Config: testAccPipeConfig_basicSQS(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), @@ -509,29 +490,20 @@ func TestAccPipesPipe_roleARN(t 
*testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_roleARN(name), + Config: testAccPipeConfig_roleARN(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test2", "arn"), ), }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, }, }) } func TestAccPipesPipe_tags(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput - name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -545,7 +517,7 @@ func TestAccPipesPipe_tags(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_tags1(name, "key1", "value1"), + Config: testAccPipeConfig_tags1(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), @@ -558,7 +530,7 @@ func TestAccPipesPipe_tags(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_tags2(name, "key1", "value1updated", "key2", "value2"), + Config: testAccPipeConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), @@ -567,35 +539,21 @@ func TestAccPipesPipe_tags(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_tags1(name, "key2", "value2"), + Config: testAccPipeConfig_tags1(rName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, }, }) } -func TestAccPipesPipe_target(t *testing.T) { +func TestAccPipesPipe_targetUpdate(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput - name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -609,7 +567,7 @@ func TestAccPipesPipe_target(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_basic(name), + Config: testAccPipeConfig_basicSQS(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target", "arn"), @@ -621,29 +579,20 @@ func TestAccPipesPipe_target(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_target(name), + Config: testAccPipeConfig_targetUpdated(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target2", "arn"), ), }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, }, }) } func 
TestAccPipesPipe_targetParameters_inputTemplate(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput - name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -657,7 +606,7 @@ func TestAccPipesPipe_targetParameters_inputTemplate(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_targetParameters_inputTemplate(name, "$.first"), + Config: testAccPipeConfig_targetParameters_inputTemplate(rName, "$.first"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", "$.first"), @@ -669,24 +618,87 @@ func TestAccPipesPipe_targetParameters_inputTemplate(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_targetParameters_inputTemplate(name, "$.second"), + Config: testAccPipeConfig_targetParameters_inputTemplate(rName, "$.second"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", "$.second"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_basic(name), + Config: testAccPipeConfig_basicSQS(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckNoResourceAttr(resourceName, "target_parameters.0.input_template"), ), }, + }, + }) +} + +func TestAccPipesPipe_kinesisSourceAndTarget(t *testing.T) { + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicKinesis(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_kinesis_stream.source", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, 
"source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.0.batch_size", "100"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.0.dead_letter_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.0.maximum_batching_window_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.0.maximum_record_age_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.0.maximum_retry_attempts", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.0.on_partial_batch_item_failure", ""), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.0.parallelization_factor", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.0.starting_position", "LATEST"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.0.starting_position_timestamp", ""), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_kinesis_stream.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.eventbridge_event_bus_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.0.partition_key", "test"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.#", "0"), + ), + }, { ResourceName: resourceName, ImportState: true, @@ -696,542 +708,2330 @@ func TestAccPipesPipe_targetParameters_inputTemplate(t *testing.T) { }) } -func testAccCheckPipeDestroy(ctx context.Context) 
resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient(ctx) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_pipes_pipe" { - continue - } - - _, err := tfpipes.FindPipeByName(ctx, conn, rs.Primary.ID) - - if tfresource.NotFound(err) { - continue - } - - if err != nil { - return err - } - - return create.Error(names.Pipes, create.ErrActionCheckingDestroyed, tfpipes.ResNamePipe, rs.Primary.ID, errors.New("not destroyed")) - } +func TestAccPipesPipe_dynamoDBSourceCloudWatchLogsTarget(t *testing.T) { + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" - return nil - } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicDynamoDBSourceCloudWatchLogsTarget(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_dynamodb_table.source", "stream_arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.0.batch_size", "100"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.0.dead_letter_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.0.maximum_batching_window_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.0.maximum_record_age_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.0.maximum_retry_attempts", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.0.on_partial_batch_item_failure", ""), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.0.parallelization_factor", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.0.starting_position", "LATEST"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, 
"source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_cloudwatch_log_group.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "target_parameters.0.cloudwatch_logs_parameters.0.log_stream_name", "aws_cloudwatch_log_stream.target", "name"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.0.timestamp", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.eventbridge_event_bus_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) } -func testAccCheckPipeExists(ctx context.Context, name string, pipe *pipes.DescribePipeOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return create.Error(names.Pipes, create.ErrActionCheckingExistence, tfpipes.ResNamePipe, name, errors.New("not found")) - } +func TestAccPipesPipe_activeMQSourceStepFunctionTarget(t *testing.T) { + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" - if rs.Primary.ID == "" { - return create.Error(names.Pipes, create.ErrActionCheckingExistence, tfpipes.ResNamePipe, name, errors.New("not set")) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicActiveMQSourceStepFunctionTarget(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + 
acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_mq_broker.source", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.0.batch_size", "100"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.0.credentials.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "source_parameters.0.activemq_broker_parameters.0.credentials.0.basic_auth"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.0.maximum_batching_window_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.0.queue_name", "test"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sfn_state_machine.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.eventbridge_event_bus_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, 
"target_parameters.0.step_function_state_machine_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.0.invocation_type", "REQUEST_RESPONSE"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPipesPipe_rabbitMQSourceEventBusTarget(t *testing.T) { + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicRabbitMQSourceEventBusTarget(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_mq_broker.source", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.0.batch_size", "10"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.0.credentials.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "source_parameters.0.rabbitmq_broker_parameters.0.credentials.0.basic_auth"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.0.maximum_batching_window_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.0.queue_name", "test"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.0.virtual_host", ""), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_cloudwatch_event_bus.target", "arn"), + 
resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPipesPipe_mskSourceHTTPTarget(t *testing.T) { + acctest.Skip(t, "DependencyViolation errors deleting subnets and security group") + + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicMSKSourceHTTPTarget(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_msk_cluster.source", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.0.batch_size", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.0.consumer_group_id", ""), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.0.credentials.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.0.maximum_batching_window_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.0.starting_position", ""), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.0.topic_name", "test"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrSet(resourceName, "target"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + 
resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.eventbridge_event_bus_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.header_parameters.%", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.header_parameters.X-Test", "test"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.path_parameter_values.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.path_parameter_values.0", "p1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.query_string_parameters.%", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.query_string_parameters.testing", "yes"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPipesPipe_selfManagedKafkaSourceLambdaFunctionTarget(t *testing.T) { + acctest.Skip(t, "DependencyViolation errors deleting subnets and security group") + + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicSelfManagedKafkaSourceLambdaFunctionTarget(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", 
"aws_iam_role.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "source", "smk://test1:9092,test2:9092"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.additional_bootstrap_servers.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.additional_bootstrap_servers.*", "testing:1234"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.batch_size", "100"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.consumer_group_id", "self-managed-test-group-id"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.credentials.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.maximum_batching_window_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.server_root_ca_certificate", ""), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.starting_position", ""), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.topic_name", "test"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.vpc.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.vpc.0.security_groups.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.vpc.0.subnets.#", "2"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_lambda_function.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.eventbridge_event_bus_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.#", "1"), + 
resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.0.invocation_type", "REQUEST_RESPONSE"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPipesPipe_sqsSourceRedshiftTarget(t *testing.T) { + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicSQSSourceRedshiftTarget(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.0.batch_size", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.0.maximum_batching_window_in_seconds", "90"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_redshift_cluster.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.#", "0"), + 
resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.eventbridge_event_bus_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.0.database", "db1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.0.db_user", "user1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.0.secret_manager_arn", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.0.sqls.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.0.statement_name", "SelectAll"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.0.with_event", "false"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPipesPipe_SourceSageMakerTarget(t *testing.T) { + acctest.Skip(t, "aws_sagemaker_pipeline resource not yet implemented") + + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicSQSSourceSageMakerTarget(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttr(resourceName, 
"source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sagemaker_pipeline.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.eventbridge_event_bus_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.0.pipeline_parameter.#", "2"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.0.pipeline_parameter.0.name", "p1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.0.pipeline_parameter.0.value", "v1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.0.pipeline_parameter.1.name", "p2"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.0.pipeline_parameter.1.value", "v2"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPipesPipe_sqsSourceBatchJobTarget(t *testing.T) { + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicSQSSourceBatchJobTarget(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_batch_job_queue.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.array_properties.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.array_properties.0.size", "512"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.command.#", "3"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.command.0", "rm"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.command.1", "-fr"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.command.2", "/"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.environment.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.environment.0.name", "TMP"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.environment.0.value", "/tmp2"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.instance_type", ""), + 
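// The container override requests a single GPU via resource_requirement. +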
resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.resource_requirement.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.resource_requirement.0.type", "GPU"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.resource_requirement.0.value", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.depends_on.#", "0"), + resource.TestCheckResourceAttrSet(resourceName, "target_parameters.0.batch_job_parameters.0.job_definition"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.job_name", "testing"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.parameters.%", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.parameters.Key1", "Value1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.retry_strategy.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.eventbridge_event_bus_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPipesPipe_sqsSourceECSTaskTarget(t *testing.T) { + acctest.Skip(t, "ValidationException: [numeric instance is lower than the required minimum (minimum: 1, found: 0)]") + + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicSQSSourceECSTaskTarget(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + resource.TestCheckResourceAttr(resourceName, 
"desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_ecs_cluster.target", "id"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.capacity_provider_strategy.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.enable_ecs_managed_tags", "true"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.enable_execute_command", "false"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.group", "g1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.launch_type", "FARGATE"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.network_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.network_configuration.0.aws_vpc_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.network_configuration.0.aws_vpc_configuration.0.assign_public_ip", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.network_configuration.0.aws_vpc_configuration.0.security_groups.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.network_configuration.0.aws_vpc_configuration.0.subnets.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.command.#", "0"), + resource.TestCheckResourceAttr(resourceName, 
"target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.cpu", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.environment.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.environment.0.name", "TMP"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.environment.0.value", "/tmp2"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.environment_file.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.memory", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.memory_reservation", "1024"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.name", "first"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.resource_requirement.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.resource_requirement.0.type", "GPU"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.resource_requirement.0.value", "2"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.cpu", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.ephemeral_storage.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.ephemeral_storage.0.size_in_gib", "32"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.execution_role_arn", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.inference_accelerator_override.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.memory", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.task_role_arn", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.placement_constraint.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.placement_strategy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.placement_strategy.0.field", "cpu"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.placement_strategy.0.type", "binpack"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.platform_version", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.propagate_tags", "TASK_DEFINITION"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.reference_id", "refid"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.tags.Name", 
rName), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.task_count", "1"), + resource.TestCheckResourceAttrSet(resourceName, "target_parameters.0.ecs_task_parameters.0.task_definition_arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.eventbridge_event_bus_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckPipeDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_pipes_pipe" { + continue + } + + _, err := tfpipes.FindPipeByName(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return create.Error(names.Pipes, create.ErrActionCheckingDestroyed, tfpipes.ResNamePipe, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckPipeExists(ctx context.Context, name string, pipe *pipes.DescribePipeOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.Pipes, create.ErrActionCheckingExistence, tfpipes.ResNamePipe, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.Pipes, create.ErrActionCheckingExistence, tfpipes.ResNamePipe, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient(ctx) + + output, err := tfpipes.FindPipeByName(ctx, conn, rs.Primary.ID) + + if err != nil { + return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient(ctx) + *pipe = *output + + return nil + } +} + +func testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient(ctx) + + input := &pipes.ListPipesInput{} + _, err := conn.ListPipes(ctx, input) + + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func testAccPipeConfig_base(rName string) string { + return fmt.Sprintf(` +data "aws_caller_identity" "main" {} +data "aws_partition" "main" {} + +resource "aws_iam_role" "test" { + name = %[1]q + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = { + Effect = "Allow" + Action = "sts:AssumeRole" + Principal = { + Service = "pipes.${data.aws_partition.main.dns_suffix}" + } + Condition = { + StringEquals = { + "aws:SourceAccount" = data.aws_caller_identity.main.account_id + } + } + } + }) 
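+ # Scoping the trust policy to the calling account via aws:SourceAccount guards against confused-deputy access.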
+} +`, rName) +} + +func testAccPipeConfig_baseSQSSource(rName string) string { + return fmt.Sprintf(` +resource "aws_iam_role_policy" "source" { + role = aws_iam_role.test.id + name = "%[1]s-source" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "sqs:DeleteMessage", + "sqs:GetQueueAttributes", + "sqs:ReceiveMessage", + ], + Resource = [ + aws_sqs_queue.source.arn, + ] + }, + ] + }) +} + +resource "aws_sqs_queue" "source" { + name = "%[1]s-source" +} +`, rName) +} + +func testAccPipeConfig_baseSQSTarget(rName string) string { + return fmt.Sprintf(` +resource "aws_iam_role_policy" "target" { + role = aws_iam_role.test.id + name = "%[1]s-target" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "sqs:SendMessage", + ], + Resource = [ + aws_sqs_queue.target.arn, + ] + }, + ] + }) +} + +resource "aws_sqs_queue" "target" { + name = "%[1]s-target" +} +`, rName) +} + +func testAccPipeConfig_baseKinesisSource(rName string) string { + return fmt.Sprintf(` +resource "aws_iam_role_policy" "source" { + role = aws_iam_role.test.id + name = "%[1]s-source" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "kinesis:DescribeStream", + "kinesis:DescribeStreamSummary", + "kinesis:GetRecords", + "kinesis:GetShardIterator", + "kinesis:ListShards", + "kinesis:ListStreams", + "kinesis:SubscribeToShard", + ], + Resource = [ + aws_kinesis_stream.source.arn, + ] + }, + ] + }) +} + +resource "aws_kinesis_stream" "source" { + name = "%[1]s-source" + + stream_mode_details { + stream_mode = "ON_DEMAND" + } +} +`, rName) +} + +func testAccPipeConfig_baseKinesisTarget(rName string) string { + return fmt.Sprintf(` +resource "aws_iam_role_policy" "target" { + role = aws_iam_role.test.id + name = "%[1]s-target" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "kinesis:PutRecord", + ], + Resource = [ + aws_kinesis_stream.target.arn, + ] + }, + ] + }) +} + +resource "aws_kinesis_stream" "target" { + name = "%[1]s-target" + + stream_mode_details { + stream_mode = "ON_DEMAND" + } +} +`, rName) +} + +func testAccPipeConfig_basicSQS(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), + fmt.Sprintf(` +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn +} +`, rName)) +} + +func testAccPipeConfig_description(rName, description string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), + fmt.Sprintf(` +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn + + description = %[2]q +} +`, rName, description)) +} + +func testAccPipeConfig_desiredState(rName, state string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), + fmt.Sprintf(` +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, 
aws_iam_role_policy.target] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn + + desired_state = %[2]q +} +`, rName, state)) +} + +func testAccPipeConfig_enrichment(rName string, i int) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), + fmt.Sprintf(` +resource "aws_cloudwatch_event_connection" "test" { + name = %[1]q + authorization_type = "API_KEY" + + auth_parameters { + api_key { + key = "testKey" + value = "testValue" + } + } +} + +resource "aws_cloudwatch_event_api_destination" "test" { + count = 2 + name = "%[1]s-${count.index}" + invocation_endpoint = "https://example.com/${count.index}" + http_method = "POST" + connection_arn = aws_cloudwatch_event_connection.test.arn +} + +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn + + enrichment = aws_cloudwatch_event_api_destination.test[%[2]d].arn +} +`, rName, i)) +} + +func testAccPipeConfig_enrichmentParameters(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), + fmt.Sprintf(` +resource "aws_cloudwatch_event_connection" "test" { + name = %[1]q + authorization_type = "API_KEY" + + auth_parameters { + api_key { + key = "testKey" + value = "testValue" + } + } +} + +resource "aws_cloudwatch_event_api_destination" "test" { + name = %[1]q + invocation_endpoint = "https://example.com/" + http_method = "POST" + connection_arn = aws_cloudwatch_event_connection.test.arn +} + +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn + + enrichment = aws_cloudwatch_event_api_destination.test.arn + + enrichment_parameters { + http_parameters { + header_parameters = { + "X-Test-1" = "Val1" + } + + path_parameter_values = ["p1"] + + query_string_parameters = { + "q1" = "abc" + } + } + } +} +`, rName)) +} + +func testAccPipeConfig_enrichmentParametersUpdated(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), + fmt.Sprintf(` +resource "aws_cloudwatch_event_connection" "test" { + name = %[1]q + authorization_type = "API_KEY" + + auth_parameters { + api_key { + key = "testKey" + value = "testValue" + } + } +} + +resource "aws_cloudwatch_event_api_destination" "test" { + name = %[1]q + invocation_endpoint = "https://example.com/" + http_method = "POST" + connection_arn = aws_cloudwatch_event_connection.test.arn +} + +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn + + enrichment = aws_cloudwatch_event_api_destination.test.arn + + enrichment_parameters { + http_parameters { + header_parameters = { + "X-Test-1" = "Val1" + "X-Test-2" = "Val2" + } + + path_parameter_values = ["p2"] + } + } +} +`, rName)) +} + +func testAccPipeConfig_sourceParameters_filterCriteria1(rName, criteria1 string) string { + return 
acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), + fmt.Sprintf(` +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn + + source_parameters { + filter_criteria { + filter { + pattern = jsonencode({ + source = [%[2]q] + }) + } + } + } +} +`, rName, criteria1)) +} + +func testAccPipeConfig_sourceParameters_filterCriteria2(rName, criteria1, criteria2 string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), + fmt.Sprintf(` +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn + + source_parameters { + filter_criteria { + filter { + pattern = jsonencode({ + source = [%[2]q] + }) + } + + filter { + pattern = jsonencode({ + source = [%[3]q] + }) + } + } + } +} +`, rName, criteria1, criteria2)) +} + +func testAccPipeConfig_sourceParameters_filterCriteria0(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), + fmt.Sprintf(` +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn + + source_parameters { + filter_criteria {} + } +} +`, rName)) +} + +func testAccPipeConfig_nameGenerated(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), + ` +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn +} +`, + ) +} + +func testAccPipeConfig_namePrefix(rName, namePrefix string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), + fmt.Sprintf(` +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + + name_prefix = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn +} +`, namePrefix)) +} + +func testAccPipeConfig_roleARN(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), + fmt.Sprintf(` +resource "aws_iam_role" "test2" { + name = "%[1]s-2" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = { + Effect = "Allow" + Action = "sts:AssumeRole" + Principal = { + Service = "pipes.${data.aws_partition.main.dns_suffix}" + } + Condition = { + StringEquals = { + "aws:SourceAccount" = data.aws_caller_identity.main.account_id + } + } + } + }) +} + +resource "aws_iam_role_policy" "source2" { + role = aws_iam_role.test2.id + name = "%[1]s-source2" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + 
"sqs:DeleteMessage", + "sqs:GetQueueAttributes", + "sqs:ReceiveMessage", + ], + Resource = [ + aws_sqs_queue.source.arn, + ] + }, + ] + }) +} + +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source2, aws_iam_role_policy.target] + + name = %[1]q + role_arn = aws_iam_role.test2.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn +} +`, rName)) +} + +func testAccPipeConfig_tags1(rName, tag1Key, tag1Value string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), + fmt.Sprintf(` +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn + + tags = { + %[2]q = %[3]q + } +} +`, rName, tag1Key, tag1Value)) +} + +func testAccPipeConfig_tags2(rName, tag1Key, tag1Value, tag2Key, tag2Value string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), + fmt.Sprintf(` +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tag1Key, tag1Value, tag2Key, tag2Value)) +} + +func testAccPipeConfig_targetUpdated(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), + fmt.Sprintf(` +resource "aws_iam_role_policy" "target2" { + role = aws_iam_role.test.id + name = "%[1]s-target2" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "sqs:SendMessage", + ], + Resource = [ + aws_sqs_queue.target2.arn, + ] + }, + ] + }) +} + +resource "aws_sqs_queue" "target2" { + name = "%[1]s-target2" +} + +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target2] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target2.arn +} +`, rName)) +} + +func testAccPipeConfig_targetParameters_inputTemplate(rName, template string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), + fmt.Sprintf(` +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn + + target_parameters { + input_template = %[2]q + } +} +`, rName, template)) +} + +func testAccPipeConfig_basicKinesis(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseKinesisSource(rName), + testAccPipeConfig_baseKinesisTarget(rName), + fmt.Sprintf(` +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_kinesis_stream.source.arn + target = aws_kinesis_stream.target.arn + + source_parameters { + kinesis_stream_parameters { + starting_position = "LATEST" + } + } + + target_parameters { + 
kinesis_stream_parameters { + partition_key = "test" + } + } +} +`, rName)) +} - output, err := tfpipes.FindPipeByName(ctx, conn, rs.Primary.ID) +func testAccPipeConfig_basicDynamoDBSourceCloudWatchLogsTarget(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + fmt.Sprintf(` +resource "aws_iam_role_policy" "source" { + role = aws_iam_role.test.id + name = "%[1]s-source" - if err != nil { - return err - } + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "dynamodb:DescribeStream", + "dynamodb:GetRecords", + "dynamodb:GetShardIterator", + "dynamodb:ListStreams", + ], + Resource = [ + aws_dynamodb_table.source.stream_arn, + "${aws_dynamodb_table.source.stream_arn}/*" + ] + }, + ] + }) +} - *pipe = *output +resource "aws_dynamodb_table" "source" { + name = "%[1]s-source" + billing_mode = "PAY_PER_REQUEST" + hash_key = "PK" + range_key = "SK" + stream_enabled = true + stream_view_type = "NEW_AND_OLD_IMAGES" + + attribute { + name = "PK" + type = "S" + } - return nil - } + attribute { + name = "SK" + type = "S" + } } -func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient(ctx) +resource "aws_iam_role_policy" "target" { + role = aws_iam_role.test.id + name = "%[1]s-target" - input := &pipes.ListPipesInput{} - _, err := conn.ListPipes(ctx, input) + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "logs:PutLogEvents", + ], + Resource = [ + aws_cloudwatch_log_stream.target.arn, + ] + }, + ] + }) +} - if acctest.PreCheckSkipError(err) { - t.Skipf("skipping acceptance testing: %s", err) - } +resource "aws_cloudwatch_log_group" "target" { + name = "%[1]s-target" +} - if err != nil { - t.Fatalf("unexpected PreCheck error: %s", err) - } +resource "aws_cloudwatch_log_stream" "target" { + name = "%[1]s-target" + log_group_name = aws_cloudwatch_log_group.target.name } -const testAccPipeConfig_base = ` -data "aws_caller_identity" "main" {} -data "aws_partition" "main" {} +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] -resource "aws_iam_role" "test" { - assume_role_policy = jsonencode({ + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_dynamodb_table.source.stream_arn + target = aws_cloudwatch_log_group.target.arn + + source_parameters { + dynamodb_stream_parameters { + starting_position = "LATEST" + } + } + + target_parameters { + cloudwatch_logs_parameters { + log_stream_name = aws_cloudwatch_log_stream.target.name + } + } +} +`, rName)) +} + +func testAccPipeConfig_basicActiveMQSourceStepFunctionTarget(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + fmt.Sprintf(` +resource "aws_iam_role_policy" "source" { + role = aws_iam_role.test.id + name = "%[1]s-source" + + policy = jsonencode({ Version = "2012-10-17" - Statement = { - Effect = "Allow" - Action = "sts:AssumeRole" - Principal = { - Service = "pipes.${data.aws_partition.main.dns_suffix}" - } - Condition = { - StringEquals = { - "aws:SourceAccount" = data.aws_caller_identity.main.account_id - } + Statement = [ + { + Effect = "Allow" + Action = [ + "mq:DescribeBroker", + "secretsmanager:GetSecretValue", + "ec2:CreateNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeVpcs", + "ec2:DeleteNetworkInterface", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "logs:CreateLogGroup", + "logs:CreateLogStream", + 
"logs:PutLogEvents" + ], + Resource = [ + "*" + ] + }, + ] + }) + + depends_on = [aws_mq_broker.source] +} + +resource "aws_security_group" "source" { + name = "%[1]s-source" + + ingress { + from_port = 61617 + to_port = 61617 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = { + Name = %[1]q + } +} + +resource "aws_mq_broker" "source" { + broker_name = "%[1]s-source" + engine_type = "ActiveMQ" + engine_version = "5.15.0" + host_instance_type = "mq.t2.micro" + security_groups = [aws_security_group.source.id] + authentication_strategy = "simple" + storage_type = "efs" + + logs { + general = true + } + + user { + username = "Test" + password = "TestTest1234" + } + + publicly_accessible = true +} + +resource "aws_secretsmanager_secret" "source" { + name = "%[1]s-source" +} + +resource "aws_secretsmanager_secret_version" "source" { + secret_id = aws_secretsmanager_secret.source.id + secret_string = jsonencode({ username = "Test", password = "TestTest1234" }) +} + +resource "aws_iam_role" "target" { + name = "%[1]s-target" + + assume_role_policy = < 0 && v[0] != nil { + apiObject.ActiveMQBrokerParameters = expandPipeSourceActiveMQBrokerParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["dynamodb_stream_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.DynamoDBStreamParameters = expandPipeSourceDynamoDBStreamParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["filter_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.FilterCriteria = expandFilterCriteria(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["kinesis_stream_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.KinesisStreamParameters = expandPipeSourceKinesisStreamParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["managed_streaming_kafka_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.ManagedStreamingKafkaParameters = expandPipeSourceManagedStreamingKafkaParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["rabbitmq_broker_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.RabbitMQBrokerParameters = expandPipeSourceRabbitMQBrokerParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["self_managed_kafka_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.SelfManagedKafkaParameters = expandPipeSourceSelfManagedKafkaParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["sqs_queue_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.SqsQueueParameters = expandPipeSourceSQSQueueParameters(v[0].(map[string]interface{})) + } + + return apiObject +} + +func expandUpdatePipeSourceParameters(tfMap map[string]interface{}) *types.UpdatePipeSourceParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.UpdatePipeSourceParameters{} + + if v, ok := tfMap["activemq_broker_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.ActiveMQBrokerParameters = expandUpdatePipeSourceActiveMQBrokerParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["dynamodb_stream_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.DynamoDBStreamParameters = expandUpdatePipeSourceDynamoDBStreamParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["filter_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.FilterCriteria = expandFilterCriteria(v[0].(map[string]interface{})) + } else { 
+ apiObject.FilterCriteria = &types.FilterCriteria{} + } + + if v, ok := tfMap["kinesis_stream_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.KinesisStreamParameters = expandUpdatePipeSourceKinesisStreamParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["managed_streaming_kafka_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.ManagedStreamingKafkaParameters = expandUpdatePipeSourceManagedStreamingKafkaParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["rabbitmq_broker_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.RabbitMQBrokerParameters = expandUpdatePipeSourceRabbitMQBrokerParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["self_managed_kafka_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.SelfManagedKafkaParameters = expandUpdatePipeSourceSelfManagedKafkaParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["sqs_queue_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.SqsQueueParameters = expandUpdatePipeSourceSQSQueueParameters(v[0].(map[string]interface{})) + } + + return apiObject +} + +func expandFilterCriteria(tfMap map[string]interface{}) *types.FilterCriteria { + if tfMap == nil { + return nil + } + + apiObject := &types.FilterCriteria{} + + if v, ok := tfMap["filter"].([]interface{}); ok && len(v) > 0 { + apiObject.Filters = expandFilters(v) + } + + return apiObject +} + +func expandFilter(tfMap map[string]interface{}) *types.Filter { + if tfMap == nil { + return nil + } + + apiObject := &types.Filter{} + + if v, ok := tfMap["pattern"].(string); ok && v != "" { + apiObject.Pattern = aws.String(v) + } + + return apiObject +} + +func expandFilters(tfList []interface{}) []types.Filter { + if len(tfList) == 0 { + return nil + } + + var apiObjects []types.Filter + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandFilter(tfMap) + + if apiObject == nil || apiObject.Pattern == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandPipeSourceActiveMQBrokerParameters(tfMap map[string]interface{}) *types.PipeSourceActiveMQBrokerParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeSourceActiveMQBrokerParameters{} + + if v, ok := tfMap["batch_size"].(int); ok && v != 0 { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["credentials"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Credentials = expandMQBrokerAccessCredentials(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok && v != 0 { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["queue_name"].(string); ok && v != "" { + apiObject.QueueName = aws.String(v) + } + + return apiObject +} + +func expandUpdatePipeSourceActiveMQBrokerParameters(tfMap map[string]interface{}) *types.UpdatePipeSourceActiveMQBrokerParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.UpdatePipeSourceActiveMQBrokerParameters{} + + if v, ok := tfMap["batch_size"].(int); ok { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["credentials"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Credentials = expandMQBrokerAccessCredentials(v[0].(map[string]interface{})) + } + + if v, ok := 
tfMap["maximum_batching_window_in_seconds"].(int); ok { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + return apiObject +} + +func expandMQBrokerAccessCredentials(tfMap map[string]interface{}) types.MQBrokerAccessCredentials { + if tfMap == nil { + return nil + } + + if v, ok := tfMap["basic_auth"].(string); ok && v != "" { + apiObject := &types.MQBrokerAccessCredentialsMemberBasicAuth{ + Value: v, + } + + return apiObject + } + + return nil +} + +func expandPipeSourceDynamoDBStreamParameters(tfMap map[string]interface{}) *types.PipeSourceDynamoDBStreamParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeSourceDynamoDBStreamParameters{} + + if v, ok := tfMap["batch_size"].(int); ok && v != 0 { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["dead_letter_config"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.DeadLetterConfig = expandDeadLetterConfig(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok && v != 0 { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_record_age_in_seconds"].(int); ok && v != 0 { + apiObject.MaximumRecordAgeInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_retry_attempts"].(int); ok && v != 0 { + apiObject.MaximumRetryAttempts = aws.Int32(int32(v)) + } + + if v, ok := tfMap["on_partial_batch_item_failure"].(string); ok && v != "" { + apiObject.OnPartialBatchItemFailure = types.OnPartialBatchItemFailureStreams(v) + } + + if v, ok := tfMap["parallelization_factor"].(int); ok && v != 0 { + apiObject.ParallelizationFactor = aws.Int32(int32(v)) + } + + if v, ok := tfMap["starting_position"].(string); ok && v != "" { + apiObject.StartingPosition = types.DynamoDBStreamStartPosition(v) + } + + return apiObject +} + +func expandUpdatePipeSourceDynamoDBStreamParameters(tfMap map[string]interface{}) *types.UpdatePipeSourceDynamoDBStreamParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.UpdatePipeSourceDynamoDBStreamParameters{} + + if v, ok := tfMap["batch_size"].(int); ok { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["dead_letter_config"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.DeadLetterConfig = expandDeadLetterConfig(v[0].(map[string]interface{})) + } else { + apiObject.DeadLetterConfig = &types.DeadLetterConfig{} + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_record_age_in_seconds"].(int); ok { + apiObject.MaximumRecordAgeInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_retry_attempts"].(int); ok { + apiObject.MaximumRetryAttempts = aws.Int32(int32(v)) + } + + if v, ok := tfMap["on_partial_batch_item_failure"].(string); ok { + apiObject.OnPartialBatchItemFailure = types.OnPartialBatchItemFailureStreams(v) + } + + if v, ok := tfMap["parallelization_factor"].(int); ok { + apiObject.ParallelizationFactor = aws.Int32(int32(v)) + } + + return apiObject +} + +func expandDeadLetterConfig(tfMap map[string]interface{}) *types.DeadLetterConfig { + if tfMap == nil { + return nil + } + + apiObject := &types.DeadLetterConfig{} + + if v, ok := tfMap["arn"].(string); ok && v != "" { + apiObject.Arn = aws.String(v) + } + + return apiObject +} + +func expandPipeSourceKinesisStreamParameters(tfMap map[string]interface{}) *types.PipeSourceKinesisStreamParameters { + if 
tfMap == nil { + return nil + } + + apiObject := &types.PipeSourceKinesisStreamParameters{} + + if v, ok := tfMap["batch_size"].(int); ok && v != 0 { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["dead_letter_config"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.DeadLetterConfig = expandDeadLetterConfig(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok && v != 0 { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_record_age_in_seconds"].(int); ok && v != 0 { + apiObject.MaximumRecordAgeInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_retry_attempts"].(int); ok && v != 0 { + apiObject.MaximumRetryAttempts = aws.Int32(int32(v)) + } + + if v, ok := tfMap["on_partial_batch_item_failure"].(string); ok && v != "" { + apiObject.OnPartialBatchItemFailure = types.OnPartialBatchItemFailureStreams(v) + } + + if v, ok := tfMap["parallelization_factor"].(int); ok && v != 0 { + apiObject.ParallelizationFactor = aws.Int32(int32(v)) + } + + if v, ok := tfMap["starting_position"].(string); ok && v != "" { + apiObject.StartingPosition = types.KinesisStreamStartPosition(v) + } + + if v, ok := tfMap["starting_position_timestamp"].(string); ok && v != "" { + v, _ := time.Parse(time.RFC3339, v) + + apiObject.StartingPositionTimestamp = aws.Time(v) + } + + return apiObject +} + +func expandUpdatePipeSourceKinesisStreamParameters(tfMap map[string]interface{}) *types.UpdatePipeSourceKinesisStreamParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.UpdatePipeSourceKinesisStreamParameters{} + + if v, ok := tfMap["batch_size"].(int); ok { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["dead_letter_config"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.DeadLetterConfig = expandDeadLetterConfig(v[0].(map[string]interface{})) + } else { + apiObject.DeadLetterConfig = &types.DeadLetterConfig{} + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_record_age_in_seconds"].(int); ok { + apiObject.MaximumRecordAgeInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_retry_attempts"].(int); ok { + apiObject.MaximumRetryAttempts = aws.Int32(int32(v)) + } + + if v, ok := tfMap["on_partial_batch_item_failure"].(string); ok { + apiObject.OnPartialBatchItemFailure = types.OnPartialBatchItemFailureStreams(v) + } + + if v, ok := tfMap["parallelization_factor"].(int); ok { + apiObject.ParallelizationFactor = aws.Int32(int32(v)) + } + + return apiObject +} + +func expandPipeSourceManagedStreamingKafkaParameters(tfMap map[string]interface{}) *types.PipeSourceManagedStreamingKafkaParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeSourceManagedStreamingKafkaParameters{} + + if v, ok := tfMap["batch_size"].(int); ok && v != 0 { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["consumer_group_id"].(string); ok && v != "" { + apiObject.ConsumerGroupID = aws.String(v) + } + + if v, ok := tfMap["credentials"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Credentials = expandMSKAccessCredentials(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok && v != 0 { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + if v, ok := 
tfMap["starting_position"].(string); ok && v != "" { + apiObject.StartingPosition = types.MSKStartPosition(v) + } + + if v, ok := tfMap["topic_name"].(string); ok && v != "" { + apiObject.TopicName = aws.String(v) + } + + return apiObject +} + +func expandUpdatePipeSourceManagedStreamingKafkaParameters(tfMap map[string]interface{}) *types.UpdatePipeSourceManagedStreamingKafkaParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.UpdatePipeSourceManagedStreamingKafkaParameters{} + + if v, ok := tfMap["batch_size"].(int); ok { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["credentials"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Credentials = expandMSKAccessCredentials(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + return apiObject +} + +func expandMSKAccessCredentials(tfMap map[string]interface{}) types.MSKAccessCredentials { + if tfMap == nil { + return nil + } + + if v, ok := tfMap["client_certificate_tls_auth"].(string); ok && v != "" { + apiObject := &types.MSKAccessCredentialsMemberClientCertificateTlsAuth{ + Value: v, + } + + return apiObject + } + + if v, ok := tfMap["sasl_scram_512_auth"].(string); ok && v != "" { + apiObject := &types.MSKAccessCredentialsMemberSaslScram512Auth{ + Value: v, + } + + return apiObject + } + + return nil +} + +func expandPipeSourceRabbitMQBrokerParameters(tfMap map[string]interface{}) *types.PipeSourceRabbitMQBrokerParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeSourceRabbitMQBrokerParameters{} + + if v, ok := tfMap["batch_size"].(int); ok && v != 0 { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["credentials"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Credentials = expandMQBrokerAccessCredentials(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok && v != 0 { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["queue_name"].(string); ok && v != "" { + apiObject.QueueName = aws.String(v) + } + + if v, ok := tfMap["virtual_host"].(string); ok && v != "" { + apiObject.VirtualHost = aws.String(v) + } + + return apiObject +} + +func expandUpdatePipeSourceRabbitMQBrokerParameters(tfMap map[string]interface{}) *types.UpdatePipeSourceRabbitMQBrokerParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.UpdatePipeSourceRabbitMQBrokerParameters{} + + if v, ok := tfMap["batch_size"].(int); ok { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["credentials"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Credentials = expandMQBrokerAccessCredentials(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + return apiObject +} + +func expandPipeSourceSelfManagedKafkaParameters(tfMap map[string]interface{}) *types.PipeSourceSelfManagedKafkaParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeSourceSelfManagedKafkaParameters{} + + if v, ok := tfMap["additional_bootstrap_servers"].(*schema.Set); ok && v.Len() > 0 { + apiObject.AdditionalBootstrapServers = flex.ExpandStringValueSet(v) + } + + if v, ok := tfMap["batch_size"].(int); ok && v != 0 { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := 
tfMap["consumer_group_id"].(string); ok && v != "" { + apiObject.ConsumerGroupID = aws.String(v) + } + + if v, ok := tfMap["credentials"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Credentials = expandSelfManagedKafkaAccessConfigurationCredentials(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok && v != 0 { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["server_root_ca_certificate"].(string); ok && v != "" { + apiObject.ServerRootCaCertificate = aws.String(v) + } + + if v, ok := tfMap["starting_position"].(string); ok && v != "" { + apiObject.StartingPosition = types.SelfManagedKafkaStartPosition(v) + } + + if v, ok := tfMap["topic_name"].(string); ok && v != "" { + apiObject.TopicName = aws.String(v) + } + + if v, ok := tfMap["vpc"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Vpc = expandSelfManagedKafkaAccessConfigurationVPC(v[0].(map[string]interface{})) + } + + return apiObject +} + +func expandUpdatePipeSourceSelfManagedKafkaParameters(tfMap map[string]interface{}) *types.UpdatePipeSourceSelfManagedKafkaParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.UpdatePipeSourceSelfManagedKafkaParameters{} + + if v, ok := tfMap["batch_size"].(int); ok && v != 0 { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["credentials"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Credentials = expandSelfManagedKafkaAccessConfigurationCredentials(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["server_root_ca_certificate"].(string); ok { + apiObject.ServerRootCaCertificate = aws.String(v) + } + + if v, ok := tfMap["vpc"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Vpc = expandSelfManagedKafkaAccessConfigurationVPC(v[0].(map[string]interface{})) + } else { + apiObject.Vpc = &types.SelfManagedKafkaAccessConfigurationVpc{} + } + + return apiObject +} + +func expandSelfManagedKafkaAccessConfigurationCredentials(tfMap map[string]interface{}) types.SelfManagedKafkaAccessConfigurationCredentials { + if tfMap == nil { + return nil + } + + if v, ok := tfMap["basic_auth"].(string); ok && v != "" { + apiObject := &types.SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth{ + Value: v, + } + + return apiObject + } + + if v, ok := tfMap["client_certificate_tls_auth"].(string); ok && v != "" { + apiObject := &types.SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth{ + Value: v, + } + + return apiObject + } + + if v, ok := tfMap["sasl_scram_256_auth"].(string); ok && v != "" { + apiObject := &types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth{ + Value: v, + } + + return apiObject + } + + if v, ok := tfMap["sasl_scram_512_auth"].(string); ok && v != "" { + apiObject := &types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth{ + Value: v, + } + + return apiObject + } + + return nil +} + +func expandSelfManagedKafkaAccessConfigurationVPC(tfMap map[string]interface{}) *types.SelfManagedKafkaAccessConfigurationVpc { + if tfMap == nil { + return nil + } + + apiObject := &types.SelfManagedKafkaAccessConfigurationVpc{} + + if v, ok := tfMap["security_groups"].(*schema.Set); ok && v.Len() > 0 { + apiObject.SecurityGroup = flex.ExpandStringValueSet(v) + } + + if v, ok := 
tfMap["subnets"].(*schema.Set); ok && v.Len() > 0 { + apiObject.Subnets = flex.ExpandStringValueSet(v) + } + + return apiObject +} + +func expandPipeSourceSQSQueueParameters(tfMap map[string]interface{}) *types.PipeSourceSqsQueueParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeSourceSqsQueueParameters{} + + if v, ok := tfMap["batch_size"].(int); ok && v != 0 { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok && v != 0 { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + return apiObject +} + +func expandUpdatePipeSourceSQSQueueParameters(tfMap map[string]interface{}) *types.UpdatePipeSourceSqsQueueParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.UpdatePipeSourceSqsQueueParameters{} + + if v, ok := tfMap["batch_size"].(int); ok { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + return apiObject +} + +func flattenPipeSourceParameters(apiObject *types.PipeSourceParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.ActiveMQBrokerParameters; v != nil { + tfMap["activemq_broker_parameters"] = []interface{}{flattenPipeSourceActiveMQBrokerParameters(v)} + } + + if v := apiObject.DynamoDBStreamParameters; v != nil { + tfMap["dynamodb_stream_parameters"] = []interface{}{flattenPipeSourceDynamoDBStreamParameters(v)} + } + + if v := apiObject.FilterCriteria; v != nil { + tfMap["filter_criteria"] = []interface{}{flattenFilterCriteria(v)} + } + + if v := apiObject.KinesisStreamParameters; v != nil { + tfMap["kinesis_stream_parameters"] = []interface{}{flattenPipeSourceKinesisStreamParameters(v)} + } + + if v := apiObject.ManagedStreamingKafkaParameters; v != nil { + tfMap["managed_streaming_kafka_parameters"] = []interface{}{flattenPipeSourceManagedStreamingKafkaParameters(v)} + } + + if v := apiObject.RabbitMQBrokerParameters; v != nil { + tfMap["rabbitmq_broker_parameters"] = []interface{}{flattenPipeSourceRabbitMQBrokerParameters(v)} + } + + if v := apiObject.SelfManagedKafkaParameters; v != nil { + tfMap["self_managed_kafka_parameters"] = []interface{}{flattenPipeSourceSelfManagedKafkaParameters(v)} + } + + if v := apiObject.SqsQueueParameters; v != nil { + tfMap["sqs_queue_parameters"] = []interface{}{flattenPipeSourceSQSQueueParameters(v)} + } + + return tfMap +} + +func flattenFilterCriteria(apiObject *types.FilterCriteria) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Filters; v != nil { + tfMap["filter"] = flattenFilters(v) + } + + return tfMap +} + +func flattenFilter(apiObject types.Filter) map[string]interface{} { + tfMap := map[string]interface{}{} + + if v := apiObject.Pattern; v != nil { + tfMap["pattern"] = aws.ToString(v) + } + + return tfMap +} + +func flattenFilters(apiObjects []types.Filter) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenFilter(apiObject)) + } + + return tfList +} + +func flattenPipeSourceActiveMQBrokerParameters(apiObject *types.PipeSourceActiveMQBrokerParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.BatchSize; v != 
nil { + tfMap["batch_size"] = aws.ToInt32(v) + } + + if v := apiObject.Credentials; v != nil { + tfMap["credentials"] = []interface{}{flattenMQBrokerAccessCredentials(v)} + } + + if v := apiObject.MaximumBatchingWindowInSeconds; v != nil { + tfMap["maximum_batching_window_in_seconds"] = aws.ToInt32(v) + } + + if v := apiObject.QueueName; v != nil { + tfMap["queue_name"] = aws.ToString(v) + } + + return tfMap +} + +func flattenMQBrokerAccessCredentials(apiObject types.MQBrokerAccessCredentials) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if apiObject, ok := apiObject.(*types.MQBrokerAccessCredentialsMemberBasicAuth); ok { + if v := apiObject.Value; v != "" { + tfMap["basic_auth"] = v + } + } + + return tfMap +} + +func flattenPipeSourceDynamoDBStreamParameters(apiObject *types.PipeSourceDynamoDBStreamParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.BatchSize; v != nil { + tfMap["batch_size"] = aws.ToInt32(v) + } + + if v := apiObject.DeadLetterConfig; v != nil { + tfMap["dead_letter_config"] = []interface{}{flattenDeadLetterConfig(v)} + } + + if v := apiObject.MaximumBatchingWindowInSeconds; v != nil { + tfMap["maximum_batching_window_in_seconds"] = aws.ToInt32(v) + } + + if v := apiObject.MaximumRecordAgeInSeconds; v != nil { + tfMap["maximum_record_age_in_seconds"] = aws.ToInt32(v) + } + + if v := apiObject.MaximumRetryAttempts; v != nil { + tfMap["maximum_retry_attempts"] = aws.ToInt32(v) + } + + if v := apiObject.OnPartialBatchItemFailure; v != "" { + tfMap["on_partial_batch_item_failure"] = v + } + + if v := apiObject.ParallelizationFactor; v != nil { + tfMap["parallelization_factor"] = aws.ToInt32(v) + } + + if v := apiObject.StartingPosition; v != "" { + tfMap["starting_position"] = v + } + + return tfMap +} + +func flattenPipeSourceKinesisStreamParameters(apiObject *types.PipeSourceKinesisStreamParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.BatchSize; v != nil { + tfMap["batch_size"] = aws.ToInt32(v) + } + + if v := apiObject.DeadLetterConfig; v != nil { + tfMap["dead_letter_config"] = []interface{}{flattenDeadLetterConfig(v)} + } + + if v := apiObject.MaximumBatchingWindowInSeconds; v != nil { + tfMap["maximum_batching_window_in_seconds"] = aws.ToInt32(v) + } + + if v := apiObject.MaximumRecordAgeInSeconds; v != nil { + tfMap["maximum_record_age_in_seconds"] = aws.ToInt32(v) + } + + if v := apiObject.MaximumRetryAttempts; v != nil { + tfMap["maximum_retry_attempts"] = aws.ToInt32(v) + } + + if v := apiObject.OnPartialBatchItemFailure; v != "" { + tfMap["on_partial_batch_item_failure"] = v + } + + if v := apiObject.ParallelizationFactor; v != nil { + tfMap["parallelization_factor"] = aws.ToInt32(v) + } + + if v := apiObject.StartingPosition; v != "" { + tfMap["starting_position"] = v + } + + if v := apiObject.StartingPositionTimestamp; v != nil { + tfMap["starting_position_timestamp"] = aws.ToTime(v).Format(time.RFC3339) + } + + return tfMap +} + +func flattenPipeSourceManagedStreamingKafkaParameters(apiObject *types.PipeSourceManagedStreamingKafkaParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.BatchSize; v != nil { + tfMap["batch_size"] = aws.ToInt32(v) + } + + if v := apiObject.ConsumerGroupID; v != nil { + tfMap["consumer_group_id"] = aws.ToString(v) + 
} + + if v := apiObject.Credentials; v != nil { + tfMap["credentials"] = []interface{}{flattenMSKAccessCredentials(v)} + } + + if v := apiObject.MaximumBatchingWindowInSeconds; v != nil { + tfMap["maximum_batching_window_in_seconds"] = aws.ToInt32(v) + } + + if v := apiObject.StartingPosition; v != "" { + tfMap["starting_position"] = v + } + + if v := apiObject.TopicName; v != nil { + tfMap["topic_name"] = aws.ToString(v) + } + + return tfMap +} + +func flattenMSKAccessCredentials(apiObject types.MSKAccessCredentials) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if apiObject, ok := apiObject.(*types.MSKAccessCredentialsMemberClientCertificateTlsAuth); ok { + if v := apiObject.Value; v != "" { + tfMap["client_certificate_tls_auth"] = v + } + } + + if apiObject, ok := apiObject.(*types.MSKAccessCredentialsMemberSaslScram512Auth); ok { + if v := apiObject.Value; v != "" { + tfMap["sasl_scram_512_auth"] = v + } + } + + return tfMap +} + +func flattenPipeSourceRabbitMQBrokerParameters(apiObject *types.PipeSourceRabbitMQBrokerParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.BatchSize; v != nil { + tfMap["batch_size"] = aws.ToInt32(v) + } + + if v := apiObject.Credentials; v != nil { + tfMap["credentials"] = []interface{}{flattenMQBrokerAccessCredentials(v)} + } + + if v := apiObject.MaximumBatchingWindowInSeconds; v != nil { + tfMap["maximum_batching_window_in_seconds"] = aws.ToInt32(v) + } + + if v := apiObject.QueueName; v != nil { + tfMap["queue_name"] = aws.ToString(v) + } + + if v := apiObject.VirtualHost; v != nil { + tfMap["virtual_host"] = aws.ToString(v) + } + + return tfMap +} + +func flattenPipeSourceSelfManagedKafkaParameters(apiObject *types.PipeSourceSelfManagedKafkaParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.AdditionalBootstrapServers; v != nil { + tfMap["additional_bootstrap_servers"] = v + } + + if v := apiObject.BatchSize; v != nil { + tfMap["batch_size"] = aws.ToInt32(v) + } + + if v := apiObject.ConsumerGroupID; v != nil { + tfMap["consumer_group_id"] = aws.ToString(v) + } + + if v := apiObject.Credentials; v != nil { + tfMap["credentials"] = []interface{}{flattenSelfManagedKafkaAccessConfigurationCredentials(v)} + } + + if v := apiObject.MaximumBatchingWindowInSeconds; v != nil { + tfMap["maximum_batching_window_in_seconds"] = aws.ToInt32(v) + } + + if v := apiObject.ServerRootCaCertificate; v != nil { + tfMap["server_root_ca_certificate"] = aws.ToString(v) + } + + if v := apiObject.StartingPosition; v != "" { + tfMap["starting_position"] = v + } + + if v := apiObject.TopicName; v != nil { + tfMap["topic_name"] = aws.ToString(v) + } + + if v := apiObject.Vpc; v != nil { + tfMap["vpc"] = []interface{}{flattenSelfManagedKafkaAccessConfigurationVPC(v)} + } + + return tfMap +} + +func flattenSelfManagedKafkaAccessConfigurationCredentials(apiObject types.SelfManagedKafkaAccessConfigurationCredentials) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if apiObject, ok := apiObject.(*types.SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth); ok { + if v := apiObject.Value; v != "" { + tfMap["basic_auth"] = v + } + } + + if apiObject, ok := apiObject.(*types.SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth); ok { + if v := apiObject.Value; v != 
"" { + tfMap["client_certificate_tls_auth"] = v + } + } + + if apiObject, ok := apiObject.(*types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth); ok { + if v := apiObject.Value; v != "" { + tfMap["sasl_scram_256_auth"] = v + } + } + + if apiObject, ok := apiObject.(*types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth); ok { + if v := apiObject.Value; v != "" { + tfMap["sasl_scram_512_auth"] = v + } + } + + return tfMap +} + +func flattenSelfManagedKafkaAccessConfigurationVPC(apiObject *types.SelfManagedKafkaAccessConfigurationVpc) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.SecurityGroup; v != nil { + tfMap["security_groups"] = v + } + + if v := apiObject.Subnets; v != nil { + tfMap["subnets"] = v + } + + return tfMap +} + +func flattenPipeSourceSQSQueueParameters(apiObject *types.PipeSourceSqsQueueParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.BatchSize; v != nil { + tfMap["batch_size"] = aws.ToInt32(v) + } + + if v := apiObject.MaximumBatchingWindowInSeconds; v != nil { + tfMap["maximum_batching_window_in_seconds"] = aws.ToInt32(v) + } + + return tfMap +} + +func flattenDeadLetterConfig(apiObject *types.DeadLetterConfig) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Arn; v != nil { + tfMap["arn"] = aws.ToString(v) + } + + return tfMap +} diff --git a/internal/service/pipes/status.go b/internal/service/pipes/status.go deleted file mode 100644 index 4c51772f949..00000000000 --- a/internal/service/pipes/status.go +++ /dev/null @@ -1,40 +0,0 @@ -package pipes - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/service/pipes" - "github.com/aws/aws-sdk-go-v2/service/pipes/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -const ( - pipeStatusRunning = string(types.PipeStateRunning) - pipeStatusStopped = string(types.PipeStateStopped) - pipeStatusCreating = string(types.PipeStateCreating) - pipeStatusUpdating = string(types.PipeStateUpdating) - pipeStatusDeleting = string(types.PipeStateDeleting) - pipeStatusStarting = string(types.PipeStateStarting) - pipeStatusStopping = string(types.PipeStateStopping) - pipeStatusCreateFailed = string(types.PipeStateCreateFailed) - pipeStatusUpdateFailed = string(types.PipeStateUpdateFailed) - pipeStatusStartFailed = string(types.PipeStateStartFailed) - pipeStatusStopFailed = string(types.PipeStateStopFailed) -) - -func statusPipe(ctx context.Context, conn *pipes.Client, name string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindPipeByName(ctx, conn, name) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, string(output.CurrentState), nil - } -} diff --git a/internal/service/pipes/sweep.go b/internal/service/pipes/sweep.go index 1355344f58f..8cc2c606098 100644 --- a/internal/service/pipes/sweep.go +++ b/internal/service/pipes/sweep.go @@ -9,7 +9,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/pipes" - "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" ) @@ -29,37 +28,32 @@ func sweepPipes(region string) 
error { } conn := client.PipesClient(ctx) sweepResources := make([]sweep.Sweepable, 0) - var errs *multierror.Error - paginator := pipes.NewListPipesPaginator(conn, &pipes.ListPipesInput{}) + pages := pipes.NewListPipesPaginator(conn, &pipes.ListPipesInput{}) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for paginator.HasMorePages() { - page, err := paginator.NextPage(ctx) + if sweep.SkipSweepError(err) { + log.Printf("[WARN] Skipping Pipe sweep for %s: %s", region, err) + return nil + } if err != nil { - errs = multierror.Append(errs, fmt.Errorf("listing Pipes for %s: %w", region, err)) - break + return fmt.Errorf("error listing Pipes (%s): %w", region, err) } - for _, it := range page.Pipes { - name := aws.ToString(it.Name) - - r := ResourcePipe() + for _, v := range page.Pipes { + r := resourcePipe() d := r.Data(nil) - d.SetId(name) + d.SetId(aws.ToString(v.Name)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } } if err := sweep.SweepOrchestratorWithContext(ctx, sweepResources); err != nil { - errs = multierror.Append(errs, fmt.Errorf("sweeping Pipe for %s: %w", region, err)) - } - - if sweep.SkipSweepError(err) { - log.Printf("[WARN] Skipping Pipe sweep for %s: %s", region, errs) - return nil + return fmt.Errorf("error sweeping Pipes (%s): %w", region, err) } - return errs.ErrorOrNil() + return nil } diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go new file mode 100644 index 00000000000..ab6a9369d28 --- /dev/null +++ b/internal/service/pipes/target_parameters.go @@ -0,0 +1,2698 @@ +package pipes + +import ( + "regexp" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/pipes/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/verify" +) + +func targetParametersSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_job_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.cloudwatch_logs_parameters", + "target_parameters.0.ecs_task_parameters", + "target_parameters.0.eventbridge_event_bus_parameters", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream_parameters", + "target_parameters.0.lambda_function_parameters", + "target_parameters.0.redshift_data_parameters", + "target_parameters.0.sagemaker_pipeline_parameters", + "target_parameters.0.sqs_queue_parameters", + "target_parameters.0.step_function_state_machine_parameters", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "array_properties": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(2, 10000), + }, + }, + }, + }, + "container_overrides": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "command": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + 
}, + }, + "environment": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + }, + "value": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "instance_type": { + Type: schema.TypeString, + Optional: true, + }, + "resource_requirement": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.BatchResourceRequirementType](), + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + }, + "depends_on": { + Type: schema.TypeList, + Optional: true, + MaxItems: 20, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "job_id": { + Type: schema.TypeString, + Optional: true, + }, + "type": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.BatchJobDependencyType](), + }, + }, + }, + }, + "job_definition": { + Type: schema.TypeString, + Required: true, + }, + "job_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, + "parameters": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "retry_strategy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attempts": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 10), + }, + }, + }, + }, + }, + }, + }, + "cloudwatch_logs_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_job_parameters", + "target_parameters.0.ecs_task_parameters", + "target_parameters.0.eventbridge_event_bus_parameters", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream_parameters", + "target_parameters.0.lambda_function_parameters", + "target_parameters.0.redshift_data_parameters", + "target_parameters.0.sagemaker_pipeline_parameters", + "target_parameters.0.sqs_queue_parameters", + "target_parameters.0.step_function_state_machine_parameters", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "log_stream_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 256), + }, + "timestamp": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 256), + validation.StringMatch(regexp.MustCompile(`^\$(\.[\w/_-]+(\[(\d+|\*)\])*)*$`), ""), + ), + }, + }, + }, + }, + "ecs_task_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_job_parameters", + "target_parameters.0.cloudwatch_logs_parameters", + "target_parameters.0.eventbridge_event_bus_parameters", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream_parameters", + "target_parameters.0.lambda_function_parameters", + "target_parameters.0.redshift_data_parameters", + "target_parameters.0.sagemaker_pipeline_parameters", + "target_parameters.0.sqs_queue_parameters", + "target_parameters.0.step_function_state_machine_parameters", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "capacity_provider_strategy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 6, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "base": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 100000), + }, + "capacity_provider": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, + "weight": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 1000), + }, + }, + }, + }, + "enable_ecs_managed_tags": { + Type: schema.TypeBool, + Optional: true, + }, + "enable_execute_command": { + Type: schema.TypeBool, + Optional: true, + }, + "group": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, + "launch_type": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.LaunchType](), + }, + "network_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "aws_vpc_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "assign_public_ip": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.AssignPublicIp](), + }, + "security_groups": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 5, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 1024), + validation.StringMatch(regexp.MustCompile(`^sg-[0-9a-zA-Z]*|(\$(\.[\w/_-]+(\[(\d+|\*)\])*)*)$`), ""), + ), + }, + }, + "subnets": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 16, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 1024), + validation.StringMatch(regexp.MustCompile(`^subnet-[0-9a-z]*|(\$(\.[\w/_-]+(\[(\d+|\*)\])*)*)$`), ""), + ), + }, + }, + }, + }, + }, + }, + }, + }, + "overrides": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "container_override": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "command": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "cpu": { + Type: schema.TypeInt, + Optional: true, + }, + "environment": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + }, + "value": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "environment_file": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.EcsEnvironmentFileType](), + }, + "value": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + }, + }, + }, + "memory": { + Type: schema.TypeInt, + Optional: true, + }, + "memory_reservation": { + Type: schema.TypeInt, + Optional: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + }, + "resource_requirement": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.EcsResourceRequirementType](), + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + }, + "cpu": { + Type: 
schema.TypeString, + Optional: true, + }, + "ephemeral_storage": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size_in_gib": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(21, 200), + }, + }, + }, + }, + "execution_role_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, + "inference_accelerator_override": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_name": { + Type: schema.TypeString, + Optional: true, + }, + "device_type": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "memory": { + Type: schema.TypeString, + Optional: true, + }, + "task_role_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, + }, + }, + }, + "placement_constraint": { + Type: schema.TypeList, + Optional: true, + MaxItems: 10, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expression": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 2000), + }, + "type": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.PlacementConstraintType](), + }, + }, + }, + }, + "placement_strategy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 5, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, + "type": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.PlacementStrategyType](), + }, + }, + }, + }, + "platform_version": { + Type: schema.TypeString, + Optional: true, + }, + "propagate_tags": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.PropagateTags](), + }, + "reference_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 1024), + }, + "tags": tftags.TagsSchema(), + "task_count": { + Type: schema.TypeInt, + Optional: true, + }, + "task_definition_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + }, + }, + }, + "eventbridge_event_bus_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_job_parameters", + "target_parameters.0.cloudwatch_logs_parameters", + "target_parameters.0.ecs_task_parameters", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream_parameters", + "target_parameters.0.lambda_function_parameters", + "target_parameters.0.redshift_data_parameters", + "target_parameters.0.sagemaker_pipeline_parameters", + "target_parameters.0.sqs_queue_parameters", + "target_parameters.0.step_function_state_machine_parameters", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "detail_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 128), + }, + "endpoint_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 50), + validation.StringMatch(regexp.MustCompile(`^[A-Za-z0-9\-]+[\.][A-Za-z0-9\-]+$`), ""), + ), + }, + "resources": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 10, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidARN, + }, + }, + "source": { + Type: schema.TypeString, + Optional: true, 
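+									// Identifies the source of the events sent to the event bus,
+									// e.g. "com.mycompany.myapp".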
+ ValidateFunc: validation.All( + validation.StringLenBetween(1, 256), + ), + }, + "time": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 256), + validation.StringMatch(regexp.MustCompile(`^\$(\.[\w/_-]+(\[(\d+|\*)\])*)*$`), ""), + ), + }, + }, + }, + }, + "http_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_job_parameters", + "target_parameters.0.cloudwatch_logs_parameters", + "target_parameters.0.ecs_task_parameters", + "target_parameters.0.eventbridge_event_bus_parameters", + "target_parameters.0.kinesis_stream_parameters", + "target_parameters.0.lambda_function_parameters", + "target_parameters.0.redshift_data_parameters", + "target_parameters.0.sagemaker_pipeline_parameters", + "target_parameters.0.sqs_queue_parameters", + "target_parameters.0.step_function_state_machine_parameters", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header_parameters": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "path_parameter_values": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "query_string_parameters": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "input_template": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 8192), + }, + "kinesis_stream_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_job_parameters", + "target_parameters.0.cloudwatch_logs_parameters", + "target_parameters.0.ecs_task_parameters", + "target_parameters.0.eventbridge_event_bus_parameters", + "target_parameters.0.http_parameters", + "target_parameters.0.lambda_function_parameters", + "target_parameters.0.redshift_data_parameters", + "target_parameters.0.sagemaker_pipeline_parameters", + "target_parameters.0.sqs_queue_parameters", + "target_parameters.0.step_function_state_machine_parameters", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "partition_key": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 256), + }, + }, + }, + }, + "lambda_function_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_job_parameters", + "target_parameters.0.cloudwatch_logs_parameters", + "target_parameters.0.ecs_task_parameters", + "target_parameters.0.eventbridge_event_bus_parameters", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream_parameters", + "target_parameters.0.redshift_data_parameters", + "target_parameters.0.sagemaker_pipeline_parameters", + "target_parameters.0.sqs_queue_parameters", + "target_parameters.0.step_function_state_machine_parameters", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "invocation_type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.PipeTargetInvocationType](), + }, + }, + }, + }, + "redshift_data_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_job_parameters", + "target_parameters.0.cloudwatch_logs_parameters", + "target_parameters.0.ecs_task_parameters", + 
"target_parameters.0.eventbridge_event_bus_parameters", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream_parameters", + "target_parameters.0.lambda_function_parameters", + "target_parameters.0.sagemaker_pipeline_parameters", + "target_parameters.0.sqs_queue_parameters", + "target_parameters.0.step_function_state_machine_parameters", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "database": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 64), + }, + "db_user": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, + "secret_manager_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, + "sqls": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringLenBetween(1, 100000), + }, + }, + "statement_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 500), + }, + "with_event": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "sagemaker_pipeline_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_job_parameters", + "target_parameters.0.cloudwatch_logs_parameters", + "target_parameters.0.ecs_task_parameters", + "target_parameters.0.eventbridge_event_bus_parameters", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream_parameters", + "target_parameters.0.lambda_function_parameters", + "target_parameters.0.redshift_data_parameters", + "target_parameters.0.sqs_queue_parameters", + "target_parameters.0.step_function_state_machine_parameters", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pipeline_parameter": { + Type: schema.TypeList, + Optional: true, + MaxItems: 200, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 256), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9](-*[a-zA-Z0-9])*|(\$(\.[\w/_-]+(\[(\d+|\*)\])*)*)$`), ""), + ), + }, + "value": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 1024), + }, + }, + }, + }, + }, + }, + }, + "sqs_queue_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_job_parameters", + "target_parameters.0.cloudwatch_logs_parameters", + "target_parameters.0.ecs_task_parameters", + "target_parameters.0.eventbridge_event_bus_parameters", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream_parameters", + "target_parameters.0.lambda_function_parameters", + "target_parameters.0.redshift_data_parameters", + "target_parameters.0.sagemaker_pipeline_parameters", + "target_parameters.0.step_function_state_machine_parameters", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message_deduplication_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 100), + }, + "message_group_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 100), + }, + }, + }, + }, + "step_function_state_machine_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_job_parameters", + 
"target_parameters.0.cloudwatch_logs_parameters", + "target_parameters.0.ecs_task_parameters", + "target_parameters.0.eventbridge_event_bus_parameters", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream_parameters", + "target_parameters.0.lambda_function_parameters", + "target_parameters.0.redshift_data_parameters", + "target_parameters.0.sagemaker_pipeline_parameters", + "target_parameters.0.sqs_queue_parameters", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "invocation_type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.PipeTargetInvocationType](), + }, + }, + }, + }, + }, + }, + } +} + +func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetParameters{} + + if v, ok := tfMap["batch_job_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.BatchJobParameters = expandPipeTargetBatchJobParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["cloudwatch_logs_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.CloudWatchLogsParameters = expandPipeTargetCloudWatchLogsParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["ecs_task_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.EcsTaskParameters = expandPipeTargetECSTaskParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["eventbridge_event_bus_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.EventBridgeEventBusParameters = expandPipeTargetEventBridgeEventBusParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["http_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.HttpParameters = expandPipeTargetHTTPParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["input_template"].(string); ok && v != "" { + apiObject.InputTemplate = aws.String(v) + } + + if v, ok := tfMap["kinesis_stream_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.KinesisStreamParameters = expandPipeTargetKinesisStreamParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["lambda_function_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.LambdaFunctionParameters = expandPipeTargetLambdaFunctionParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["redshift_data_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.RedshiftDataParameters = expandPipeTargetRedshiftDataParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["sagemaker_pipeline_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.SageMakerPipelineParameters = expandPipeTargetSageMakerPipelineParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["sqs_queue_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.SqsQueueParameters = expandPipeTargetSQSQueueParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["step_function_state_machine_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.StepFunctionStateMachineParameters = expandPipeTargetStateMachineParameters(v[0].(map[string]interface{})) + } + + return apiObject +} + +func expandPipeTargetBatchJobParameters(tfMap map[string]interface{}) *types.PipeTargetBatchJobParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetBatchJobParameters{} 
+ + if v, ok := tfMap["array_properties"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.ArrayProperties = expandBatchArrayProperties(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["container_overrides"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.ContainerOverrides = expandBatchContainerOverrides(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["depends_on"].([]interface{}); ok && len(v) > 0 { + apiObject.DependsOn = expandBatchJobDependencies(v) + } + + if v, ok := tfMap["job_definition"].(string); ok && v != "" { + apiObject.JobDefinition = aws.String(v) + } + + if v, ok := tfMap["job_name"].(string); ok && v != "" { + apiObject.JobName = aws.String(v) + } + + if v, ok := tfMap["parameters"].(map[string]interface{}); ok && len(v) > 0 { + apiObject.Parameters = flex.ExpandStringValueMap(v) + } + + if v, ok := tfMap["retry_strategy"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.RetryStrategy = expandBatchRetryStrategy(v[0].(map[string]interface{})) + } + + return apiObject +} + +func expandBatchArrayProperties(tfMap map[string]interface{}) *types.BatchArrayProperties { + if tfMap == nil { + return nil + } + + apiObject := &types.BatchArrayProperties{} + + if v, ok := tfMap["size"].(int); ok { + apiObject.Size = int32(v) + } + + return apiObject +} + +func expandBatchContainerOverrides(tfMap map[string]interface{}) *types.BatchContainerOverrides { + if tfMap == nil { + return nil + } + + apiObject := &types.BatchContainerOverrides{} + + if v, ok := tfMap["command"].([]interface{}); ok && len(v) > 0 { + apiObject.Command = flex.ExpandStringValueList(v) + } + + if v, ok := tfMap["environment"].([]interface{}); ok && len(v) > 0 { + apiObject.Environment = expandBatchEnvironmentVariables(v) + } + + if v, ok := tfMap["instance_type"].(string); ok && v != "" { + apiObject.InstanceType = aws.String(v) + } + + if v, ok := tfMap["resource_requirement"].([]interface{}); ok && len(v) > 0 { + apiObject.ResourceRequirements = expandBatchResourceRequirements(v) + } + + return apiObject +} + +func expandBatchEnvironmentVariable(tfMap map[string]interface{}) *types.BatchEnvironmentVariable { + if tfMap == nil { + return nil + } + + apiObject := &types.BatchEnvironmentVariable{} + + if v, ok := tfMap["name"].(string); ok && v != "" { + apiObject.Name = aws.String(v) + } + + if v, ok := tfMap["value"].(string); ok && v != "" { + apiObject.Value = aws.String(v) + } + + return apiObject +} + +func expandBatchEnvironmentVariables(tfList []interface{}) []types.BatchEnvironmentVariable { + if len(tfList) == 0 { + return nil + } + + var apiObjects []types.BatchEnvironmentVariable + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandBatchEnvironmentVariable(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandBatchResourceRequirement(tfMap map[string]interface{}) *types.BatchResourceRequirement { + if tfMap == nil { + return nil + } + + apiObject := &types.BatchResourceRequirement{} + + if v, ok := tfMap["type"].(string); ok && v != "" { + apiObject.Type = types.BatchResourceRequirementType(v) + } + + if v, ok := tfMap["value"].(string); ok && v != "" { + apiObject.Value = aws.String(v) + } + + return apiObject +} + +func expandBatchResourceRequirements(tfList []interface{}) []types.BatchResourceRequirement { + if len(tfList) == 0 { + return nil + } + + var apiObjects 
[]types.BatchResourceRequirement + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandBatchResourceRequirement(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandBatchJobDependency(tfMap map[string]interface{}) *types.BatchJobDependency { + if tfMap == nil { + return nil + } + + apiObject := &types.BatchJobDependency{} + + if v, ok := tfMap["job_id"].(string); ok && v != "" { + apiObject.JobId = aws.String(v) + } + + if v, ok := tfMap["type"].(string); ok && v != "" { + apiObject.Type = types.BatchJobDependencyType(v) + } + + return apiObject +} + +func expandBatchJobDependencies(tfList []interface{}) []types.BatchJobDependency { + if len(tfList) == 0 { + return nil + } + + var apiObjects []types.BatchJobDependency + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandBatchJobDependency(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandBatchRetryStrategy(tfMap map[string]interface{}) *types.BatchRetryStrategy { + if tfMap == nil { + return nil + } + + apiObject := &types.BatchRetryStrategy{} + + if v, ok := tfMap["attempts"].(int); ok { + apiObject.Attempts = int32(v) + } + + return apiObject +} + +func expandPipeTargetCloudWatchLogsParameters(tfMap map[string]interface{}) *types.PipeTargetCloudWatchLogsParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetCloudWatchLogsParameters{} + + if v, ok := tfMap["log_stream_name"].(string); ok && v != "" { + apiObject.LogStreamName = aws.String(v) + } + + if v, ok := tfMap["timestamp"].(string); ok && v != "" { + apiObject.Timestamp = aws.String(v) + } + + return apiObject +} + +func expandPipeTargetECSTaskParameters(tfMap map[string]interface{}) *types.PipeTargetEcsTaskParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetEcsTaskParameters{} + + if v, ok := tfMap["capacity_provider_strategy"].([]interface{}); ok && len(v) > 0 { + apiObject.CapacityProviderStrategy = expandCapacityProviderStrategyItems(v) + } + + if v, ok := tfMap["enable_ecs_managed_tags"].(bool); ok { + apiObject.EnableECSManagedTags = v + } + + if v, ok := tfMap["enable_execute_command"].(bool); ok { + apiObject.EnableExecuteCommand = v + } + + if v, ok := tfMap["group"].(string); ok && v != "" { + apiObject.Group = aws.String(v) + } + + if v, ok := tfMap["launch_type"].(string); ok && v != "" { + apiObject.LaunchType = types.LaunchType(v) + } + + if v, ok := tfMap["network_configuration"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.NetworkConfiguration = expandNetworkConfiguration(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["overrides"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Overrides = expandECSTaskOverride(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["placement_constraint"].([]interface{}); ok && len(v) > 0 { + apiObject.PlacementConstraints = expandPlacementConstraints(v) + } + + if v, ok := tfMap["placement_strategy"].([]interface{}); ok && len(v) > 0 { + apiObject.PlacementStrategy = expandPlacementStrategies(v) + } + + if v, ok := tfMap["platform_version"].(string); ok && v != "" { + apiObject.PlatformVersion = aws.String(v) + } + + if v, ok := tfMap["propagate_tags"].(string); ok && v != "" { + 
apiObject.PropagateTags = types.PropagateTags(v) + } + + if v, ok := tfMap["reference_id"].(string); ok && v != "" { + apiObject.ReferenceId = aws.String(v) + } + + if v, ok := tfMap["tags"].(map[string]interface{}); ok && len(v) > 0 { + for k, v := range flex.ExpandStringValueMap(v) { + apiObject.Tags = append(apiObject.Tags, types.Tag{Key: aws.String(k), Value: aws.String(v)}) + } + } + + if v, ok := tfMap["task_count"].(int); ok { + apiObject.TaskCount = aws.Int32(int32(v)) + } + + if v, ok := tfMap["task_definition_arn"].(string); ok && v != "" { + apiObject.TaskDefinitionArn = aws.String(v) + } + + return apiObject +} + +func expandCapacityProviderStrategyItem(tfMap map[string]interface{}) *types.CapacityProviderStrategyItem { + if tfMap == nil { + return nil + } + + apiObject := &types.CapacityProviderStrategyItem{} + + if v, ok := tfMap["base"].(int); ok { + apiObject.Base = int32(v) + } + + if v, ok := tfMap["capacity_provider"].(string); ok && v != "" { + apiObject.CapacityProvider = aws.String(v) + } + + if v, ok := tfMap["weight"].(int); ok { + apiObject.Weight = int32(v) + } + + return apiObject +} + +func expandCapacityProviderStrategyItems(tfList []interface{}) []types.CapacityProviderStrategyItem { + if len(tfList) == 0 { + return nil + } + + var apiObjects []types.CapacityProviderStrategyItem + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandCapacityProviderStrategyItem(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandNetworkConfiguration(tfMap map[string]interface{}) *types.NetworkConfiguration { + if tfMap == nil { + return nil + } + + apiObject := &types.NetworkConfiguration{} + + if v, ok := tfMap["aws_vpc_configuration"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.AwsvpcConfiguration = expandVPCConfiguration(v[0].(map[string]interface{})) + } + + return apiObject +} + +func expandVPCConfiguration(tfMap map[string]interface{}) *types.AwsVpcConfiguration { + if tfMap == nil { + return nil + } + + apiObject := &types.AwsVpcConfiguration{} + + if v, ok := tfMap["assign_public_ip"].(string); ok && v != "" { + apiObject.AssignPublicIp = types.AssignPublicIp(v) + } + + if v, ok := tfMap["security_groups"].(*schema.Set); ok && v.Len() > 0 { + apiObject.SecurityGroups = flex.ExpandStringValueSet(v) + } + + if v, ok := tfMap["subnets"].(*schema.Set); ok && v.Len() > 0 { + apiObject.Subnets = flex.ExpandStringValueSet(v) + } + + return apiObject +} + +func expandECSTaskOverride(tfMap map[string]interface{}) *types.EcsTaskOverride { + if tfMap == nil { + return nil + } + + apiObject := &types.EcsTaskOverride{} + + if v, ok := tfMap["container_override"].([]interface{}); ok && len(v) > 0 { + apiObject.ContainerOverrides = expandECSContainerOverrides(v) + } + + if v, ok := tfMap["cpu"].(string); ok && v != "" { + apiObject.Cpu = aws.String(v) + } + + if v, ok := tfMap["ephemeral_storage"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.EphemeralStorage = expandECSEphemeralStorage(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["execution_role_arn"].(string); ok && v != "" { + apiObject.ExecutionRoleArn = aws.String(v) + } + + if v, ok := tfMap["inference_accelerator_override"].([]interface{}); ok && len(v) > 0 { + apiObject.InferenceAcceleratorOverrides = expandECSInferenceAcceleratorOverrides(v) + } + + if v, ok := tfMap["memory"].(string); ok && v != "" { + 
apiObject.Memory = aws.String(v) + } + + if v, ok := tfMap["task_role_arn"].(string); ok && v != "" { + apiObject.TaskRoleArn = aws.String(v) + } + + return apiObject +} + +func expandECSContainerOverride(tfMap map[string]interface{}) *types.EcsContainerOverride { + if tfMap == nil { + return nil + } + + apiObject := &types.EcsContainerOverride{} + + if v, ok := tfMap["command"].([]interface{}); ok && len(v) > 0 { + apiObject.Command = flex.ExpandStringValueList(v) + } + + if v, ok := tfMap["cpu"].(int); ok { + apiObject.Cpu = aws.Int32(int32(v)) + } + + if v, ok := tfMap["environment"].([]interface{}); ok && len(v) > 0 { + apiObject.Environment = expandECSEnvironmentVariables(v) + } + + if v, ok := tfMap["environment_file"].([]interface{}); ok && len(v) > 0 { + apiObject.EnvironmentFiles = expandECSEnvironmentFiles(v) + } + + if v, ok := tfMap["memory"].(int); ok { + apiObject.Memory = aws.Int32(int32(v)) + } + + if v, ok := tfMap["memory_reservation"].(int); ok { + apiObject.MemoryReservation = aws.Int32(int32(v)) + } + + if v, ok := tfMap["name"].(string); ok && v != "" { + apiObject.Name = aws.String(v) + } + + if v, ok := tfMap["resource_requirement"].([]interface{}); ok && len(v) > 0 { + apiObject.ResourceRequirements = expandECSResourceRequirements(v) + } + + return apiObject +} + +func expandECSContainerOverrides(tfList []interface{}) []types.EcsContainerOverride { + if len(tfList) == 0 { + return nil + } + + var apiObjects []types.EcsContainerOverride + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandECSContainerOverride(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandECSEnvironmentVariable(tfMap map[string]interface{}) *types.EcsEnvironmentVariable { + if tfMap == nil { + return nil + } + + apiObject := &types.EcsEnvironmentVariable{} + + if v, ok := tfMap["name"].(string); ok && v != "" { + apiObject.Name = aws.String(v) + } + + if v, ok := tfMap["value"].(string); ok && v != "" { + apiObject.Value = aws.String(v) + } + + return apiObject +} + +func expandECSEnvironmentVariables(tfList []interface{}) []types.EcsEnvironmentVariable { + if len(tfList) == 0 { + return nil + } + + var apiObjects []types.EcsEnvironmentVariable + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandECSEnvironmentVariable(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandECSEnvironmentFile(tfMap map[string]interface{}) *types.EcsEnvironmentFile { + if tfMap == nil { + return nil + } + + apiObject := &types.EcsEnvironmentFile{} + + if v, ok := tfMap["type"].(string); ok && v != "" { + apiObject.Type = types.EcsEnvironmentFileType(v) + } + + if v, ok := tfMap["value"].(string); ok && v != "" { + apiObject.Value = aws.String(v) + } + + return apiObject +} + +func expandECSEnvironmentFiles(tfList []interface{}) []types.EcsEnvironmentFile { + if len(tfList) == 0 { + return nil + } + + var apiObjects []types.EcsEnvironmentFile + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandECSEnvironmentFile(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandECSResourceRequirement(tfMap 
map[string]interface{}) *types.EcsResourceRequirement { + if tfMap == nil { + return nil + } + + apiObject := &types.EcsResourceRequirement{} + + if v, ok := tfMap["type"].(string); ok && v != "" { + apiObject.Type = types.EcsResourceRequirementType(v) + } + + if v, ok := tfMap["value"].(string); ok && v != "" { + apiObject.Value = aws.String(v) + } + + return apiObject +} + +func expandECSResourceRequirements(tfList []interface{}) []types.EcsResourceRequirement { + if len(tfList) == 0 { + return nil + } + + var apiObjects []types.EcsResourceRequirement + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandECSResourceRequirement(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandECSEphemeralStorage(tfMap map[string]interface{}) *types.EcsEphemeralStorage { + if tfMap == nil { + return nil + } + + apiObject := &types.EcsEphemeralStorage{} + + if v, ok := tfMap["size_in_gib"].(int); ok { + apiObject.SizeInGiB = int32(v) + } + + return apiObject +} + +func expandECSInferenceAcceleratorOverride(tfMap map[string]interface{}) *types.EcsInferenceAcceleratorOverride { + if tfMap == nil { + return nil + } + + apiObject := &types.EcsInferenceAcceleratorOverride{} + + if v, ok := tfMap["device_name"].(string); ok && v != "" { + apiObject.DeviceName = aws.String(v) + } + + if v, ok := tfMap["device_type"].(string); ok && v != "" { + apiObject.DeviceType = aws.String(v) + } + + return apiObject +} + +func expandECSInferenceAcceleratorOverrides(tfList []interface{}) []types.EcsInferenceAcceleratorOverride { + if len(tfList) == 0 { + return nil + } + + var apiObjects []types.EcsInferenceAcceleratorOverride + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandECSInferenceAcceleratorOverride(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandPlacementConstraint(tfMap map[string]interface{}) *types.PlacementConstraint { + if tfMap == nil { + return nil + } + + apiObject := &types.PlacementConstraint{} + + if v, ok := tfMap["expression"].(string); ok && v != "" { + apiObject.Expression = aws.String(v) + } + + if v, ok := tfMap["type"].(string); ok && v != "" { + apiObject.Type = types.PlacementConstraintType(v) + } + + return apiObject +} + +func expandPlacementConstraints(tfList []interface{}) []types.PlacementConstraint { + if len(tfList) == 0 { + return nil + } + + var apiObjects []types.PlacementConstraint + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandPlacementConstraint(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandPlacementStrategy(tfMap map[string]interface{}) *types.PlacementStrategy { + if tfMap == nil { + return nil + } + + apiObject := &types.PlacementStrategy{} + + if v, ok := tfMap["field"].(string); ok && v != "" { + apiObject.Field = aws.String(v) + } + + if v, ok := tfMap["type"].(string); ok && v != "" { + apiObject.Type = types.PlacementStrategyType(v) + } + + return apiObject +} + +func expandPlacementStrategies(tfList []interface{}) []types.PlacementStrategy { + if len(tfList) == 0 { + return nil + } + + var apiObjects []types.PlacementStrategy + + for _, tfMapRaw := 
range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandPlacementStrategy(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandPipeTargetEventBridgeEventBusParameters(tfMap map[string]interface{}) *types.PipeTargetEventBridgeEventBusParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetEventBridgeEventBusParameters{} + + if v, ok := tfMap["detail_type"].(string); ok && v != "" { + apiObject.DetailType = aws.String(v) + } + + if v, ok := tfMap["endpoint_id"].(string); ok && v != "" { + apiObject.EndpointId = aws.String(v) + } + + if v, ok := tfMap["resources"].(*schema.Set); ok && v.Len() > 0 { + apiObject.Resources = flex.ExpandStringValueSet(v) + } + + if v, ok := tfMap["source"].(string); ok && v != "" { + apiObject.Source = aws.String(v) + } + + if v, ok := tfMap["time"].(string); ok && v != "" { + apiObject.Time = aws.String(v) + } + + return apiObject +} + +func expandPipeTargetHTTPParameters(tfMap map[string]interface{}) *types.PipeTargetHttpParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetHttpParameters{} + + if v, ok := tfMap["header_parameters"].(map[string]interface{}); ok && len(v) > 0 { + apiObject.HeaderParameters = flex.ExpandStringValueMap(v) + } + + if v, ok := tfMap["path_parameter_values"].([]interface{}); ok && len(v) > 0 { + apiObject.PathParameterValues = flex.ExpandStringValueList(v) + } + + if v, ok := tfMap["query_string_parameters"].(map[string]interface{}); ok && len(v) > 0 { + apiObject.QueryStringParameters = flex.ExpandStringValueMap(v) + } + + return apiObject +} + +func expandPipeTargetKinesisStreamParameters(tfMap map[string]interface{}) *types.PipeTargetKinesisStreamParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetKinesisStreamParameters{} + + if v, ok := tfMap["partition_key"].(string); ok && v != "" { + apiObject.PartitionKey = aws.String(v) + } + + return apiObject +} + +func expandPipeTargetLambdaFunctionParameters(tfMap map[string]interface{}) *types.PipeTargetLambdaFunctionParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetLambdaFunctionParameters{} + + if v, ok := tfMap["invocation_type"].(string); ok && v != "" { + apiObject.InvocationType = types.PipeTargetInvocationType(v) + } + + return apiObject +} + +func expandPipeTargetRedshiftDataParameters(tfMap map[string]interface{}) *types.PipeTargetRedshiftDataParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetRedshiftDataParameters{} + + if v, ok := tfMap["database"].(string); ok && v != "" { + apiObject.Database = aws.String(v) + } + + if v, ok := tfMap["db_user"].(string); ok && v != "" { + apiObject.DbUser = aws.String(v) + } + + if v, ok := tfMap["secret_manager_arn"].(string); ok && v != "" { + apiObject.SecretManagerArn = aws.String(v) + } + + if v, ok := tfMap["sqls"].(*schema.Set); ok && v.Len() > 0 { + apiObject.Sqls = flex.ExpandStringValueSet(v) + } + + if v, ok := tfMap["statement_name"].(string); ok && v != "" { + apiObject.StatementName = aws.String(v) + } + + if v, ok := tfMap["with_event"].(bool); ok { + apiObject.WithEvent = v + } + + return apiObject +} + +func expandPipeTargetSageMakerPipelineParameters(tfMap map[string]interface{}) *types.PipeTargetSageMakerPipelineParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetSageMakerPipelineParameters{} 
+
+	if v, ok := tfMap["pipeline_parameter"].([]interface{}); ok && len(v) > 0 {
+		apiObject.PipelineParameterList = expandSageMakerPipelineParameters(v)
+	}
+
+	return apiObject
+}
+
+func expandSageMakerPipelineParameter(tfMap map[string]interface{}) *types.SageMakerPipelineParameter {
+	if tfMap == nil {
+		return nil
+	}
+
+	apiObject := &types.SageMakerPipelineParameter{}
+
+	if v, ok := tfMap["name"].(string); ok && v != "" {
+		apiObject.Name = aws.String(v)
+	}
+
+	if v, ok := tfMap["value"].(string); ok && v != "" {
+		apiObject.Value = aws.String(v)
+	}
+
+	return apiObject
+}
+
+func expandSageMakerPipelineParameters(tfList []interface{}) []types.SageMakerPipelineParameter {
+	if len(tfList) == 0 {
+		return nil
+	}
+
+	var apiObjects []types.SageMakerPipelineParameter
+
+	for _, tfMapRaw := range tfList {
+		tfMap, ok := tfMapRaw.(map[string]interface{})
+
+		if !ok {
+			continue
+		}
+
+		apiObject := expandSageMakerPipelineParameter(tfMap)
+
+		if apiObject == nil {
+			continue
+		}
+
+		apiObjects = append(apiObjects, *apiObject)
+	}
+
+	return apiObjects
+}
+
+func expandPipeTargetSQSQueueParameters(tfMap map[string]interface{}) *types.PipeTargetSqsQueueParameters {
+	if tfMap == nil {
+		return nil
+	}
+
+	apiObject := &types.PipeTargetSqsQueueParameters{}
+
+	if v, ok := tfMap["message_deduplication_id"].(string); ok && v != "" {
+		apiObject.MessageDeduplicationId = aws.String(v)
+	}
+
+	if v, ok := tfMap["message_group_id"].(string); ok && v != "" {
+		apiObject.MessageGroupId = aws.String(v)
+	}
+
+	return apiObject
+}
+
+func expandPipeTargetStateMachineParameters(tfMap map[string]interface{}) *types.PipeTargetStateMachineParameters {
+	if tfMap == nil {
+		return nil
+	}
+
+	apiObject := &types.PipeTargetStateMachineParameters{}
+
+	if v, ok := tfMap["invocation_type"].(string); ok && v != "" {
+		apiObject.InvocationType = types.PipeTargetInvocationType(v)
+	}
+
+	return apiObject
+}
+
+func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[string]interface{} {
+	if apiObject == nil {
+		return nil
+	}
+
+	tfMap := map[string]interface{}{}
+
+	if v := apiObject.BatchJobParameters; v != nil {
+		tfMap["batch_job_parameters"] = []interface{}{flattenPipeTargetBatchJobParameters(v)}
+	}
+
+	if v := apiObject.CloudWatchLogsParameters; v != nil {
+		tfMap["cloudwatch_logs_parameters"] = []interface{}{flattenPipeTargetCloudWatchLogsParameters(v)}
+	}
+
+	if v := apiObject.EcsTaskParameters; v != nil {
+		tfMap["ecs_task_parameters"] = []interface{}{flattenPipeTargetECSTaskParameters(v)}
+	}
+
+	if v := apiObject.EventBridgeEventBusParameters; v != nil {
+		tfMap["eventbridge_event_bus_parameters"] = []interface{}{flattenPipeTargetEventBridgeEventBusParameters(v)}
+	}
+
+	if v := apiObject.HttpParameters; v != nil {
+		tfMap["http_parameters"] = []interface{}{flattenPipeTargetHTTPParameters(v)}
+	}
+
+	if v := apiObject.InputTemplate; v != nil {
+		tfMap["input_template"] = aws.ToString(v)
+	}
+
+	if v := apiObject.KinesisStreamParameters; v != nil {
+		tfMap["kinesis_stream_parameters"] = []interface{}{flattenPipeTargetKinesisStreamParameters(v)}
+	}
+
+	if v := apiObject.LambdaFunctionParameters; v != nil {
+		tfMap["lambda_function_parameters"] = []interface{}{flattenPipeTargetLambdaFunctionParameters(v)}
+	}
+
+	if v := apiObject.RedshiftDataParameters; v != nil {
+		tfMap["redshift_data_parameters"] = []interface{}{flattenPipeTargetRedshiftDataParameters(v)}
+	}
+
+	if v := apiObject.SageMakerPipelineParameters; v != nil {
+		tfMap["sagemaker_pipeline_parameters"] =
[]interface{}{flattenPipeTargetSageMakerPipelineParameters(v)} + } + + if v := apiObject.SqsQueueParameters; v != nil { + tfMap["sqs_queue_parameters"] = []interface{}{flattenPipeTargetSQSQueueParameters(v)} + } + + if v := apiObject.StepFunctionStateMachineParameters; v != nil { + tfMap["step_function_state_machine_parameters"] = []interface{}{flattenPipeTargetStateMachineParameters(v)} + } + + return tfMap +} + +func flattenPipeTargetBatchJobParameters(apiObject *types.PipeTargetBatchJobParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.ArrayProperties; v != nil { + tfMap["array_properties"] = []interface{}{flattenBatchArrayProperties(v)} + } + + if v := apiObject.ContainerOverrides; v != nil { + tfMap["container_overrides"] = []interface{}{flattenBatchContainerOverrides(v)} + } + + if v := apiObject.DependsOn; v != nil { + tfMap["depends_on"] = flattenBatchJobDependencies(v) + } + + if v := apiObject.JobDefinition; v != nil { + tfMap["job_definition"] = aws.ToString(v) + } + + if v := apiObject.JobName; v != nil { + tfMap["job_name"] = aws.ToString(v) + } + + if v := apiObject.Parameters; v != nil { + tfMap["parameters"] = v + } + + if v := apiObject.RetryStrategy; v != nil { + tfMap["retry_strategy"] = []interface{}{flattenBatchRetryStrategy(v)} + } + + return tfMap +} + +func flattenBatchArrayProperties(apiObject *types.BatchArrayProperties) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Size; v != 0 { + tfMap["size"] = int(v) + } + + return tfMap +} + +func flattenBatchContainerOverrides(apiObject *types.BatchContainerOverrides) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Command; v != nil { + tfMap["command"] = v + } + + if v := apiObject.Environment; v != nil { + tfMap["environment"] = flattenBatchEnvironmentVariables(v) + } + + if v := apiObject.InstanceType; v != nil { + tfMap["instance_type"] = aws.ToString(v) + } + + if v := apiObject.ResourceRequirements; v != nil { + tfMap["resource_requirement"] = flattenBatchResourceRequirements(v) + } + + return tfMap +} + +func flattenBatchEnvironmentVariable(apiObject types.BatchEnvironmentVariable) map[string]interface{} { + tfMap := map[string]interface{}{} + + if v := apiObject.Name; v != nil { + tfMap["name"] = aws.ToString(v) + } + + if v := apiObject.Value; v != nil { + tfMap["value"] = aws.ToString(v) + } + + return tfMap +} + +func flattenBatchEnvironmentVariables(apiObjects []types.BatchEnvironmentVariable) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenBatchEnvironmentVariable(apiObject)) + } + + return tfList +} + +func flattenBatchResourceRequirement(apiObject types.BatchResourceRequirement) map[string]interface{} { + tfMap := map[string]interface{}{} + + if v := apiObject.Type; v != "" { + tfMap["type"] = v + } + + if v := apiObject.Value; v != nil { + tfMap["value"] = aws.ToString(v) + } + + return tfMap +} + +func flattenBatchResourceRequirements(apiObjects []types.BatchResourceRequirement) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenBatchResourceRequirement(apiObject)) + } + + return tfList +} + +func flattenBatchJobDependency(apiObject 
types.BatchJobDependency) map[string]interface{} { + tfMap := map[string]interface{}{} + + if v := apiObject.JobId; v != nil { + tfMap["job_id"] = aws.ToString(v) + } + + if v := apiObject.Type; v != "" { + tfMap["type"] = v + } + + return tfMap +} + +func flattenBatchJobDependencies(apiObjects []types.BatchJobDependency) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenBatchJobDependency(apiObject)) + } + + return tfList +} + +func flattenBatchRetryStrategy(apiObject *types.BatchRetryStrategy) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Attempts; v != 0 { + tfMap["attempts"] = int(v) + } + + return tfMap +} + +func flattenPipeTargetCloudWatchLogsParameters(apiObject *types.PipeTargetCloudWatchLogsParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.LogStreamName; v != nil { + tfMap["log_stream_name"] = aws.ToString(v) + } + + if v := apiObject.Timestamp; v != nil { + tfMap["timestamp"] = aws.ToString(v) + } + + return tfMap +} + +func flattenPipeTargetECSTaskParameters(apiObject *types.PipeTargetEcsTaskParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{ + "enable_ecs_managed_tags": apiObject.EnableECSManagedTags, + "enable_execute_command": apiObject.EnableExecuteCommand, + } + + if v := apiObject.CapacityProviderStrategy; v != nil { + tfMap["capacity_provider_strategy"] = flattenCapacityProviderStrategyItems(v) + } + + if v := apiObject.Group; v != nil { + tfMap["group"] = aws.ToString(v) + } + + if v := apiObject.LaunchType; v != "" { + tfMap["launch_type"] = v + } + + if v := apiObject.NetworkConfiguration; v != nil { + tfMap["network_configuration"] = []interface{}{flattenNetworkConfiguration(v)} + } + + if v := apiObject.Overrides; v != nil { + tfMap["overrides"] = []interface{}{flattenECSTaskOverride(v)} + } + + if v := apiObject.PlacementConstraints; v != nil { + tfMap["placement_constraint"] = flattenPlacementConstraints(v) + } + + if v := apiObject.PlacementStrategy; v != nil { + tfMap["placement_strategy"] = flattenPlacementStrategies(v) + } + + if v := apiObject.PlatformVersion; v != nil { + tfMap["platform_version"] = aws.ToString(v) + } + + if v := apiObject.PropagateTags; v != "" { + tfMap["propagate_tags"] = v + } + + if v := apiObject.ReferenceId; v != nil { + tfMap["reference_id"] = aws.ToString(v) + } + + if v := apiObject.Tags; v != nil { + tags := map[string]interface{}{} + + for _, apiObject := range v { + tags[aws.ToString(apiObject.Key)] = aws.ToString(apiObject.Value) + } + + tfMap["tags"] = tags + } + + if v := apiObject.TaskCount; v != nil { + tfMap["task_count"] = aws.ToInt32(v) + } + + if v := apiObject.TaskDefinitionArn; v != nil { + tfMap["task_definition_arn"] = aws.ToString(v) + } + + return tfMap +} + +func flattenCapacityProviderStrategyItem(apiObject types.CapacityProviderStrategyItem) map[string]interface{} { + tfMap := map[string]interface{}{ + "base": apiObject.Base, + "weight": apiObject.Weight, + } + + if v := apiObject.CapacityProvider; v != nil { + tfMap["capacity_provider"] = aws.ToString(v) + } + + return tfMap +} + +func flattenCapacityProviderStrategyItems(apiObjects []types.CapacityProviderStrategyItem) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, 
apiObject := range apiObjects {
+		tfList = append(tfList, flattenCapacityProviderStrategyItem(apiObject))
+	}
+
+	return tfList
+}
+
+func flattenECSTaskOverride(apiObject *types.EcsTaskOverride) map[string]interface{} {
+	if apiObject == nil {
+		return nil
+	}
+
+	tfMap := map[string]interface{}{}
+
+	if v := apiObject.ContainerOverrides; v != nil {
+		tfMap["container_override"] = flattenECSContainerOverrides(v)
+	}
+
+	if v := apiObject.Cpu; v != nil {
+		tfMap["cpu"] = aws.ToString(v)
+	}
+
+	if v := apiObject.EphemeralStorage; v != nil {
+		tfMap["ephemeral_storage"] = []interface{}{flattenECSEphemeralStorage(v)}
+	}
+
+	if v := apiObject.ExecutionRoleArn; v != nil {
+		tfMap["execution_role_arn"] = aws.ToString(v)
+	}
+
+	if v := apiObject.InferenceAcceleratorOverrides; v != nil {
+		tfMap["inference_accelerator_override"] = flattenECSInferenceAcceleratorOverrides(v)
+	}
+
+	if v := apiObject.Memory; v != nil {
+		tfMap["memory"] = aws.ToString(v)
+	}
+
+	if v := apiObject.TaskRoleArn; v != nil {
+		tfMap["task_role_arn"] = aws.ToString(v)
+	}
+
+	return tfMap
+}
+
+func flattenECSContainerOverride(apiObject types.EcsContainerOverride) map[string]interface{} {
+	tfMap := map[string]interface{}{}
+
+	if v := apiObject.Command; v != nil {
+		tfMap["command"] = v
+	}
+
+	if v := apiObject.Cpu; v != nil {
+		tfMap["cpu"] = aws.ToInt32(v)
+	}
+
+	if v := apiObject.Environment; v != nil {
+		tfMap["environment"] = flattenECSEnvironmentVariables(v)
+	}
+
+	if v := apiObject.EnvironmentFiles; v != nil {
+		tfMap["environment_file"] = flattenECSEnvironmentFiles(v)
+	}
+
+	if v := apiObject.Memory; v != nil {
+		tfMap["memory"] = aws.ToInt32(v)
+	}
+
+	if v := apiObject.MemoryReservation; v != nil {
+		tfMap["memory_reservation"] = aws.ToInt32(v)
+	}
+
+	if v := apiObject.Name; v != nil {
+		tfMap["name"] = aws.ToString(v)
+	}
+
+	if v := apiObject.ResourceRequirements; v != nil {
+		tfMap["resource_requirement"] = flattenECSResourceRequirements(v)
+	}
+
+	return tfMap
+}
+
+func flattenECSContainerOverrides(apiObjects []types.EcsContainerOverride) []interface{} {
+	if len(apiObjects) == 0 {
+		return nil
+	}
+
+	var tfList []interface{}
+
+	for _, apiObject := range apiObjects {
+		tfList = append(tfList, flattenECSContainerOverride(apiObject))
+	}
+
+	return tfList
+}
+
+func flattenECSResourceRequirement(apiObject types.EcsResourceRequirement) map[string]interface{} {
+	tfMap := map[string]interface{}{}
+
+	if v := apiObject.Type; v != "" {
+		tfMap["type"] = v
+	}
+
+	if v := apiObject.Value; v != nil {
+		tfMap["value"] = aws.ToString(v)
+	}
+
+	return tfMap
+}
+
+func flattenECSResourceRequirements(apiObjects []types.EcsResourceRequirement) []interface{} {
+	if len(apiObjects) == 0 {
+		return nil
+	}
+
+	var tfList []interface{}
+
+	for _, apiObject := range apiObjects {
+		tfList = append(tfList, flattenECSResourceRequirement(apiObject))
+	}
+
+	return tfList
+}
+
+func flattenECSEnvironmentFile(apiObject types.EcsEnvironmentFile) map[string]interface{} {
+	tfMap := map[string]interface{}{}
+
+	if v := apiObject.Type; v != "" {
+		tfMap["type"] = v
+	}
+
+	if v := apiObject.Value; v != nil {
+		tfMap["value"] = aws.ToString(v)
+	}
+
+	return tfMap
+}
+
+func flattenECSEnvironmentVariable(apiObject types.EcsEnvironmentVariable) map[string]interface{} {
+	tfMap := map[string]interface{}{}
+
+	if v := apiObject.Name; v != nil {
+		tfMap["name"] = aws.ToString(v)
+	}
+
+	if v := apiObject.Value; v != nil {
+		tfMap["value"] = aws.ToString(v)
+	}
+
+	return tfMap
+}
+
+func flattenECSEnvironmentVariables(apiObjects
[]types.EcsEnvironmentVariable) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenECSEnvironmentVariable(apiObject)) + } + + return tfList +} + +func flattenECSEnvironmentFiles(apiObjects []types.EcsEnvironmentFile) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenECSEnvironmentFile(apiObject)) + } + + return tfList +} + +func flattenECSEphemeralStorage(apiObject *types.EcsEphemeralStorage) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{ + "size_in_gib": apiObject.SizeInGiB, + } + + return tfMap +} + +func flattenECSInferenceAcceleratorOverride(apiObject types.EcsInferenceAcceleratorOverride) map[string]interface{} { + tfMap := map[string]interface{}{} + + if v := apiObject.DeviceName; v != nil { + tfMap["device_name"] = aws.ToString(v) + } + + if v := apiObject.DeviceType; v != nil { + tfMap["device_type"] = aws.ToString(v) + } + + return tfMap +} + +func flattenECSInferenceAcceleratorOverrides(apiObjects []types.EcsInferenceAcceleratorOverride) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenECSInferenceAcceleratorOverride(apiObject)) + } + + return tfList +} + +func flattenNetworkConfiguration(apiObject *types.NetworkConfiguration) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.AwsvpcConfiguration; v != nil { + tfMap["aws_vpc_configuration"] = []interface{}{flattenVPCConfiguration(v)} + } + + return tfMap +} + +func flattenVPCConfiguration(apiObject *types.AwsVpcConfiguration) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.AssignPublicIp; v != "" { + tfMap["assign_public_ip"] = v + } + + if v := apiObject.SecurityGroups; v != nil { + tfMap["security_groups"] = v + } + + if v := apiObject.Subnets; v != nil { + tfMap["subnets"] = v + } + + return tfMap +} + +func flattenPlacementConstraint(apiObject types.PlacementConstraint) map[string]interface{} { + tfMap := map[string]interface{}{} + + if v := apiObject.Expression; v != nil { + tfMap["expression"] = aws.ToString(v) + } + + if v := apiObject.Type; v != "" { + tfMap["type"] = v + } + + return tfMap +} + +func flattenPlacementConstraints(apiObjects []types.PlacementConstraint) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenPlacementConstraint(apiObject)) + } + + return tfList +} + +func flattenPlacementStrategy(apiObject types.PlacementStrategy) map[string]interface{} { + tfMap := map[string]interface{}{} + + if v := apiObject.Field; v != nil { + tfMap["field"] = aws.ToString(v) + } + + if v := apiObject.Type; v != "" { + tfMap["type"] = v + } + + return tfMap +} + +func flattenPlacementStrategies(apiObjects []types.PlacementStrategy) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenPlacementStrategy(apiObject)) + } + + return tfList +} + +func flattenPipeTargetEventBridgeEventBusParameters(apiObject *types.PipeTargetEventBridgeEventBusParameters) 
map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.DetailType; v != nil { + tfMap["detail_type"] = aws.ToString(v) + } + + if v := apiObject.EndpointId; v != nil { + tfMap["endpoint_id"] = aws.ToString(v) + } + + if v := apiObject.Resources; v != nil { + tfMap["resources"] = v + } + + if v := apiObject.Source; v != nil { + tfMap["source"] = aws.ToString(v) + } + + if v := apiObject.Time; v != nil { + tfMap["time"] = aws.ToString(v) + } + + return tfMap +} + +func flattenPipeTargetHTTPParameters(apiObject *types.PipeTargetHttpParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.HeaderParameters; v != nil { + tfMap["header_parameters"] = v + } + + if v := apiObject.PathParameterValues; v != nil { + tfMap["path_parameter_values"] = v + } + + if v := apiObject.QueryStringParameters; v != nil { + tfMap["query_string_parameters"] = v + } + + return tfMap +} + +func flattenPipeTargetKinesisStreamParameters(apiObject *types.PipeTargetKinesisStreamParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.PartitionKey; v != nil { + tfMap["partition_key"] = aws.ToString(v) + } + + return tfMap +} + +func flattenPipeTargetLambdaFunctionParameters(apiObject *types.PipeTargetLambdaFunctionParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.InvocationType; v != "" { + tfMap["invocation_type"] = v + } + + return tfMap +} + +func flattenPipeTargetRedshiftDataParameters(apiObject *types.PipeTargetRedshiftDataParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{ + "with_event": apiObject.WithEvent, + } + + if v := apiObject.Database; v != nil { + tfMap["database"] = aws.ToString(v) + } + + if v := apiObject.DbUser; v != nil { + tfMap["db_user"] = aws.ToString(v) + } + + if v := apiObject.SecretManagerArn; v != nil { + tfMap["secret_manager_arn"] = aws.ToString(v) + } + + if v := apiObject.Sqls; v != nil { + tfMap["sqls"] = v + } + + if v := apiObject.StatementName; v != nil { + tfMap["statement_name"] = aws.ToString(v) + } + + return tfMap +} + +func flattenPipeTargetSageMakerPipelineParameters(apiObject *types.PipeTargetSageMakerPipelineParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.PipelineParameterList; v != nil { + tfMap["pipeline_parameter"] = flattenSageMakerPipelineParameters(v) + } + + return tfMap +} + +func flattenSageMakerPipelineParameter(apiObject types.SageMakerPipelineParameter) map[string]interface{} { + tfMap := map[string]interface{}{} + + if v := apiObject.Name; v != nil { + tfMap["name"] = aws.ToString(v) + } + + if v := apiObject.Value; v != nil { + tfMap["value"] = aws.ToString(v) + } + + return tfMap +} + +func flattenSageMakerPipelineParameters(apiObjects []types.SageMakerPipelineParameter) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenSageMakerPipelineParameter(apiObject)) + } + + return tfList +} + +func flattenPipeTargetSQSQueueParameters(apiObject *types.PipeTargetSqsQueueParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v 
:= apiObject.MessageDeduplicationId; v != nil { + tfMap["message_deduplication_id"] = aws.ToString(v) + } + + if v := apiObject.MessageGroupId; v != nil { + tfMap["message_group_id"] = aws.ToString(v) + } + + return tfMap +} + +func flattenPipeTargetStateMachineParameters(apiObject *types.PipeTargetStateMachineParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.InvocationType; v != "" { + tfMap["invocation_type"] = v + } + + return tfMap +} diff --git a/internal/service/pipes/test-fixtures/lambdatest.zip b/internal/service/pipes/test-fixtures/lambdatest.zip new file mode 100644 index 00000000000..5c636e955b2 Binary files /dev/null and b/internal/service/pipes/test-fixtures/lambdatest.zip differ diff --git a/internal/service/pipes/wait.go b/internal/service/pipes/wait.go deleted file mode 100644 index 16f5cf03146..00000000000 --- a/internal/service/pipes/wait.go +++ /dev/null @@ -1,70 +0,0 @@ -package pipes - -import ( - "context" - "errors" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/pipes" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func waitPipeCreated(ctx context.Context, conn *pipes.Client, id string, timeout time.Duration) (*pipes.DescribePipeOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{pipeStatusCreating}, - Target: []string{pipeStatusRunning, pipeStatusStopped}, - Refresh: statusPipe(ctx, conn, id), - Timeout: timeout, - NotFoundChecks: 20, - ContinuousTargetOccurence: 1, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*pipes.DescribePipeOutput); ok { - tfresource.SetLastError(err, errors.New(aws.ToString(output.StateReason))) - - return output, err - } - - return nil, err -} - -func waitPipeUpdated(ctx context.Context, conn *pipes.Client, id string, timeout time.Duration) (*pipes.DescribePipeOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{pipeStatusUpdating}, - Target: []string{pipeStatusRunning, pipeStatusStopped}, - Refresh: statusPipe(ctx, conn, id), - Timeout: timeout, - NotFoundChecks: 20, - ContinuousTargetOccurence: 1, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*pipes.DescribePipeOutput); ok { - tfresource.SetLastError(err, errors.New(aws.ToString(output.StateReason))) - - return output, err - } - - return nil, err -} - -func waitPipeDeleted(ctx context.Context, conn *pipes.Client, id string, timeout time.Duration) (*pipes.DescribePipeOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{pipeStatusDeleting}, - Target: []string{}, - Refresh: statusPipe(ctx, conn, id), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*pipes.DescribePipeOutput); ok { - tfresource.SetLastError(err, errors.New(aws.ToString(output.StateReason))) - - return output, err - } - - return nil, err -} diff --git a/internal/types/zero.go b/internal/types/zero.go new file mode 100644 index 00000000000..4e5bc37bdd0 --- /dev/null +++ b/internal/types/zero.go @@ -0,0 +1,10 @@ +package types + +import ( + "reflect" +) + +// IsZero returns true if `v` is `nil` or points to the zero value of `T`. 
+func IsZero[T any](v *T) bool { + return v == nil || reflect.ValueOf(*v).IsZero() +} diff --git a/internal/types/zero_test.go b/internal/types/zero_test.go new file mode 100644 index 00000000000..d8e38702a50 --- /dev/null +++ b/internal/types/zero_test.go @@ -0,0 +1,52 @@ +package types + +import ( + "testing" +) + +type AIsZero struct { + Key string + Value int +} + +func TestIsZero(t *testing.T) { + t.Parallel() + + testCases := []struct { + Name string + Ptr *AIsZero + Expected bool + }{ + { + Name: "nil pointer", + Expected: true, + }, + { + Name: "pointer to zero value", + Ptr: &AIsZero{}, + Expected: true, + }, + { + Name: "pointer to non-zero value Key", + Ptr: &AIsZero{Key: "test"}, + }, + { + Name: "pointer to non-zero value Value", + Ptr: &AIsZero{Value: 42}, + }, + } + + for _, testCase := range testCases { + testCase := testCase + + t.Run(testCase.Name, func(t *testing.T) { + t.Parallel() + + got := IsZero(testCase.Ptr) + + if got != testCase.Expected { + t.Errorf("got %t, expected %t", got, testCase.Expected) + } + }) + } +} diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index 96426187c73..766d22abaa0 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -12,6 +12,8 @@ Terraform resource for managing an AWS EventBridge Pipes Pipe. You can find out more about EventBridge Pipes in the [User Guide](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes.html). +EventBridge Pipes are very configurable, and may require IAM permissions to work correctly. More information on the configuration options and IAM permissions can be found in the [User Guide](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes.html). + ~> **Note:** EventBridge was formerly known as CloudWatch Events. The functionality is identical. 
## Example Usage
@@ -87,9 +89,79 @@ resource "aws_pipes_pipe" "example" {
   role_arn = aws_iam_role.example.arn
   source   = aws_sqs_queue.source.arn
   target   = aws_sqs_queue.target.arn
+}
+```
+
+### Enrichment Usage
+
+```terraform
+resource "aws_pipes_pipe" "example" {
+  name     = "example-pipe"
+  role_arn = aws_iam_role.example.arn
+  source   = aws_sqs_queue.source.arn
+  target   = aws_sqs_queue.target.arn
+
+  enrichment = aws_cloudwatch_event_api_destination.example.arn
 
-  source_parameters {}
-  target_parameters {}
+  enrichment_parameters {
+    http_parameters {
+      header_parameters = {
+        "example-header"        = "example-value"
+        "second-example-header" = "second-example-value"
+      }
+
+      path_parameter_values = ["example-path-param"]
+
+      query_string_parameters = {
+        "example-query-string"        = "example-value"
+        "second-example-query-string" = "second-example-value"
+      }
+    }
+  }
+}
+```
+
+### Filter Usage
+
+```terraform
+resource "aws_pipes_pipe" "example" {
+  name     = "example-pipe"
+  role_arn = aws_iam_role.example.arn
+  source   = aws_sqs_queue.source.arn
+  target   = aws_sqs_queue.target.arn
+
+  source_parameters {
+    filter_criteria {
+      filter {
+        pattern = jsonencode({
+          source = ["event-source"]
+        })
+      }
+    }
+  }
+}
+```
+
+### SQS Source and Target Configuration Usage
+
+```terraform
+resource "aws_pipes_pipe" "example" {
+  name     = "example-pipe"
+  role_arn = aws_iam_role.example.arn
+  source   = aws_sqs_queue.source.arn
+  target   = aws_sqs_queue.target.arn
+
+  source_parameters {
+    sqs_queue_parameters {
+      batch_size                         = 1
+      maximum_batching_window_in_seconds = 2
+    }
+  }
+
+  target_parameters {
+    sqs_queue_parameters {
+      message_deduplication_id = "example-dedupe"
+      message_group_id         = "example-group"
+    }
+  }
 }
 ```
@@ -100,21 +172,44 @@ The following arguments are required:
 
 * `role_arn` - (Required) ARN of the role that allows the pipe to send data to the target.
 * `source` - (Required) Source resource of the pipe (typically an ARN).
 * `target` - (Required) Target resource of the pipe (typically an ARN).
-* `source_parameters` - (Required) Parameters required to set up a source for the pipe. Detailed below.
-* `target_parameters` - (Required) Parameters required to set up a target for your pipe. Detailed below.
 
 The following arguments are optional:
 
 * `description` - (Optional) A description of the pipe. At most 512 characters.
 * `desired_state` - (Optional) The state the pipe should be in. One of: `RUNNING`, `STOPPED`.
 * `enrichment` - (Optional) Enrichment resource of the pipe (typically an ARN). Read more about enrichment in the [User Guide](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes.html#pipes-enrichment).
+* `enrichment_parameters` - (Optional) Parameters to configure enrichment for your pipe. Detailed below.
 * `name` - (Optional) Name of the pipe. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix`.
 * `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`.
+* `source_parameters` - (Optional) Parameters to configure a source for the pipe. Detailed below.
+* `target_parameters` - (Optional) Parameters to configure a target for your pipe. Detailed below.
 * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
+### enrichment_parameters Configuration Block
+
+You can find out more about EventBridge Pipes enrichment in the [User Guide](https://docs.aws.amazon.com/eventbridge/latest/userguide/pipes-enrichment.html).
+
+* `http_parameters` - (Optional) Contains the HTTP parameters to use when the target is an API Gateway REST endpoint or EventBridge ApiDestination. If you specify an API Gateway REST API or EventBridge ApiDestination as a target, you can use this parameter to specify headers, path parameters, and query string keys/values as part of your target invoking request. If you're using ApiDestinations, the corresponding Connection can also have these values configured. In case of any conflicting keys, values from the Connection take precedence. Detailed below.
+* `input_template` - (Optional) Valid JSON text passed to the target. In this case, nothing from the event itself is passed to the target. Maximum length of 8,192 characters.
+
+#### enrichment_parameters.http_parameters Configuration Block
+
+* `header_parameters` - (Optional) Key-value mapping of the headers to be sent as part of the request invoking the API Gateway REST API or EventBridge ApiDestination.
+* `path_parameter_values` - (Optional) The path parameter values used to populate API Gateway REST API or EventBridge ApiDestination path wildcards ("*").
+* `query_string_parameters` - (Optional) Key-value mapping of the query strings to be sent as part of the request invoking the API Gateway REST API or EventBridge ApiDestination.
+
### source_parameters Configuration Block

-* `filter_criteria` - (Optional) The collection of event patterns used to filter events. Detailed below.
+You can find out more about EventBridge Pipes sources in the [User Guide](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-source.html).
+
+* `activemq_broker_parameters` - (Optional) The parameters for using an Active MQ broker as a source. Detailed below.
+* `dynamodb_stream_parameters` - (Optional) The parameters for using a DynamoDB stream as a source. Detailed below.
+* `filter_criteria` - (Optional) The collection of event patterns used to [filter events](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-filtering.html). Detailed below.
+* `kinesis_stream_parameters` - (Optional) The parameters for using a Kinesis stream as a source. Detailed below.
+* `managed_streaming_kafka_parameters` - (Optional) The parameters for using an MSK stream as a source. Detailed below.
+* `rabbitmq_broker_parameters` - (Optional) The parameters for using a Rabbit MQ broker as a source. Detailed below.
+* `self_managed_kafka_parameters` - (Optional) The parameters for using a self-managed Apache Kafka stream as a source. Detailed below.
+* `sqs_queue_parameters` - (Optional) The parameters for using an Amazon SQS queue as a source. Detailed below.

#### source_parameters.filter_criteria Configuration Block

@@ -124,9 +219,302 @@ The following arguments are optional:

* `pattern` - (Required) The event pattern. At most 4096 characters.

+#### source_parameters.activemq_broker_parameters Configuration Block
+
+* `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10,000.
+* `credentials` - (Required) The credentials needed to access the resource. Detailed below.
+* `maximum_batching_window_in_seconds` - (Optional) The maximum length of time to wait for events. Maximum value of 300.
+* `queue_name` - (Required) The name of the destination queue to consume. Maximum length of 1,000.
+
+##### source_parameters.activemq_broker_parameters.credentials Configuration Block
+
+* `basic_auth` - (Required) The ARN of the Secrets Manager secret containing the basic auth credentials.
+
+#### source_parameters.dynamodb_stream_parameters Configuration Block
+
+* `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10,000.
+* `dead_letter_config` - (Optional) Define the target queue to send dead-letter queue events to. Detailed below.
+* `maximum_batching_window_in_seconds` - (Optional) The maximum length of time to wait for events. Maximum value of 300.
+* `maximum_record_age_in_seconds` - (Optional) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, EventBridge never discards old records. Maximum value of 604,800.
+* `maximum_retry_attempts` - (Optional) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source. Maximum value of 10,000.
+* `on_partial_batch_item_failure` - (Optional) Define how to handle item process failures. AUTOMATIC_BISECT halves each batch and retries each half until all the records are processed or there is one failed message left in the batch. Valid values: AUTOMATIC_BISECT.
+* `parallelization_factor` - (Optional) The number of batches to process concurrently from each shard. The default value is 1. Maximum value of 10.
+* `starting_position` - (Optional) The position in a stream from which to start reading. Valid values: TRIM_HORIZON, LATEST.
+
+##### source_parameters.dynamodb_stream_parameters.dead_letter_config Configuration Block
+
+* `arn` - (Optional) The ARN of the Amazon SQS queue specified as the target for the dead-letter queue.
+
+#### source_parameters.kinesis_stream_parameters Configuration Block
+
+* `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10,000.
+* `dead_letter_config` - (Optional) Define the target queue to send dead-letter queue events to. Detailed below.
+* `maximum_batching_window_in_seconds` - (Optional) The maximum length of time to wait for events. Maximum value of 300.
+* `maximum_record_age_in_seconds` - (Optional) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, EventBridge never discards old records. Maximum value of 604,800.
+* `maximum_retry_attempts` - (Optional) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source. Maximum value of 10,000.
+* `on_partial_batch_item_failure` - (Optional) Define how to handle item process failures. AUTOMATIC_BISECT halves each batch and retries each half until all the records are processed or there is one failed message left in the batch. Valid values: AUTOMATIC_BISECT.
+* `parallelization_factor` - (Optional) The number of batches to process concurrently from each shard. The default value is 1. Maximum value of 10.
+* `starting_position` - (Required) The position in a stream from which to start reading. Valid values: TRIM_HORIZON, LATEST, AT_TIMESTAMP.
+* `starting_position_timestamp` - (Optional) With `starting_position` set to AT_TIMESTAMP, the time from which to start reading, in Unix time seconds.
+
+##### source_parameters.kinesis_stream_parameters.dead_letter_config Configuration Block
+
+* `arn` - (Optional) The ARN of the Amazon SQS queue specified as the target for the dead-letter queue.
+
+#### source_parameters.managed_streaming_kafka_parameters Configuration Block
+
+* `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10,000.
+* `consumer_group_id` - (Optional) The name of the consumer group to use. Maximum length of 200.
+* `credentials` - (Optional) The credentials needed to access the resource. Detailed below.
+* `maximum_batching_window_in_seconds` - (Optional) The maximum length of time to wait for events. Maximum value of 300.
+* `starting_position` - (Optional) The position in a stream from which to start reading. Valid values: TRIM_HORIZON, LATEST.
+* `topic_name` - (Required) The name of the topic that the pipe will read from. Maximum length of 249.
+
+##### source_parameters.managed_streaming_kafka_parameters.credentials Configuration Block
+
+* `client_certificate_tls_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials.
+* `sasl_scram_512_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials.
+
+#### source_parameters.rabbitmq_broker_parameters Configuration Block
+
+* `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10,000.
+* `credentials` - (Required) The credentials needed to access the resource. Detailed below.
+* `maximum_batching_window_in_seconds` - (Optional) The maximum length of time to wait for events. Maximum value of 300.
+* `queue_name` - (Required) The name of the destination queue to consume. Maximum length of 1,000.
+* `virtual_host` - (Optional) The name of the virtual host associated with the source broker. Maximum length of 200.
+
+##### source_parameters.rabbitmq_broker_parameters.credentials Configuration Block
+
+* `basic_auth` - (Required) The ARN of the Secrets Manager secret containing the credentials.
+
+#### source_parameters.self_managed_kafka_parameters Configuration Block
+
+* `additional_bootstrap_servers` - (Optional) An array of server URLs. Maximum of 2 items, each of maximum length 300.
+* `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10,000.
+* `consumer_group_id` - (Optional) The name of the consumer group to use. Maximum length of 200.
+* `credentials` - (Optional) The credentials needed to access the resource. Detailed below.
+* `maximum_batching_window_in_seconds` - (Optional) The maximum length of time to wait for events. Maximum value of 300.
+* `server_root_ca_certificate` - (Optional) The ARN of the Secrets Manager secret used for certification.
+* `starting_position` - (Optional) The position in a stream from which to start reading. Valid values: TRIM_HORIZON, LATEST.
+* `topic_name` - (Required) The name of the topic that the pipe will read from. Maximum length of 249.
+* `vpc` - (Optional) This structure specifies the VPC subnets and security groups for the stream, and whether a public IP address is to be used. Detailed below.
+
+##### source_parameters.self_managed_kafka_parameters.credentials Configuration Block
+
+* `basic_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials.
+* `client_certificate_tls_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials.
+* `sasl_scram_256_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials.
+* `sasl_scram_512_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials.
+
+##### source_parameters.self_managed_kafka_parameters.vpc Configuration Block
+
+* `security_groups` - (Optional) List of security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.
+* `subnets` - (Optional) List of the subnets associated with the stream. These subnets must all be in the same VPC. You can specify as many as 16 subnets.
+
+#### source_parameters.sqs_queue_parameters Configuration Block
+
+* `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10,000.
+* `maximum_batching_window_in_seconds` - (Optional) The maximum length of time to wait for events. Maximum value of 300.
+
### target_parameters Configuration Block

-* `input_template` - (Optional) Valid JSON text passed to the target. In this case, nothing from the event itself is passed to the target.
+You can find out more about EventBridge Pipes targets in the [User Guide](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-target.html).
+
+* `batch_job_parameters` - (Optional) The parameters for using an AWS Batch job as a target. Detailed below.
+* `cloudwatch_logs_parameters` - (Optional) The parameters for using a CloudWatch Logs log stream as a target. Detailed below.
+* `ecs_task_parameters` - (Optional) The parameters for using an Amazon ECS task as a target. Detailed below.
+* `eventbridge_event_bus_parameters` - (Optional) The parameters for using an EventBridge event bus as a target. Detailed below.
+* `http_parameters` - (Optional) Custom parameters to be used when the target is an API Gateway REST API or EventBridge ApiDestination. Detailed below.
+* `input_template` - (Optional) Valid JSON text passed to the target. In this case, nothing from the event itself is passed to the target. Maximum length of 8,192 characters.
+* `kinesis_stream_parameters` - (Optional) The parameters for using a Kinesis stream as a target. Detailed below.
+* `lambda_function_parameters` - (Optional) The parameters for using a Lambda function as a target. Detailed below.
+* `redshift_data_parameters` - (Optional) Custom parameters to be used when the target is an Amazon Redshift cluster, to invoke the Amazon Redshift Data API BatchExecuteStatement. Detailed below.
+* `sagemaker_pipeline_parameters` - (Optional) The parameters for using a SageMaker pipeline as a target. Detailed below.
+* `sqs_queue_parameters` - (Optional) The parameters for using an Amazon SQS queue as a target. Detailed below.
+* `step_function_state_machine_parameters` - (Optional) The parameters for using a Step Functions state machine as a target. Detailed below.
+
+#### target_parameters.batch_job_parameters Configuration Block
+
+* `array_properties` - (Optional) The array properties for the submitted job, such as the size of the array. The array size can be between 2 and 10,000. If you specify array properties for a job, it becomes an array job. This parameter is used only if the target is an AWS Batch job. Detailed below.
+* `container_overrides` - (Optional) The overrides that are sent to a container. Detailed below.
+* `depends_on` - (Optional) A list of dependencies for the job. A job can depend upon a maximum of 20 jobs. You can specify a SEQUENTIAL type dependency without specifying a job ID for array jobs so that each child array job completes sequentially, starting at index 0. You can also specify an N_TO_N type dependency with a job ID for array jobs. In that case, each index child of this job must wait for the corresponding index child of each dependency to complete before it can begin. Detailed below.
+* `job_definition` - (Required) The job definition used by this job. This value can be one of name, name:revision, or the Amazon Resource Name (ARN) for the job definition. If name is specified without a revision, the latest active revision is used.
+* `job_name` - (Required) The name of the job. It can be up to 128 letters long.
+* `parameters` - (Optional) Additional parameters passed to the job that replace parameter substitution placeholders that are set in the job definition. Parameters are specified as a key and value pair mapping. Parameters included here override any corresponding parameter defaults from the job definition. Detailed below.
+* `retry_strategy` - (Optional) The retry strategy to use for failed jobs. When a retry strategy is specified here, it overrides the retry strategy defined in the job definition. Detailed below.
+
+##### target_parameters.batch_job_parameters.array_properties Configuration Block
+
+* `size` - (Optional) The size of the array, if this is an array batch job. Minimum value of 2. Maximum value of 10,000.
+
+##### target_parameters.batch_job_parameters.container_overrides Configuration Block
+
+* `command` - (Optional) List of commands to send to the container that overrides the default command from the Docker image or the task definition.
+* `environment` - (Optional) The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the task definition. Environment variables cannot start with "AWS_BATCH"; this naming convention is reserved for variables that AWS Batch sets. Detailed below.
+* `instance_type` - (Optional) The instance type to use for a multi-node parallel job. This parameter isn't applicable to single-node container jobs or jobs that run on Fargate resources, and shouldn't be provided.
+* `resource_requirement` - (Optional) The type and amount of resources to assign to a container. This overrides the settings in the job definition. The supported resources include GPU, MEMORY, and VCPU. Detailed below.
+
+###### target_parameters.batch_job_parameters.container_overrides.environment Configuration Block
+
+* `name` - (Optional) The name of the key-value pair. For environment variables, this is the name of the environment variable.
+* `value` - (Optional) The value of the key-value pair. For environment variables, this is the value of the environment variable.
+
+###### target_parameters.batch_job_parameters.container_overrides.resource_requirement Configuration Block
+
+* `type` - (Optional) The type of resource to assign to a container. The supported resources include GPU, MEMORY, and VCPU.
+* `value` - (Optional) The quantity of the specified resource to reserve for the container. [The values vary based on the type specified](https://docs.aws.amazon.com/eventbridge/latest/pipes-reference/API_BatchResourceRequirement.html).
+
+##### target_parameters.batch_job_parameters.depends_on Configuration Block
+
+* `job_id` - (Optional) The job ID of the AWS Batch job that's associated with this dependency.
+* `type` - (Optional) The type of the job dependency. Valid values: N_TO_N, SEQUENTIAL.
+
+##### target_parameters.batch_job_parameters.retry_strategy Configuration Block
+
+* `attempts` - (Optional) The number of times to move a job to the RUNNABLE status. If the value of attempts is greater than one, the job is retried on failure the same number of attempts as the value. Maximum value of 10.
+
+#### target_parameters.cloudwatch_logs_parameters Configuration Block
+
+* `log_stream_name` - (Optional) The name of the log stream.
+* `timestamp` - (Optional) The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. This is the JSON path to the field in the event, e.g. `$.detail.timestamp`.
+
+#### target_parameters.ecs_task_parameters Configuration Block
+
+* `capacity_provider_strategy` - (Optional) List of capacity provider strategies to use for the task. If a capacityProviderStrategy is specified, the launchType parameter must be omitted. If no capacityProviderStrategy or launchType is specified, the defaultCapacityProviderStrategy for the cluster is used. Detailed below.
+* `enable_ecs_managed_tags` - (Optional) Specifies whether to enable Amazon ECS managed tags for the task. Valid values: true, false.
+* `enable_execute_command` - (Optional) Whether or not to enable the execute command functionality for the containers in this task. If true, this enables execute command functionality on all containers in the task. Valid values: true, false.
+* `group` - (Optional) Specifies an Amazon ECS task group for the task. The maximum length is 255 characters.
+* `launch_type` - (Optional) Specifies the launch type on which your task is running. The launch type that you specify here must match one of the launch type (compatibilities) of the target task. The FARGATE value is supported only in the Regions where AWS Fargate with Amazon ECS is supported. Valid values: EC2, FARGATE, EXTERNAL.
+* `network_configuration` - (Optional) Use this structure if the Amazon ECS task uses the awsvpc network mode. This structure specifies the VPC subnets and security groups associated with the task, and whether a public IP address is to be used. This structure is required if LaunchType is FARGATE because the awsvpc mode is required for Fargate tasks. If you specify NetworkConfiguration when the target ECS task does not use the awsvpc network mode, the task fails. Detailed below.
+* `overrides` - (Optional) The overrides that are associated with a task. Detailed below.
+* `placement_constraint` - (Optional) An array of placement constraint objects to use for the task. You can specify up to 10 constraints per task (including constraints in the task definition and those specified at runtime). Detailed below.
+* `placement_strategy` - (Optional) The placement strategy objects to use for the task. You can specify a maximum of five strategy rules per task. Detailed below.
+* `platform_version` - (Optional) Specifies the platform version for the task. Specify only the numeric portion of the platform version, such as 1.1.0. This structure is used only if LaunchType is FARGATE.
+* `propagate_tags` - (Optional) Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags are not propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action. Valid values: TASK_DEFINITION.
+* `reference_id` - (Optional) The reference ID to use for the task. Maximum length of 1,024.
+* `tags` - (Optional) Key-value map of tags that you apply to the task to help you categorize and organize them.
+* `task_count` - (Optional) The number of tasks to create based on TaskDefinition. The default is 1.
+* `task_definition_arn` - (Optional) The ARN of the task definition to use if the event target is an Amazon ECS task.
+
+##### target_parameters.ecs_task_parameters.capacity_provider_strategy Configuration Block
+
+* `base` - (Optional) The base value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. If no value is specified, the default value of 0 is used. Maximum value of 100,000.
+* `capacity_provider` - (Optional) The short name of the capacity provider. Maximum length of 255.
+* `weight` - (Optional) The weight value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The weight value is taken into consideration after the base value, if defined, is satisfied. Maximum value of 1,000.
+
+##### target_parameters.ecs_task_parameters.network_configuration Configuration Block
+
+* `aws_vpc_configuration` - (Optional) Use this structure to specify the VPC subnets and security groups for the task, and whether a public IP address is to be used. This structure is relevant only for ECS tasks that use the awsvpc network mode. Detailed below.
+
+###### target_parameters.ecs_task_parameters.network_configuration.aws_vpc_configuration Configuration Block
+
+* `assign_public_ip` - (Optional) Specifies whether the task's elastic network interface receives a public IP address. You can specify ENABLED only when LaunchType in EcsParameters is set to FARGATE. Valid values: ENABLED, DISABLED.
+* `security_groups` - (Optional) Specifies the security groups associated with the task. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.
+* `subnets` - (Optional) Specifies the subnets associated with the task. These subnets must all be in the same VPC. You can specify as many as 16 subnets.
+
+##### target_parameters.ecs_task_parameters.overrides Configuration Block
+
+* `container_override` - (Optional) One or more container overrides that are sent to a task. Detailed below.
+* `cpu` - (Optional) The CPU override for the task.
+* `ephemeral_storage` - (Optional) The ephemeral storage setting override for the task. Detailed below.
+* `execution_role_arn` - (Optional) The Amazon Resource Name (ARN) of the task execution IAM role override for the task.
+* `inference_accelerator_override` - (Optional) List of Elastic Inference accelerator overrides for the task. Detailed below.
+* `memory` - (Optional) The memory override for the task.
+* `task_role_arn` - (Optional) The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role.
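+
+Because these blocks nest several levels deep, an illustrative configuration can help. The following is a minimal, untested sketch of an ECS task target that sets a network configuration and task-level overrides; all resource names and values are hypothetical:
+
+```terraform
+resource "aws_pipes_pipe" "example" {
+  name     = "example-pipe"
+  role_arn = aws_iam_role.example.arn
+  source   = aws_sqs_queue.source.arn
+  target   = aws_ecs_cluster.example.arn
+
+  target_parameters {
+    ecs_task_parameters {
+      task_definition_arn = aws_ecs_task_definition.example.arn
+      launch_type         = "FARGATE"
+      task_count          = 1
+
+      network_configuration {
+        aws_vpc_configuration {
+          assign_public_ip = "ENABLED"
+          subnets          = aws_subnet.example[*].id
+        }
+      }
+
+      overrides {
+        cpu    = "256"
+        memory = "512"
+      }
+    }
+  }
+}
+```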
+
+###### target_parameters.ecs_task_parameters.overrides.container_override Configuration Block
+
+* `command` - (Optional) List of commands to send to the container that overrides the default command from the Docker image or the task definition. You must also specify a container name.
+* `cpu` - (Optional) The number of CPU units reserved for the container, instead of the default value from the task definition. You must also specify a container name.
+* `environment` - (Optional) The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the task definition. You must also specify a container name. Detailed below.
+* `environment_file` - (Optional) A list of files containing the environment variables to pass to a container, instead of the value from the container definition. Detailed below.
+* `memory` - (Optional) The hard limit (in MiB) of memory to present to the container, instead of the default value from the task definition. If your container attempts to exceed the memory specified here, the container is killed. You must also specify a container name.
+* `memory_reservation` - (Optional) The soft limit (in MiB) of memory to reserve for the container, instead of the default value from the task definition. You must also specify a container name.
+* `name` - (Optional) The name of the container that receives the override. This parameter is required if any override is specified.
+* `resource_requirement` - (Optional) The type and amount of a resource to assign to a container, instead of the default value from the task definition. The only supported resource is a GPU. Detailed below.
+
+###### target_parameters.ecs_task_parameters.overrides.container_override.environment Configuration Block
+
+* `name` - (Optional) The name of the key-value pair. For environment variables, this is the name of the environment variable.
+* `value` - (Optional) The value of the key-value pair. For environment variables, this is the value of the environment variable.
+
+###### target_parameters.ecs_task_parameters.overrides.container_override.environment_file Configuration Block
+
+* `type` - (Optional) The file type to use. The only supported value is s3.
+* `value` - (Optional) The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable file.
+
+###### target_parameters.ecs_task_parameters.overrides.container_override.resource_requirement Configuration Block
+
+* `type` - (Optional) The type of resource to assign to a container. The supported values are GPU or InferenceAccelerator.
+* `value` - (Optional) The value for the specified resource type. If the GPU type is used, the value is the number of physical GPUs the Amazon ECS container agent reserves for the container. The number of GPUs that is reserved for all containers in a task can't exceed the number of available GPUs on the container instance that the task is launched on. If the InferenceAccelerator type is used, the value matches the deviceName for an InferenceAccelerator specified in a task definition.
+
+###### target_parameters.ecs_task_parameters.overrides.ephemeral_storage Configuration Block
+
+* `size_in_gib` - (Required) The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is 21 GiB and the maximum supported value is 200 GiB.
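+
+As an illustration of how these container-level blocks nest inside `overrides`, a hedged, untested sketch that overrides a hypothetical container named "app" and sets ephemeral storage; all names and values are illustrative:
+
+```terraform
+resource "aws_pipes_pipe" "example" {
+  name     = "example-pipe"
+  role_arn = aws_iam_role.example.arn
+  source   = aws_sqs_queue.source.arn
+  target   = aws_ecs_cluster.example.arn
+
+  target_parameters {
+    ecs_task_parameters {
+      task_definition_arn = aws_ecs_task_definition.example.arn
+
+      overrides {
+        container_override {
+          name   = "app"
+          memory = 512
+
+          environment {
+            name  = "EXAMPLE_VAR"
+            value = "example-value"
+          }
+
+          resource_requirement {
+            type  = "GPU"
+            value = "1"
+          }
+        }
+
+        ephemeral_storage {
+          size_in_gib = 21
+        }
+      }
+    }
+  }
+}
+```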
+
+###### target_parameters.ecs_task_parameters.overrides.inference_accelerator_override Configuration Block
+
+* `device_name` - (Optional) The Elastic Inference accelerator device name to override for the task. This parameter must match a deviceName specified in the task definition.
+* `device_type` - (Optional) The Elastic Inference accelerator type to use.
+
+##### target_parameters.ecs_task_parameters.placement_constraint Configuration Block
+
+* `expression` - (Optional) A cluster query language expression to apply to the constraint. You cannot specify an expression if the constraint type is distinctInstance. Maximum length of 2,000.
+* `type` - (Optional) The type of constraint. Use distinctInstance to ensure that each task in a particular group is running on a different container instance. Use memberOf to restrict the selection to a group of valid candidates. Valid values: distinctInstance, memberOf.
+
+##### target_parameters.ecs_task_parameters.placement_strategy Configuration Block
+
+* `field` - (Optional) The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance, such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are cpu and memory. For the random placement strategy, this field is not used. Maximum length of 255.
+* `type` - (Optional) The type of placement strategy. The random placement strategy randomly places tasks on available candidates. The spread placement strategy spreads placement across available candidates evenly based on the field parameter. The binpack strategy places tasks on available candidates that have the least available amount of the resource that is specified with the field parameter. For example, if you binpack on memory, a task is placed on the instance with the least amount of remaining memory (but still enough to run the task). Valid values: random, spread, binpack.
+
+#### target_parameters.eventbridge_event_bus_parameters Configuration Block
+
+* `detail_type` - (Optional) A free-form string, with a maximum of 128 characters, used to decide what fields to expect in the event detail.
+* `endpoint_id` - (Optional) The URL subdomain of the endpoint. For example, if the URL for the endpoint is `https://abcde.veo.endpoints.event.amazonaws.com`, then the `endpoint_id` is `abcde.veo`.
+* `resources` - (Optional) List of AWS resources, identified by Amazon Resource Name (ARN), which the event primarily concerns. Any number, including zero, may be present.
+* `source` - (Optional) The source of the event. Maximum length of 256.
+* `time` - (Optional) The timestamp of the event, per RFC 3339. If no timestamp is provided, the timestamp of the PutEvents call is used. This is the JSON path to the field in the event, e.g. `$.detail.timestamp`.
+
+#### target_parameters.http_parameters Configuration Block
+
+* `header_parameters` - (Optional) Key-value mapping of the headers to be sent as part of the request invoking the API Gateway REST API or EventBridge ApiDestination.
+* `path_parameter_values` - (Optional) The path parameter values used to populate API Gateway REST API or EventBridge ApiDestination path wildcards ("*").
+* `query_string_parameters` - (Optional) Key-value mapping of the query strings to be sent as part of the request invoking the API Gateway REST API or EventBridge ApiDestination.
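+
+As a sketch of the block and map syntax above, an untested example that targets a hypothetical EventBridge API destination; all names and values are illustrative:
+
+```terraform
+resource "aws_pipes_pipe" "example" {
+  name     = "example-pipe"
+  role_arn = aws_iam_role.example.arn
+  source   = aws_sqs_queue.source.arn
+  target   = aws_cloudwatch_event_api_destination.example.arn
+
+  target_parameters {
+    http_parameters {
+      header_parameters = {
+        "example-header" = "example-value"
+      }
+
+      path_parameter_values = ["example-path-param"]
+
+      query_string_parameters = {
+        "example-query-string" = "example-value"
+      }
+    }
+  }
+}
+```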
+
+#### target_parameters.kinesis_stream_parameters Configuration Block
+
+* `partition_key` - (Required) Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis Data Streams uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream.
+
+#### target_parameters.lambda_function_parameters Configuration Block
+
+* `invocation_type` - (Optional) Specify whether to invoke the function synchronously or asynchronously. Valid values: REQUEST_RESPONSE, FIRE_AND_FORGET.
+
+#### target_parameters.redshift_data_parameters Configuration Block
+
+* `database` - (Required) The name of the database. Required when authenticating using temporary credentials.
+* `db_user` - (Optional) The database user name. Required when authenticating using temporary credentials.
+* `secret_manager_arn` - (Optional) The name or ARN of the secret that enables access to the database. Required when authenticating using Secrets Manager.
+* `sqls` - (Optional) List of SQL statement texts to run, each of maximum length 100,000.
+* `statement_name` - (Optional) The name of the SQL statement. You can name the SQL statement when you create it to identify the query.
+* `with_event` - (Optional) Indicates whether to send an event back to EventBridge after the SQL statement runs.
+
+#### target_parameters.sagemaker_pipeline_parameters Configuration Block
+
+* `pipeline_parameter` - (Optional) List of parameter names and values for SageMaker Model Building Pipeline execution. Detailed below.
+
+##### target_parameters.sagemaker_pipeline_parameters.pipeline_parameter Configuration Block
+
+* `name` - (Optional) Name of parameter to start execution of a SageMaker Model Building Pipeline. Maximum length of 256.
+* `value` - (Optional) Value of parameter to start execution of a SageMaker Model Building Pipeline. Maximum length of 1,024.
+
+#### target_parameters.sqs_queue_parameters Configuration Block
+
+* `message_deduplication_id` - (Optional) This parameter applies only to FIFO (first-in-first-out) queues. The token used for deduplication of sent messages.
+* `message_group_id` - (Optional) The FIFO message group ID to use as the target.
+
+#### target_parameters.step_function_state_machine_parameters Configuration Block
+
+* `invocation_type` - (Optional) Specify whether to invoke the Step Functions state machine synchronously or asynchronously. Valid values: REQUEST_RESPONSE, FIRE_AND_FORGET.

## Attributes Reference