diff --git a/.changelog/29783.txt b/.changelog/29783.txt new file mode 100644 index 00000000000..14bccc96b9a --- /dev/null +++ b/.changelog/29783.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_ecs_task_execution +``` diff --git a/internal/service/ecs/service_package_gen.go b/internal/service/ecs/service_package_gen.go index f77ff991f17..238a1461798 100644 --- a/internal/service/ecs/service_package_gen.go +++ b/internal/service/ecs/service_package_gen.go @@ -27,6 +27,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) map[string]func() * "aws_ecs_container_definition": DataSourceContainerDefinition, "aws_ecs_service": DataSourceService, "aws_ecs_task_definition": DataSourceTaskDefinition, + "aws_ecs_task_execution": DataSourceTaskExecution, } } diff --git a/internal/service/ecs/task_execution_data_source.go b/internal/service/ecs/task_execution_data_source.go new file mode 100644 index 00000000000..0057cce6b3d --- /dev/null +++ b/internal/service/ecs/task_execution_data_source.go @@ -0,0 +1,477 @@ +package ecs + +import ( + "context" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ecs" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @SDKDataSource("aws_ecs_task_execution") +func DataSourceTaskExecution() *schema.Resource { + return &schema.Resource{ + ReadWithoutTimeout: dataSourceTaskExecutionRead, + + Schema: map[string]*schema.Schema{ + "capacity_provider_strategy": { + Type: 
schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "base": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 100000), + }, + "capacity_provider": { + Type: schema.TypeString, + Required: true, + }, + "weight": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 1000), + }, + }, + }, + }, + "cluster": { + Type: schema.TypeString, + Required: true, + }, + "desired_count": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 10), + }, + "enable_ecs_managed_tags": { + Type: schema.TypeBool, + Optional: true, + }, + "enable_execute_command": { + Type: schema.TypeBool, + Optional: true, + }, + "group": { + Type: schema.TypeString, + Optional: true, + }, + "launch_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(ecs.LaunchType_Values(), false), + }, + "network_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "security_groups": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "subnets": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "assign_public_ip": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, + "overrides": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "container_overrides": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "command": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "cpu": { + Type: schema.TypeInt, + Optional: true, + }, + "environment": { + Type: schema.TypeSet, + Optional: true, + Elem: 
&schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "memory": { + Type: schema.TypeInt, + Optional: true, + }, + "memory_reservation": { + Type: schema.TypeInt, + Optional: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "resource_requirements": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(ecs.ResourceType_Values(), false), + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + }, + "cpu": { + Type: schema.TypeString, + Optional: true, + }, + "execution_role_arn": { + Type: schema.TypeString, + Optional: true, + }, + "inference_accelerator_overrides": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_name": { + Type: schema.TypeString, + Optional: true, + }, + "device_type": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "memory": { + Type: schema.TypeString, + Optional: true, + }, + "task_role_arn": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "placement_constraints": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 10, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expression": { + Type: schema.TypeString, + Optional: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(ecs.PlacementConstraintType_Values(), false), + }, + }, + }, + }, + "placement_strategy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 5, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field": { + Type: schema.TypeString, + Optional: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + }, + }, + 
}, + }, + "platform_version": { + Type: schema.TypeString, + Optional: true, + }, + "propagate_tags": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(ecs.PropagateTags_Values(), false), + }, + "reference_id": { + Type: schema.TypeString, + Optional: true, + }, + "started_by": { + Type: schema.TypeString, + Optional: true, + }, + "tags": tftags.TagsSchema(), + "task_arns": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "task_definition": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +const ( + DSNameTaskExecution = "Task Execution Data Source" +) + +func dataSourceTaskExecutionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).ECSConn() + + cluster := d.Get("cluster").(string) + taskDefinition := d.Get("task_definition").(string) + d.SetId(strings.Join([]string{cluster, taskDefinition}, ",")) + + input := ecs.RunTaskInput{ + Cluster: aws.String(cluster), + TaskDefinition: aws.String(taskDefinition), + } + + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + tags := defaultTagsConfig.MergeTags(tftags.New(ctx, d.Get("tags").(map[string]interface{}))) + if len(tags) > 0 { + input.Tags = Tags(tags.IgnoreAWS()) + } + + if v, ok := d.GetOk("capacity_provider_strategy"); ok { + input.CapacityProviderStrategy = expandCapacityProviderStrategy(v.(*schema.Set)) + } + if v, ok := d.GetOk("desired_count"); ok { + input.Count = aws.Int64(int64(v.(int))) + } + if v, ok := d.GetOk("enable_ecs_managed_tags"); ok { + input.EnableECSManagedTags = aws.Bool(v.(bool)) + } + if v, ok := d.GetOk("enable_execute_command"); ok { + input.EnableExecuteCommand = aws.Bool(v.(bool)) + } + if v, ok := d.GetOk("group"); ok { + input.Group = aws.String(v.(string)) + } + if v, ok := d.GetOk("launch_type"); ok { + input.LaunchType = aws.String(v.(string)) + } + if v, ok := 
d.GetOk("network_configuration"); ok {
+		input.NetworkConfiguration = expandNetworkConfiguration(v.([]interface{}))
+	}
+	if v, ok := d.GetOk("overrides"); ok {
+		input.Overrides = expandTaskOverride(v.([]interface{}))
+	}
+	if v, ok := d.GetOk("placement_constraints"); ok {
+		pc, err := expandPlacementConstraints(v.(*schema.Set).List())
+		if err != nil {
+			return create.DiagError(names.ECS, create.ErrActionCreating, DSNameTaskExecution, d.Id(), err)
+		}
+		input.PlacementConstraints = pc
+	}
+	if v, ok := d.GetOk("placement_strategy"); ok {
+		ps, err := expandPlacementStrategy(v.([]interface{}))
+		if err != nil {
+			return create.DiagError(names.ECS, create.ErrActionCreating, DSNameTaskExecution, d.Id(), err)
+		}
+		input.PlacementStrategy = ps
+	}
+	if v, ok := d.GetOk("platform_version"); ok {
+		input.PlatformVersion = aws.String(v.(string))
+	}
+	if v, ok := d.GetOk("propagate_tags"); ok {
+		input.PropagateTags = aws.String(v.(string))
+	}
+	if v, ok := d.GetOk("reference_id"); ok {
+		input.ReferenceId = aws.String(v.(string))
+	}
+	if v, ok := d.GetOk("started_by"); ok {
+		input.StartedBy = aws.String(v.(string))
+	}
+
+	out, err := conn.RunTaskWithContext(ctx, &input)
+	if err != nil {
+		return create.DiagError(names.ECS, create.ErrActionCreating, DSNameTaskExecution, d.Id(), err)
+	}
+	if out == nil || len(out.Tasks) == 0 {
+		return create.DiagError(names.ECS, create.ErrActionCreating, DSNameTaskExecution, d.Id(), tfresource.NewEmptyResultError(input))
+	}
+
+	var taskArns []*string
+	for _, t := range out.Tasks {
+		taskArns = append(taskArns, t.TaskArn)
+	}
+	d.Set("task_arns", flex.FlattenStringList(taskArns))
+
+	return diags
+}
+
+// expandTaskOverride maps the single-element "overrides" configuration block
+// into an ecs.TaskOverride. Empty scalar values are skipped so that zero
+// values from the flattened config map are not sent to the API. The
+// "inference_accelerator_overrides" attribute is a schema.TypeSet, so it must
+// be unpacked via (*schema.Set).List(), not asserted to []interface{}.
+func expandTaskOverride(tfList []interface{}) *ecs.TaskOverride {
+	if len(tfList) == 0 {
+		return nil
+	}
+	apiObject := &ecs.TaskOverride{}
+	tfMap := tfList[0].(map[string]interface{})
+
+	if v, ok := tfMap["cpu"].(string); ok && v != "" {
+		apiObject.Cpu = aws.String(v)
+	}
+	if v, ok := tfMap["memory"].(string); ok && v != "" {
+		apiObject.Memory = aws.String(v)
+	}
+	if v, ok := tfMap["execution_role_arn"].(string); ok && v != "" {
+		apiObject.ExecutionRoleArn = aws.String(v)
+	}
+	if v, ok := tfMap["task_role_arn"].(string); ok && v != "" {
+		apiObject.TaskRoleArn = aws.String(v)
+	}
+	if v, ok := tfMap["inference_accelerator_overrides"]; ok {
+		// TypeSet attribute: unpack via List() to avoid a type-assertion panic.
+		apiObject.InferenceAcceleratorOverrides = expandInferenceAcceleratorOverrides(v.(*schema.Set).List())
+	}
+	if v, ok := tfMap["container_overrides"]; ok {
+		apiObject.ContainerOverrides = expandContainerOverride(v.([]interface{}))
+	}
+
+	return apiObject
+}
+
+// expandInferenceAcceleratorOverrides converts the flattened
+// inference_accelerator_overrides elements into API objects.
+func expandInferenceAcceleratorOverrides(tfList []interface{}) []*ecs.InferenceAcceleratorOverride {
+	if len(tfList) == 0 {
+		return nil
+	}
+	apiObject := make([]*ecs.InferenceAcceleratorOverride, 0)
+
+	for _, item := range tfList {
+		tfMap := item.(map[string]interface{})
+		iao := &ecs.InferenceAcceleratorOverride{
+			DeviceName: aws.String(tfMap["device_name"].(string)),
+			DeviceType: aws.String(tfMap["device_type"].(string)),
+		}
+		apiObject = append(apiObject, iao)
+	}
+
+	return apiObject
+}
+
+// expandContainerOverride converts the flattened container_overrides elements
+// into API objects. Terraform schema integers surface as int (not int64), so
+// numeric values are converted explicitly; "environment" and
+// "resource_requirements" are schema.TypeSet attributes and must be unpacked
+// via (*schema.Set).List().
+func expandContainerOverride(tfList []interface{}) []*ecs.ContainerOverride {
+	if len(tfList) == 0 {
+		return nil
+	}
+	apiObject := make([]*ecs.ContainerOverride, 0)
+
+	for _, item := range tfList {
+		tfMap := item.(map[string]interface{})
+		co := &ecs.ContainerOverride{
+			Name: aws.String(tfMap["name"].(string)),
+		}
+		if v, ok := tfMap["command"]; ok {
+			co.Command = flex.ExpandStringList(v.([]interface{}))
+		}
+		// v.(int64) would panic: schema values are int. Convert explicitly
+		// and skip zero values so unset overrides are not sent to the API.
+		if v, ok := tfMap["cpu"].(int); ok && v != 0 {
+			co.Cpu = aws.Int64(int64(v))
+		}
+		if v, ok := tfMap["environment"]; ok {
+			co.Environment = expandTaskEnvironment(v.(*schema.Set).List())
+		}
+		if v, ok := tfMap["memory"].(int); ok && v != 0 {
+			co.Memory = aws.Int64(int64(v))
+		}
+		if v, ok := tfMap["memory_reservation"].(int); ok && v != 0 {
+			// Bug fix: previously this clobbered co.Memory instead of
+			// setting the soft limit.
+			co.MemoryReservation = aws.Int64(int64(v))
+		}
+		if v, ok := tfMap["resource_requirements"]; ok {
+			co.ResourceRequirements = expandResourceRequirements(v.(*schema.Set).List())
+		}
+		apiObject = append(apiObject, co)
+	}
+
+	return apiObject
+}
+
+// expandTaskEnvironment converts the flattened environment elements into
+// ecs.KeyValuePair objects. The schema names the attributes "key"/"value";
+// the corresponding API field for the variable name is Name.
+func expandTaskEnvironment(tfList []interface{}) []*ecs.KeyValuePair {
+	if len(tfList) == 0 {
+		return nil
+	}
+	apiObject := make([]*ecs.KeyValuePair, 0)
+
+	for _, item := range tfList {
+		tfMap := item.(map[string]interface{})
+		te := &ecs.KeyValuePair{
+			// Bug fix: the schema key is "key", not "name"; reading
+			// tfMap["name"] always panicked on the nil map value.
+			Name:  aws.String(tfMap["key"].(string)),
+			Value: aws.String(tfMap["value"].(string)),
+		}
+		apiObject = append(apiObject, te)
+	}
+
+	return apiObject
+}
+
+// expandResourceRequirements converts the flattened resource_requirements
+// elements into API objects.
+func expandResourceRequirements(tfList []interface{}) []*ecs.ResourceRequirement {
+	if len(tfList) == 0 {
+		return nil
+	}
+
+	apiObject := make([]*ecs.ResourceRequirement, 0)
+	for _, item := range tfList {
+		tfMap := item.(map[string]interface{})
+		rr := &ecs.ResourceRequirement{
+			Type:  aws.String(tfMap["type"].(string)),
+			Value: aws.String(tfMap["value"].(string)),
+		}
+		apiObject = append(apiObject, rr)
+	}
+
+	return apiObject
+}
diff --git a/internal/service/ecs/task_execution_data_source_test.go b/internal/service/ecs/task_execution_data_source_test.go
new file mode 100644
index 00000000000..a09d63eaa7b
--- /dev/null
+++ b/internal/service/ecs/task_execution_data_source_test.go
@@ -0,0 +1,181 @@
+package ecs_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/service/ecs"
+	sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+	"github.com/hashicorp/terraform-provider-aws/internal/acctest"
+)
+
+func TestAccECSTaskExecutionDataSource_basic(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping long-running test in short mode")
+	}
+
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+	dataSourceName := "data.aws_ecs_task_execution.test"
+	clusterName := "aws_ecs_cluster.test"
+	taskDefinitionName := "aws_ecs_task_definition.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck: func() {
+			acctest.PreCheck(t)
+			acctest.PreCheckPartitionHasService(t, ecs.EndpointsID)
}, + ErrorCheck: acctest.ErrorCheck(t, ecs.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccTaskExecutionDataSourceConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "cluster", clusterName, "id"), + resource.TestCheckResourceAttrPair(dataSourceName, "task_definition", taskDefinitionName, "arn"), + resource.TestCheckResourceAttr(dataSourceName, "desired_count", "1"), + resource.TestCheckResourceAttr(dataSourceName, "launch_type", "FARGATE"), + resource.TestCheckResourceAttr(dataSourceName, "network_configuration.#", "1"), + resource.TestCheckResourceAttr(dataSourceName, "task_arns.#", "1"), + ), + }, + }, + }) +} + +func TestAccECSTaskExecutionDataSource_tags(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_ecs_task_execution.test" + clusterName := "aws_ecs_cluster.test" + taskDefinitionName := "aws_ecs_task_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckPartitionHasService(t, ecs.EndpointsID) + }, + ErrorCheck: acctest.ErrorCheck(t, ecs.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccTaskExecutionDataSourceConfig_tags(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "cluster", clusterName, "id"), + resource.TestCheckResourceAttrPair(dataSourceName, "task_definition", taskDefinitionName, "arn"), + resource.TestCheckResourceAttr(dataSourceName, "desired_count", "1"), + resource.TestCheckResourceAttr(dataSourceName, "launch_type", "FARGATE"), + resource.TestCheckResourceAttr(dataSourceName, "network_configuration.#", "1"), + 
resource.TestCheckResourceAttr(dataSourceName, "task_arns.#", "1"), + resource.TestCheckResourceAttr(dataSourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(dataSourceName, "tags.key1", "value1"), + ), + }, + }, + }) +} + +func testAccTaskExecutionDataSourceConfig_base(rName string) string { + return fmt.Sprintf(` +resource "aws_security_group" "test" { + name = %[1]q + vpc_id = aws_vpc.test.id + + ingress { + protocol = "6" + from_port = 80 + to_port = 8000 + cidr_blocks = [aws_vpc.test.cidr_block] + } + + tags = { + Name = %[1]q + } +} + +resource "aws_ecs_cluster" "test" { + name = %[1]q +} + +resource "aws_ecs_cluster_capacity_providers" "test" { + cluster_name = aws_ecs_cluster.test.name + capacity_providers = ["FARGATE"] +} + +resource "aws_ecs_task_definition" "test" { + family = %[1]q + network_mode = "awsvpc" + requires_compatibilities = ["FARGATE"] + cpu = "256" + memory = "512" + + container_definitions = jsonencode([ + { + name = "sleep" + image = "busybox" + cpu = 10 + command = ["sleep", "10"] + memory = 10 + essential = true + portMappings = [ + { + protocol = "tcp" + containerPort = 8000 + } + ] + } + ]) +} +`, rName) +} + +func testAccTaskExecutionDataSourceConfig_basic(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigVPCWithSubnets(rName, 2), + testAccTaskExecutionDataSourceConfig_base(rName), + ` +data "aws_ecs_task_execution" "test" { + depends_on = [aws_ecs_cluster_capacity_providers.test] + + cluster = aws_ecs_cluster.test.id + task_definition = aws_ecs_task_definition.test.arn + desired_count = 1 + launch_type = "FARGATE" + + network_configuration { + subnets = aws_subnet.test[*].id + security_groups = [aws_security_group.test.id] + assign_public_ip = false + } +} +`) +} + +func testAccTaskExecutionDataSourceConfig_tags(rName, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose( + acctest.ConfigVPCWithSubnets(rName, 2), + testAccTaskExecutionDataSourceConfig_base(rName), + fmt.Sprintf(` +data 
"aws_ecs_task_execution" "test" { + depends_on = [aws_ecs_cluster_capacity_providers.test] + + cluster = aws_ecs_cluster.test.id + task_definition = aws_ecs_task_definition.test.arn + desired_count = 1 + launch_type = "FARGATE" + + network_configuration { + subnets = aws_subnet.test[*].id + security_groups = [aws_security_group.test.id] + assign_public_ip = false + } + + tags = { + %[1]q = %[2]q + } +} +`, tagKey1, tagValue1)) +} diff --git a/website/docs/d/ecs_task_execution.html.markdown b/website/docs/d/ecs_task_execution.html.markdown new file mode 100644 index 00000000000..6ea191b3e5a --- /dev/null +++ b/website/docs/d/ecs_task_execution.html.markdown @@ -0,0 +1,124 @@ +--- +subcategory: "ECS (Elastic Container)" +layout: "aws" +page_title: "AWS: aws_ecs_task_execution" +description: |- + Terraform data source for managing an AWS ECS (Elastic Container) Task Execution. +--- + +# Data Source: aws_ecs_task_execution + +Terraform data source for managing an AWS ECS (Elastic Container) Task Execution. This data source calls the [RunTask](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html) API, allowing execution of one-time tasks that don't fit a standard resource lifecycle. See the [feature request issue](https://github.com/hashicorp/terraform-provider-aws/issues/1703) for additional context. + +~> **NOTE on plan operations:** This data source calls the `RunTask` API on every read operation, which means new task(s) may be created from a `terraform plan` command if all attributes are known. Placing this functionality behind a data source is an intentional trade off to enable use cases requiring a one-time task execution without relying on [provisioners](https://developer.hashicorp.com/terraform/language/resources/provisioners/syntax). Caution should be taken to ensure the data source is only executed once, or that the resulting tasks can safely run in parallel. 
+ +## Example Usage + +### Basic Usage + +```terraform +data "aws_ecs_task_execution" "example" { + cluster = aws_ecs_cluster.example.id + task_definition = aws_ecs_task_definition.example.arn + desired_count = 1 + launch_type = "FARGATE" + + network_configuration { + subnets = aws_subnet.example[*].id + security_groups = [aws_security_group.example.id] + assign_public_ip = false + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `cluster` - (Required) Short name or full Amazon Resource Name (ARN) of the cluster to run the task on. +* `task_definition` - (Required) The `family` and `revision` (`family:revision`) or full ARN of the task definition to run. If a revision isn't specified, the latest `ACTIVE` revision is used. + +The following arguments are optional: + +* `capacity_provider_strategy` - (Optional) Set of capacity provider strategies to use for the cluster. See below. +* `desired_count` - (Optional) Number of instantiations of the specified task to place on your cluster. You can specify up to 10 tasks for each call. +* `enable_ecs_managed_tags` - (Optional) Specifies whether to enable Amazon ECS managed tags for the tasks within the service. +* `enable_execute_command` - (Optional) Specifies whether to enable Amazon ECS Exec for the tasks within the service. +* `group` - (Optional) Name of the task group to associate with the task. The default value is the family name of the task definition. +* `launch_type` - (Optional) Launch type on which to run your service. Valid values are `EC2`, `FARGATE`, and `EXTERNAL`. +* `network_configuration` - (Optional) Network configuration for the service. This parameter is required for task definitions that use the `awsvpc` network mode to receive their own Elastic Network Interface, and it is not supported for other network modes. See below. 
+* `overrides` - (Optional) A list of container overrides that specify the name of a container in the specified task definition and the overrides it should receive. +* `placement_constraints` - (Optional) An array of placement constraint objects to use for the task. You can specify up to 10 constraints for each task. See below. +* `placement_strategy` - (Optional) The placement strategy objects to use for the task. You can specify a maximum of 5 strategy rules for each task. See below. +* `platform_version` - (Optional) The platform version the task uses. A platform version is only specified for tasks hosted on Fargate. If one isn't specified, the `LATEST` platform version is used. +* `propagate_tags` - (Optional) Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. An error will be received if you specify the `SERVICE` option when running a task. Valid values are `TASK_DEFINITION` or `NONE`. +* `reference_id` - (Optional) The reference ID to use for the task. +* `started_by` - (Optional) An optional tag specified when a task is started. +* `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### capacity_provider_strategy + +* `capacity_provider` - (Required) Name of the capacity provider. +* `base` - (Optional) The number of tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. Defaults to `0`. +* `weight` - (Optional) The relative percentage of the total number of launched tasks that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` count of tasks has been satisfied. Defaults to `0`. 
+ +### network_configuration + +* `subnets` - (Required) Subnets associated with the task or service. +* `security_groups` - (Optional) Security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used. +* `assign_public_ip` - (Optional) Assign a public IP address to the ENI (Fargate launch type only). Valid values are `true` or `false`. Default `false`. + +For more information, see the [Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) documentation. + +### overrides + +* `container_overrides` - (Optional) One or more container overrides that are sent to a task. See below. +* `cpu` - (Optional) The CPU override for the task. +* `execution_role_arn` - (Optional) Amazon Resource Name (ARN) of the task execution role override for the task. +* `inference_accelerator_overrides` - (Optional) Elastic Inference accelerator override for the task. See below. +* `memory` - (Optional) The memory override for the task. +* `task_role_arn` - (Optional) Amazon Resource Name (ARN) of the role that containers in this task can assume. + +### container_overrides + +* `command` - (Optional) The command to send to the container that overrides the default command from the Docker image or the task definition. +* `cpu` - (Optional) The number of cpu units reserved for the container, instead of the default value from the task definition. +* `environment` - (Optional) The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the task definition. See below. +* `memory` - (Optional) The hard limit (in MiB) of memory to present to the container, instead of the default value from the task definition. If your container attempts to exceed the memory specified here, the container is killed. 
+* `memory_reservation` - (Optional) The soft limit (in MiB) of memory to reserve for the container, instead of the default value from the task definition.
+* `name` - (Required) The name of the container that receives the override. This parameter is required if any override is specified.
+* `resource_requirements` - (Optional) The type and amount of a resource to assign to a container, instead of the default value from the task definition. The only supported resource is a GPU. See below.
+
+### environment
+
+* `key` - (Required) The name of the key-value pair. For environment variables, this is the name of the environment variable.
+* `value` - (Required) The value of the key-value pair. For environment variables, this is the value of the environment variable.
+
+### resource_requirements
+
+* `type` - (Required) The type of resource to assign to a container. Valid values are `GPU` or `InferenceAccelerator`.
+* `value` - (Required) The value for the specified resource type. If the `GPU` type is used, the value is the number of physical GPUs the Amazon ECS container agent reserves for the container. The number of GPUs that's reserved for all containers in a task can't exceed the number of available GPUs on the container instance that the task is launched on. If the `InferenceAccelerator` type is used, the value matches the `deviceName` for an InferenceAccelerator specified in a task definition.
+
+### inference_accelerator_overrides
+
+* `device_name` - (Optional) The Elastic Inference accelerator device name to override for the task. This parameter must match a deviceName specified in the task definition.
+* `device_type` - (Optional) The Elastic Inference accelerator type to use.
+
+### placement_constraints
+
+* `expression` - (Optional) A cluster query language expression to apply to the constraint. The expression can have a maximum length of 2000 characters. You can't specify an expression if the constraint type is `distinctInstance`.
+* `type` - (Required) The type of constraint. Valid values are `distinctInstance` or `memberOf`. Use `distinctInstance` to ensure that each task in a particular group is running on a different container instance. Use `memberOf` to restrict the selection to a group of valid candidates.
+
+### placement_strategy
+
+* `field` - (Optional) The field to apply the placement strategy against.
+* `type` - (Required) The type of placement strategy. Valid values are `random`, `spread`, and `binpack`.
+
+For more information, see the [Placement Strategy](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PlacementStrategy.html) documentation.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `task_arns` - A list of the provisioned task ARNs.
+* `id` - The unique identifier, which is a comma-delimited string joining the `cluster` and `task_definition` attributes.