diff --git a/codegen/sdk-codegen/aws-models/ec2.json b/codegen/sdk-codegen/aws-models/ec2.json index 3ba777c8494..70860006fa4 100644 --- a/codegen/sdk-codegen/aws-models/ec2.json +++ b/codegen/sdk-codegen/aws-models/ec2.json @@ -10506,7 +10506,7 @@ "target": "com.amazonaws.ec2#CreateCustomerGatewayResult" }, "traits": { - "smithy.api#documentation": "
Provides information to Amazon Web Services about your VPN customer gateway device. The\n customer gateway is the appliance at your end of the VPN connection. (The device on the\n Amazon Web Services side of the VPN connection is the virtual private gateway.) You\n must provide the internet-routable IP address of the customer gateway's external\n interface. The IP address must be static and can be behind a device performing network\n address translation (NAT).
\nFor devices that use Border Gateway Protocol (BGP), you can also provide the device's\n BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network.\n If you don't have an ASN already, you can use a private ASN. For more information, see \n Customer gateway \n options for your Site-to-Site VPN connection in the Amazon Web Services Site-to-Site VPN User Guide.
\nTo create more than one customer gateway with the same VPN type, IP address, and\n BGP ASN, specify a unique device name for each customer gateway. An identical request\n returns information about the existing customer gateway; it doesn't create a new customer\n gateway.
" + "smithy.api#documentation": "Provides information to Amazon Web Services about your customer gateway device. The\n customer gateway device is the appliance at your end of the VPN connection. You\n must provide the IP address of the customer gateway device’s external\n interface. The IP address must be static and can be behind a device performing network\n address translation (NAT).
\nFor devices that use Border Gateway Protocol (BGP), you can also provide the device's\n BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network.\n If you don't have an ASN already, you can use a private ASN. For more information, see \n Customer gateway \n options for your Site-to-Site VPN connection in the Amazon Web Services Site-to-Site VPN User Guide.
\nTo create more than one customer gateway with the same VPN type, IP address, and\n BGP ASN, specify a unique device name for each customer gateway. An identical request\n returns information about the existing customer gateway; it doesn't create a new customer\n gateway.
" } }, "com.amazonaws.ec2#CreateCustomerGatewayRequest": { @@ -10522,8 +10522,7 @@ "PublicIp": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "The Internet-routable IP address for the customer gateway's outside interface. The\n address must be static.
", - "smithy.api#xmlName": "IpAddress" + "smithy.api#documentation": "\n This member has been deprecated. The Internet-routable IP address for the customer gateway's outside interface. The\n address must be static.
" } }, "CertificateArn": { @@ -10552,6 +10551,12 @@ "smithy.api#documentation": "A name for the customer gateway device.
\nLength Constraints: Up to 255 characters.
" } }, + "IpAddress": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "\n IPv4 address for the customer gateway device's outside interface. The address must be static.\n
" + } + }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -15679,7 +15684,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "IpAddress", - "smithy.api#documentation": "The Internet-routable IP address of the customer gateway's outside interface.
", + "smithy.api#documentation": "The IP address of the customer gateway device's outside interface.
", "smithy.api#xmlName": "ipAddress" } }, @@ -20616,7 +20621,7 @@ "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "One or more filters.
\n\n bgp-asn
- The customer gateway's Border Gateway Protocol (BGP)\n Autonomous System Number (ASN).
\n customer-gateway-id
- The ID of the customer gateway.
\n ip-address
- The IP address of the customer gateway's\n Internet-routable external interface.
\n state
- The state of the customer gateway (pending
|\n available
| deleting
|\n deleted
).
\n type
- The type of customer gateway. Currently, the only\n supported type is ipsec.1
.
\n tag
:Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
\n tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
One or more filters.
\n\n bgp-asn
- The customer gateway's Border Gateway Protocol (BGP)\n Autonomous System Number (ASN).
\n customer-gateway-id
- The ID of the customer gateway.
\n ip-address
- The IP address of the customer gateway\n device's external interface.
\n state
- The state of the customer gateway (pending
|\n available
| deleting
|\n deleted
).
\n type
- The type of customer gateway. Currently, the only\n supported type is ipsec.1
.
\n tag
:Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
\n tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
The type of IPv4 address assigned to the outside interface of the customer gateway.
\nValid values: PrivateIpv4
| PublicIpv4
\n
Default: PublicIpv4
\n
The transit gateway attachment ID in use for the VPN tunnel.
", + "smithy.api#xmlName": "transportTransitGatewayAttachmentId" + } + }, "TunnelInsideIpVersion": { "target": "com.amazonaws.ec2#TunnelInsideIpVersion", "traits": { @@ -80182,6 +80203,18 @@ "traits": { "smithy.api#documentation": "The IPv6 CIDR on the Amazon Web Services side of the VPN connection.
\nDefault: ::/0
\n
The type of IPv4 address assigned to the outside interface of the customer gateway device.
\nValid values: PrivateIpv4
| PublicIpv4
\n
Default: PublicIpv4
\n
The transit gateway attachment ID to use for the VPN tunnel.
\nRequired if OutsideIpAddressType
is set to PrivateIpv4
.
Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service. It makes\n\t\t\tit easy to run, stop, and manage Docker containers on a cluster. You can host your\n\t\t\tcluster on a serverless infrastructure that's managed by Amazon ECS by launching your\n\t\t\tservices or tasks on Fargate. For more control, you can host your tasks on a cluster\n\t\t\tof Amazon Elastic Compute Cloud (Amazon EC2) instances that you manage.
\n\t\tAmazon ECS makes it easy to launch and stop container-based applications with simple API\n\t\t\tcalls. This makes it easy to get the state of your cluster from a centralized service,\n\t\t\tand gives you access to many familiar Amazon EC2 features.
\n\t\tYou can use Amazon ECS to schedule the placement of containers across your cluster based on\n\t\t\tyour resource needs, isolation policies, and availability requirements. With Amazon ECS, you\n\t\t\tdon't need to operate your own cluster management and configuration management systems.\n\t\t\tYou also don't need to worry about scaling your management infrastructure.
", + "smithy.api#documentation": "Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service. It makes\n\t\t\tit easy to run, stop, and manage Docker containers. You can host your cluster on a\n\t\t\tserverless infrastructure that's managed by Amazon ECS by launching your services or tasks on\n\t\t\tFargate. For more control, you can host your tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2)\n\t\t\tor External (on-premises) instances that you manage.
\n\t\tAmazon ECS makes it easy to launch and stop container-based applications with simple API\n\t\t\tcalls. This makes it easy to get the state of your cluster from a centralized service,\n\t\t\tand gives you access to many familiar Amazon EC2 features.
\n\t\tYou can use Amazon ECS to schedule the placement of containers across your cluster based on\n\t\t\tyour resource needs, isolation policies, and availability requirements. With Amazon ECS, you\n\t\t\tdon't need to operate your own cluster management and configuration management systems.\n\t\t\tYou also don't need to worry about scaling your management infrastructure.
", "smithy.api#title": "Amazon EC2 Container Service", "smithy.api#xmlNamespace": { "uri": "http://ecs.amazonaws.com/doc/2014-11-13/" @@ -369,7 +369,7 @@ } }, "traits": { - "smithy.api#documentation": "An attribute is a name-value pair that's associated with an Amazon ECS object. Use attributes\n\t\t\tto extend the Amazon ECS data model by adding custom metadata to your resources.\n\t\t\tFor more information, see Attributes in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "An attribute is a name-value pair that's associated with an Amazon ECS object. Use\n\t\t\tattributes to extend the Amazon ECS data model by adding custom metadata to your resources.\n\t\t\tFor more information, see Attributes in the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#AttributeLimitExceededException": { @@ -694,7 +694,7 @@ "clusterArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) that identifies the cluster. The ARN contains the arn:aws:ecs
namespace, followed by the Region of the cluster, the Amazon Web Services account ID of the cluster owner, the cluster
namespace, and then the cluster name. For example, arn:aws:ecs:region:012345678910:cluster/test
.
The Amazon Resource Name (ARN) that identifies the cluster. For more information about the ARN\n\t\t\tformat, see Amazon Resource Name (ARN) in the Amazon ECS Developer Guide.
" } }, "clusterName": { @@ -772,13 +772,13 @@ "attachments": { "target": "com.amazonaws.ecs#Attachments", "traits": { - "smithy.api#documentation": "The resources attached to a cluster. When using a capacity provider with a cluster,\n\t\t\tthe Auto Scaling plan that's created is returned as a cluster attachment.
" + "smithy.api#documentation": "The resources attached to a cluster. When using a capacity provider with a cluster,\n\t\t\tthe capacity provider and associated resources are returned as cluster\n\t\t\tattachments.
" } }, "attachmentsStatus": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The status of the capacity providers associated with the cluster. The following are\n\t\t\tthe states that are returned.
\n\t\tThe available capacity providers for the cluster are updating. This occurs\n\t\t\t\t\t\twhen the Auto Scaling plan is provisioning or deprovisioning.
\n\t\t\t\tThe capacity providers have successfully updated.
\n\t\t\t\tThe capacity provider updates failed.
\n\t\t\t\tThe status of the capacity providers associated with the cluster. The following are\n\t\t\tthe states that are returned.
\n\t\tThe available capacity providers for the cluster are updating.
\n\t\t\t\tThe capacity providers have successfully updated.
\n\t\t\t\tThe capacity provider updates failed.
\n\t\t\t\tThe amount (in MiB) of memory to present to the container. If your container attempts\n\t\t\tto exceed the memory specified here, the container is killed. The total amount of memory\n\t\t\treserved for all containers within a task must be lower than the task\n\t\t\t\tmemory
value, if one is specified. This parameter maps to\n\t\t\t\tMemory
in the Create a container section of the\n\t\t\tDocker Remote API and the --memory
option to docker run.
If using the Fargate launch type, this parameter is optional.
\n\t\tIf using the EC2 launch type, you must specify either a task-level\n\t\t\tmemory value or a container-level memory value. If you specify both a container-level\n\t\t\t\tmemory
and memoryReservation
value, memory
\n\t\t\tmust be greater than memoryReservation
. If you specify\n\t\t\t\tmemoryReservation
, then that value is subtracted from the available\n\t\t\tmemory resources for the container instance where the container is placed. Otherwise,\n\t\t\tthe value of memory
is used.
The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of\n\t\t\tmemory for a container, so you should not specify fewer than 6 MiB of\n\t\t\tmemory for your containers.
\n\t\tThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB\n\t\t\tof memory for a container, so you should not specify fewer than 4 MiB of\n\t\t\tmemory for your containers.
" + "smithy.api#documentation": "The amount (in MiB) of memory to present to the container. If your container attempts\n\t\t\tto exceed the memory specified here, the container is killed. The total amount of memory\n\t\t\treserved for all containers within a task must be lower than the task\n\t\t\t\tmemory
value, if one is specified. This parameter maps to\n\t\t\t\tMemory
in the Create a container section of the\n\t\t\tDocker Remote API and the --memory
option to docker run.
If using the Fargate launch type, this parameter is optional.
\n\t\tIf using the EC2 launch type, you must specify either a task-level\n\t\t\tmemory value or a container-level memory value. If you specify both a container-level\n\t\t\t\tmemory
and memoryReservation
value, memory
\n\t\t\tmust be greater than memoryReservation
. If you specify\n\t\t\t\tmemoryReservation
, then that value is subtracted from the available\n\t\t\tmemory resources for the container instance where the container is placed. Otherwise,\n\t\t\tthe value of memory
is used.
The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.
\n\t\tThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.
" } }, "memoryReservation": { @@ -1217,7 +1217,7 @@ "stopTimeout": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "Time duration (in seconds) to wait before the container is forcefully killed if it\n\t\t\tdoesn't exit normally on its own.
\n\t\tFor tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:
\n\t\tLinux platform version 1.3.0
or later.
Windows platform version 1.0.0
or later.
The max stop timeout value is 120 seconds and if the parameter is not specified, the\n\t\t\tdefault value of 30 seconds is used.
\n\t\tFor tasks that use the EC2 launch type, if the stopTimeout
\n\t\t\tparameter isn't specified, the value set for the Amazon ECS container agent configuration\n\t\t\tvariable ECS_CONTAINER_STOP_TIMEOUT
is used. If neither the\n\t\t\t\tstopTimeout
parameter or the ECS_CONTAINER_STOP_TIMEOUT
\n\t\t\tagent configuration variable are set, then the default values of 30 seconds for Linux\n\t\t\tcontainers and 30 seconds on Windows containers are used. Your container instances\n\t\t\trequire at least version 1.26.0 of the container agent to use a container stop\n\t\t\ttimeout value. However, we recommend using the latest container agent version. For\n\t\t\tinformation about checking your agent version and updating to the latest version, see\n\t\t\t\tUpdating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using\n\t\t\tan Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the\n\t\t\t\tecs-init
package. If your container instances are launched from version\n\t\t\t\t20190301
or later, then they contain the required versions of the\n\t\t\tcontainer agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
Time duration (in seconds) to wait before the container is forcefully killed if it\n\t\t\tdoesn't exit normally on its own.
\n\t\tFor tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:
\n\t\tLinux platform version 1.3.0
or later.
Windows platform version 1.0.0
or later.
The max stop timeout value is 120 seconds and if the parameter is not specified, the\n\t\t\tdefault value of 30 seconds is used.
\n\t\tFor tasks that use the EC2 launch type, if the stopTimeout
\n\t\t\tparameter isn't specified, the value set for the Amazon ECS container agent configuration\n\t\t\tvariable ECS_CONTAINER_STOP_TIMEOUT
is used. If neither the\n\t\t\t\tstopTimeout
parameter or the ECS_CONTAINER_STOP_TIMEOUT
\n\t\t\tagent configuration variable are set, then the default values of 30 seconds for Linux\n\t\t\tcontainers and 30 seconds on Windows containers are used. Your container instances\n\t\t\trequire at least version 1.26.0 of the container agent to use a container stop timeout\n\t\t\tvalue. However, we recommend using the latest container agent version. For information\n\t\t\tabout checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your\n\t\t\tinstance needs at least version 1.26.0-1 of the ecs-init
package. If your\n\t\t\tcontainer instances are launched from version 20190301
or later, then they\n\t\t\tcontain the required versions of the container agent and ecs-init
. For more\n\t\t\tinformation, see Amazon ECS-optimized Linux AMI\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
The Amazon Resource Name (ARN) of the container instance. The ARN contains the arn:aws:ecs
namespace, followed by the Region of the container instance, the Amazon Web Services account ID of the container instance owner, the container-instance
namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID
.
The Amazon Resource Name (ARN) of the container instance. For more information about the ARN format,\n\t\t\tsee Amazon Resource Name (ARN) in the Amazon ECS Developer Guide.
" } }, "ec2InstanceId": { @@ -1421,7 +1421,7 @@ "status": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The status of the container instance. The valid values are REGISTERING
,\n\t\t\t\tREGISTRATION_FAILED
, ACTIVE
, INACTIVE
,\n\t\t\t\tDEREGISTERING
, or DRAINING
.
If your account has opted in to the awsvpcTrunking
account setting, then\n\t\t\tany newly registered container instance will transition to a REGISTERING
\n\t\t\tstatus while the trunk elastic network interface is provisioned for the instance. If the\n\t\t\tregistration fails, the instance will transition to a REGISTRATION_FAILED
\n\t\t\tstatus. You can describe the container instance and see the reason for failure in the\n\t\t\t\tstatusReason
parameter. Once the container instance is terminated, the\n\t\t\tinstance transitions to a DEREGISTERING
status while the trunk elastic\n\t\t\tnetwork interface is deprovisioned. The instance then transitions to an\n\t\t\t\tINACTIVE
status.
The ACTIVE
status indicates that the container instance can accept tasks.\n\t\t\tThe DRAINING
indicates that new tasks aren't placed on the container\n\t\t\tinstance and any service tasks running on the container instance are removed if\n\t\t\tpossible. For more information, see Container Instance Draining in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
The status of the container instance. The valid values are REGISTERING
,\n\t\t\t\tREGISTRATION_FAILED
, ACTIVE
, INACTIVE
,\n\t\t\t\tDEREGISTERING
, or DRAINING
.
If your account has opted in to the awsvpcTrunking
account setting, then\n\t\t\tany newly registered container instance will transition to a REGISTERING
\n\t\t\tstatus while the trunk elastic network interface is provisioned for the instance. If the\n\t\t\tregistration fails, the instance will transition to a REGISTRATION_FAILED
\n\t\t\tstatus. You can describe the container instance and see the reason for failure in the\n\t\t\t\tstatusReason
parameter. Once the container instance is terminated, the\n\t\t\tinstance transitions to a DEREGISTERING
status while the trunk elastic\n\t\t\tnetwork interface is deprovisioned. The instance then transitions to an\n\t\t\t\tINACTIVE
status.
The ACTIVE
status indicates that the container instance can accept tasks.\n\t\t\tThe DRAINING
indicates that new tasks aren't placed on the container\n\t\t\tinstance and any service tasks running on the container instance are removed if\n\t\t\tpossible. For more information, see Container instance draining in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
This parameter returns true
if the agent is connected to Amazon ECS.\n\t\t\tRegistered instances with an agent that may be unhealthy or stopped return\n\t\t\t\tfalse
. Only instances connected to an agent can accept placement\n\t\t\trequests.
This parameter returns true
if the agent is connected to Amazon ECS. An\n\t\t\tinstance with an agent that may be unhealthy or stopped return false
. Only\n\t\t\tinstances connected to an agent can accept task placement requests.
The resources attached to a container instance, such as elastic network\n\t\t\tinterfaces.
" + "smithy.api#documentation": "The resources attached to a container instance, such as an elastic network\n\t\t\tinterface.
" } }, "tags": { @@ -1486,7 +1486,7 @@ } }, "traits": { - "smithy.api#documentation": "An EC2 instance that's running the Amazon ECS agent and has been registered with a\n\t\t\tcluster.
" + "smithy.api#documentation": "An Amazon EC2 or External instance that's running the Amazon ECS agent and has been registered\n\t\t\twith a cluster.
" } }, "com.amazonaws.ecs#ContainerInstanceField": { @@ -1772,7 +1772,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new Amazon ECS cluster. By default, your account receives a default
\n\t\t\tcluster when you launch your first container instance. However, you can create your own\n\t\t\tcluster with a unique name with the CreateCluster
action.
When you call the CreateCluster API operation, Amazon ECS attempts to\n\t\t\t\tcreate the Amazon ECS service-linked role for your account. This is so that it can manage\n\t\t\t\trequired resources in other Amazon Web Services services on your behalf. However, if the IAM user\n\t\t\t\tthat makes the call doesn't have permissions to create the service-linked role, it\n\t\t\t\tisn't created. For more information, see Using\n\t\t\t\t\tService-Linked Roles for Amazon ECS in the\n\t\t\t\t\tAmazon Elastic Container Service Developer Guide.
\n\t\tCreates a new Amazon ECS cluster. By default, your account receives a default
\n\t\t\tcluster when you launch your first container instance. However, you can create your own\n\t\t\tcluster with a unique name with the CreateCluster
action.
When you call the CreateCluster API operation, Amazon ECS attempts to\n\t\t\t\tcreate the Amazon ECS service-linked role for your account. This is so that it can manage\n\t\t\t\trequired resources in other Amazon Web Services services on your behalf. However, if the IAM user\n\t\t\t\tthat makes the call doesn't have permissions to create the service-linked role, it\n\t\t\t\tisn't created. For more information, see Using\n\t\t\t\t\tservice-linked roles for Amazon ECS in the\n\t\t\t\t\tAmazon Elastic Container Service Developer Guide.
\n\t\tRuns and maintains your desired number of tasks from a specified task definition. If\n\t\t\tthe number of tasks running in a service drops below the desiredCount
,\n\t\t\tAmazon ECS runs another copy of the task in the specified cluster. To update an existing\n\t\t\tservice, see the UpdateService action.
In addition to maintaining the desired count of tasks in your service, you can\n\t\t\toptionally run your service behind one or more load balancers. The load balancers\n\t\t\tdistribute traffic across the tasks that are associated with the service. For more\n\t\t\tinformation, see Service Load Balancing in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
\n\t\tTasks for services that don't use a load balancer are considered healthy if they're in\n\t\t\tthe RUNNING
state. Tasks for services that use a load balancer are\n\t\t\tconsidered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
There are two service scheduler strategies available:
\n\t\t\n REPLICA
- The replica scheduling strategy places and\n\t\t\t\t\tmaintains your desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement decisions. For\n\t\t\t\t\tmore information, see Service Scheduler Concepts in the\n\t\t\t\t\t\tAmazon Elastic Container Service Developer Guide.
\n DAEMON
- The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance that meets all of the task placement\n\t\t\t\t\tconstraints that you specify in your cluster. The service scheduler also\n\t\t\t\t\tevaluates the task placement constraints for running tasks. It also stops tasks\n\t\t\t\t\tthat don't meet the placement constraints. When using this strategy, you don't\n\t\t\t\t\tneed to specify a desired number of tasks, a task placement strategy, or use\n\t\t\t\t\tService Auto Scaling policies. For more information, see Service Scheduler Concepts in the\n\t\t\t\t\t\tAmazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment\n\t\t\tis initiated by changing properties. For example, the deployment might be initiated by\n\t\t\tthe task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for\n\t\t\t\tminimumHealthyPercent
is 100%. The default value for a daemon service\n\t\t\tfor minimumHealthyPercent
is 0%.
If a service uses the ECS
deployment controller, the minimum healthy\n\t\t\tpercent represents a lower limit on the number of tasks in a service that must remain in\n\t\t\tthe RUNNING
state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of your desired number of tasks (rounded up to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING
state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can deploy without using additional cluster capacity. For example, if you\n\t\t\tset your service to have desired number of four tasks and a minimum healthy percent of\n\t\t\t50%, the scheduler might stop two existing tasks to free up cluster capacity before\n\t\t\tstarting two new tasks. If they're in the RUNNING
state, tasks for services\n\t\t\tthat don't use a load balancer are considered healthy . If they're in the\n\t\t\t\tRUNNING
state and reported as healthy by the load balancer, tasks for\n\t\t\tservices that do use a load balancer are considered healthy . The\n\t\t\tdefault value for minimum healthy percent is 100%.
If a service uses the ECS
deployment controller, the maximum percent parameter represents an upper limit on the\n\t\t\tnumber of tasks in a service that are allowed in the RUNNING
or\n\t\t\t\tPENDING
state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of the desired number of tasks (rounded down to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING
state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can define the deployment batch size. For example, if your service has a\n\t\t\tdesired number of four tasks and a maximum percent value of 200%, the scheduler may\n\t\t\tstart four new tasks before stopping the four older tasks (provided that the cluster\n\t\t\tresources required to do this are available). The default value for maximum percent is\n\t\t\t200%.
If a service uses either the CODE_DEPLOY
or EXTERNAL
\n\t\t\tdeployment controller types and tasks that use the EC2 launch type, the\n\t\t\t\tminimum healthy percent and maximum percent values are used only to define the lower and upper limit\n\t\t\ton the number of the tasks in the service that remain in the RUNNING
state.\n\t\t\tThis is while the container instances are in the DRAINING
state. If the\n\t\t\ttasks in the service use the Fargate launch type, the minimum healthy\n\t\t\tpercent and maximum percent values aren't used. This is the case even if they're\n\t\t\tcurrently visible when describing your service.
When creating a service that uses the EXTERNAL
deployment controller, you\n\t\t\tcan specify only parameters that aren't controlled at the task set level. The only\n\t\t\trequired parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.
When the service scheduler launches new tasks, it determines task placement in your\n\t\t\tcluster using the following logic:
\n\t\tDetermine which of the container instances in your cluster can support the\n\t\t\t\t\ttask definition of your service. For example, they have the required CPU,\n\t\t\t\t\tmemory, ports, and container instance attributes.
\n\t\t\tBy default, the service scheduler attempts to balance tasks across\n\t\t\t\t\tAvailability Zones in this manner. This is the case even if you can choose a\n\t\t\t\t\tdifferent placement strategy with the placementStrategy
\n\t\t\t\t\tparameter.
Sort the valid container instances, giving priority to instances that\n\t\t\t\t\t\t\thave the fewest number of running tasks for this service in their\n\t\t\t\t\t\t\trespective Availability Zone. For example, if zone A has one running\n\t\t\t\t\t\t\tservice task and zones B and C each have zero, valid container instances\n\t\t\t\t\t\t\tin either zone B or C are considered optimal for placement.
\n\t\t\t\t\tPlace the new service task on a valid container instance in an optimal\n\t\t\t\t\t\t\tAvailability Zone based on the previous steps, favoring container\n\t\t\t\t\t\t\tinstances with the fewest number of running tasks for this\n\t\t\t\t\t\t\tservice.
\n\t\t\t\t\tRuns and maintains your desired number of tasks from a specified task definition. If\n\t\t\tthe number of tasks running in a service drops below the desiredCount
,\n\t\t\tAmazon ECS runs another copy of the task in the specified cluster. To update an existing\n\t\t\tservice, see the UpdateService action.
In addition to maintaining the desired count of tasks in your service, you can\n\t\t\toptionally run your service behind one or more load balancers. The load balancers\n\t\t\tdistribute traffic across the tasks that are associated with the service. For more\n\t\t\tinformation, see Service load balancing in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
\n\t\tTasks for services that don't use a load balancer are considered healthy if they're in\n\t\t\tthe RUNNING
state. Tasks for services that use a load balancer are\n\t\t\tconsidered healthy if they're in the RUNNING
state and are reported as\n\t\t\thealthy by the load balancer.
There are two service scheduler strategies available:
\n\t\t\n REPLICA
- The replica scheduling strategy places and\n\t\t\t\t\tmaintains your desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement decisions. For\n\t\t\t\t\tmore information, see Service scheduler concepts in the\n\t\t\t\t\t\tAmazon Elastic Container Service Developer Guide.
\n DAEMON
- The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance that meets all of the task placement\n\t\t\t\t\tconstraints that you specify in your cluster. The service scheduler also\n\t\t\t\t\tevaluates the task placement constraints for running tasks. It also stops tasks\n\t\t\t\t\tthat don't meet the placement constraints. When using this strategy, you don't\n\t\t\t\t\tneed to specify a desired number of tasks, a task placement strategy, or use\n\t\t\t\t\tService Auto Scaling policies. For more information, see Service scheduler concepts in the\n\t\t\t\t\t\tAmazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment\n\t\t\tis initiated by changing properties. For example, the deployment might be initiated by\n\t\t\tthe task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for\n\t\t\t\tminimumHealthyPercent
is 100%. The default value for a daemon service\n\t\t\tfor minimumHealthyPercent
is 0%.
If a service uses the ECS
deployment controller, the minimum healthy\n\t\t\tpercent represents a lower limit on the number of tasks in a service that must remain in\n\t\t\tthe RUNNING
state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of your desired number of tasks (rounded up to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING
state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can deploy without using additional cluster capacity. For example, if you\n\t\t\tset your service to have desired number of four tasks and a minimum healthy percent of\n\t\t\t50%, the scheduler might stop two existing tasks to free up cluster capacity before\n\t\t\tstarting two new tasks. If they're in the RUNNING
state, tasks for services\n\t\t\tthat don't use a load balancer are considered healthy . If they're in the\n\t\t\t\tRUNNING
state and reported as healthy by the load balancer, tasks for\n\t\t\tservices that do use a load balancer are considered healthy . The\n\t\t\tdefault value for minimum healthy percent is 100%.
If a service uses the ECS
deployment controller, the maximum percent parameter represents an upper limit on the\n\t\t\tnumber of tasks in a service that are allowed in the RUNNING
or\n\t\t\t\tPENDING
state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of the desired number of tasks (rounded down to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING
state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can define the deployment batch size. For example, if your service has a\n\t\t\tdesired number of four tasks and a maximum percent value of 200%, the scheduler may\n\t\t\tstart four new tasks before stopping the four older tasks (provided that the cluster\n\t\t\tresources required to do this are available). The default value for maximum percent is\n\t\t\t200%.
If a service uses either the CODE_DEPLOY
or EXTERNAL
\n\t\t\tdeployment controller types and tasks that use the EC2 launch type, the\n\t\t\t\tminimum healthy percent and maximum percent values are used only to define the lower and upper limit\n\t\t\ton the number of the tasks in the service that remain in the RUNNING
state.\n\t\t\tThis is while the container instances are in the DRAINING
state. If the\n\t\t\ttasks in the service use the Fargate launch type, the minimum healthy\n\t\t\tpercent and maximum percent values aren't used. This is the case even if they're\n\t\t\tcurrently visible when describing your service.
When creating a service that uses the EXTERNAL
deployment controller, you\n\t\t\tcan specify only parameters that aren't controlled at the task set level. The only\n\t\t\trequired parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
When the service scheduler launches new tasks, it determines task placement in your\n\t\t\tcluster using the following logic:
\n\t\tDetermine which of the container instances in your cluster can support the\n\t\t\t\t\ttask definition of your service. For example, they have the required CPU,\n\t\t\t\t\tmemory, ports, and container instance attributes.
\n\t\t\tBy default, the service scheduler attempts to balance tasks across\n\t\t\t\t\tAvailability Zones in this manner. This is the case even if you can choose a\n\t\t\t\t\tdifferent placement strategy with the placementStrategy
\n\t\t\t\t\tparameter.
Sort the valid container instances, giving priority to instances that\n\t\t\t\t\t\t\thave the fewest number of running tasks for this service in their\n\t\t\t\t\t\t\trespective Availability Zone. For example, if zone A has one running\n\t\t\t\t\t\t\tservice task and zones B and C each have zero, valid container instances\n\t\t\t\t\t\t\tin either zone B or C are considered optimal for placement.
\n\t\t\t\t\tPlace the new service task on a valid container instance in an optimal\n\t\t\t\t\t\t\tAvailability Zone based on the previous steps, favoring container\n\t\t\t\t\t\t\tinstances with the fewest number of running tasks for this\n\t\t\t\t\t\t\tservice.
\n\t\t\t\t\tA load balancer object representing the load balancers to use with your service. For\n\t\t\tmore information, see Service Load Balancing in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
\n\t\tIf the service uses the rolling update (ECS
) deployment controller and\n\t\t\tusing either an Application Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to attach\n\t\t\tto the service. The service-linked role is required for services that use multiple\n\t\t\ttarget groups. For more information, see Using service-linked roles for Amazon ECS in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
If the service uses the CODE_DEPLOY
deployment controller, the service is\n\t\t\trequired to use either an Application Load Balancer or Network Load Balancer. When creating an CodeDeploy deployment group, you\n\t\t\tspecify two target groups (referred to as a targetGroupPair
). During a\n\t\t\tdeployment, CodeDeploy determines which task set in your service has the status\n\t\t\t\tPRIMARY
, and it associates one target group with it. Then, it also\n\t\t\tassociates the other target group with the replacement task set. The load balancer can\n\t\t\talso have up to two listeners: a required listener for production traffic and an\n\t\t\toptional listener that you can use to perform validation tests with Lambda functions\n\t\t\tbefore routing production traffic to it.
If you use the CODE_DEPLOY
deployment controller, these values can be changed\n\t\t\twhen updating the service.
For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN,\n\t\t\tthe container name, and the container port to access from the load balancer. The\n\t\t\tcontainer name must be as it appears in a container definition. The load balancer name\n\t\t\tparameter must be omitted. When a task from this service is placed on a container\n\t\t\tinstance, the container instance and port combination is registered as a target in the\n\t\t\ttarget group that's specified here.
\n\t\tFor Classic Load Balancers, this object must contain the load balancer name, the container name , and\n\t\t\tthe container port to access from the load balancer. The container name must be as it\n\t\t\tappears in a container definition. The target group ARN parameter must be omitted. When\n\t\t\ta task from this service is placed on a container instance, the container instance is\n\t\t\tregistered with the load balancer that's specified here.
\n\t\tServices with tasks that use the awsvpc
network mode (for example, those\n\t\t\twith the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers\n\t\t\taren't supported. Also, when you create any target groups for these services, you must\n\t\t\tchoose ip
as the target type, not instance
. This is because\n\t\t\ttasks that use the awsvpc
network mode are associated with an elastic\n\t\t\tnetwork interface, not an Amazon EC2 instance.
A load balancer object representing the load balancers to use with your service. For\n\t\t\tmore information, see Service load balancing in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
\n\t\tIf the service uses the rolling update (ECS
) deployment controller and\n\t\t\tusing either an Application Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to attach\n\t\t\tto the service. The service-linked role is required for services that use multiple\n\t\t\ttarget groups. For more information, see Using service-linked roles for Amazon ECS in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
If the service uses the CODE_DEPLOY
deployment controller, the service is\n\t\t\trequired to use either an Application Load Balancer or Network Load Balancer. When creating an CodeDeploy deployment group, you\n\t\t\tspecify two target groups (referred to as a targetGroupPair
). During a\n\t\t\tdeployment, CodeDeploy determines which task set in your service has the status\n\t\t\t\tPRIMARY
, and it associates one target group with it. Then, it also\n\t\t\tassociates the other target group with the replacement task set. The load balancer can\n\t\t\talso have up to two listeners: a required listener for production traffic and an\n\t\t\toptional listener that you can use to perform validation tests with Lambda functions\n\t\t\tbefore routing production traffic to it.
If you use the CODE_DEPLOY
deployment controller, these values can be\n\t\t\tchanged when updating the service.
For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN,\n\t\t\tthe container name, and the container port to access from the load balancer. The\n\t\t\tcontainer name must be as it appears in a container definition. The load balancer name\n\t\t\tparameter must be omitted. When a task from this service is placed on a container\n\t\t\tinstance, the container instance and port combination is registered as a target in the\n\t\t\ttarget group that's specified here.
\n\t\tFor Classic Load Balancers, this object must contain the load balancer name, the container name , and\n\t\t\tthe container port to access from the load balancer. The container name must be as it\n\t\t\tappears in a container definition. The target group ARN parameter must be omitted.\n\t\t\tWhen a task from this service is placed on a container instance, the container instance\n\t\t\tis registered with the load balancer that's specified here.
\n\t\tServices with tasks that use the awsvpc
network mode (for example, those\n\t\t\twith the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers\n\t\t\taren't supported. Also, when you create any target groups for these services, you must\n\t\t\tchoose ip
as the target type, not instance
. This is because\n\t\t\ttasks that use the awsvpc
network mode are associated with an elastic\n\t\t\tnetwork interface, not an Amazon EC2 instance.
The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy\n\t\t\tElastic Load Balancing target health checks after a task has first started. This is only used when your\n\t\t\tservice is configured to use a load balancer. If your service has a load balancer\n\t\t\tdefined and you don't specify a health check grace period value, the default value of\n\t\t\t\t0
is used.
If you do not use an Elastic Load Balancing, we recomend that you use the startPeriod
in the task definition healtch check parameters. For more information, see Health check.
If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you\n\t\t\tcan specify a health check grace period of up to\n\t\t\t2,147,483,647\n\t\t\tseconds (about 69 years). During that time, the Amazon ECS service\n\t\t\tscheduler ignores health check status. This grace period can prevent the service\n\t\t\tscheduler from marking tasks as unhealthy and stopping them before they have time to\n\t\t\tcome up.
" + "smithy.api#documentation": "The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy\n\t\t\tElastic Load Balancing target health checks after a task has first started. This is only used when your\n\t\t\tservice is configured to use a load balancer. If your service has a load balancer\n\t\t\tdefined and you don't specify a health check grace period value, the default value of\n\t\t\t\t0
is used.
If you do not use an Elastic Load Balancing, we recomend that you use the startPeriod
in\n\t\t\tthe task definition healtch check parameters. For more information, see Health\n\t\t\t\tcheck.
If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you\n\t\t\tcan specify a health check grace period of up to\n\t\t\t2,147,483,647\n\t\t\tseconds (about 69 years). During that time, the Amazon ECS service\n\t\t\tscheduler ignores health check status. This grace period can prevent the service\n\t\t\tscheduler from marking tasks as unhealthy and stopping them before they have time to\n\t\t\tcome up.
" } }, "schedulingStrategy": { @@ -1986,7 +1986,7 @@ "enableECSManagedTags": { "target": "com.amazonaws.ecs#Boolean", "traits": { - "smithy.api#documentation": "Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For\n\t\t\tmore information, see Tagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For\n\t\t\tmore information, see Tagging your Amazon ECS\n\t\t\t\tresources in the Amazon Elastic Container Service Developer Guide.
" } }, "propagateTags": { @@ -2055,7 +2055,7 @@ } ], "traits": { - "smithy.api#documentation": "Create a task set in the specified cluster and service. This is used when a service\n\t\t\tuses the EXTERNAL
deployment controller type. For more information, see\n\t\t\t\tAmazon ECS Deployment\n\t\t\t\tTypes in the Amazon Elastic Container Service Developer Guide.
Create a task set in the specified cluster and service. This is used when a service\n\t\t\tuses the EXTERNAL
deployment controller type. For more information, see\n\t\t\t\tAmazon ECS deployment\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.
The details of the service discovery registries to assign to this task set. For more\n\t\t\tinformation, see Service\n\t\t\t\tDiscovery.
" + "smithy.api#documentation": "The details of the service discovery registries to assign to this task set. For more\n\t\t\tinformation, see Service\n\t\t\t\tdiscovery.
" } }, "launchType": { "target": "com.amazonaws.ecs#LaunchType", "traits": { - "smithy.api#documentation": "The launch type that new tasks in the task set uses. For more information, see Amazon ECS\n\t\t\t\tLaunch Types in the Amazon Elastic Container Service Developer Guide.
\n\t\tIf a launchType
is specified, the capacityProviderStrategy
\n\t\t\tparameter must be omitted.
The launch type that new tasks in the task set uses. For more information, see Amazon ECS\n\t\t\t\tlaunch types in the Amazon Elastic Container Service Developer Guide.
\n\t\tIf a launchType
is specified, the capacityProviderStrategy
\n\t\t\tparameter must be omitted.
Deletes a specified task set within a service. This is used when a service uses the\n\t\t\t\tEXTERNAL
deployment controller type. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.
Deletes a specified task set within a service. This is used when a service uses the\n\t\t\t\tEXTERNAL
deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
Determines whether to use the deployment circuit breaker logic for the\n\t\t\tservice.
", + "smithy.api#documentation": "Determines whether to use the deployment circuit breaker logic for the service.
", "smithy.api#required": {} } }, @@ -2637,7 +2637,7 @@ } }, "traits": { - "smithy.api#documentation": "The deployment circuit breaker can only be used for services using the rolling\n\t\t\t\tupdate (ECS
) deployment type that aren't behind a Classic Load Balancer.
The deployment circuit breaker determines whether a\n\t\t\tservice deployment will fail if the service can't reach a steady state. If enabled, a\n\t\t\tservice deployment will transition to a failed state and stop launching new tasks. You\n\t\t\tcan also configure Amazon ECS to roll back your service to the last completed deployment after a\n\t\t\tfailure. For more information, see Rolling\n\t\t\t\tupdate in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The deployment circuit breaker can only be used for services using the rolling\n\t\t\t\tupdate (ECS
) deployment type that aren't behind a Classic Load Balancer.
The deployment circuit breaker determines whether a\n\t\t\tservice deployment will fail if the service can't reach a steady state. If enabled, a\n\t\t\tservice deployment will transition to a failed state and stop launching new tasks. You\n\t\t\tcan also configure Amazon ECS to roll back your service to the last completed deployment\n\t\t\tafter a failure. For more information, see Rolling\n\t\t\t\tupdate in the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#DeploymentConfiguration": { @@ -2652,13 +2652,13 @@ "maximumPercent": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "If a service is using the rolling update (ECS
) deployment type, the\n\t\t\t\tmaximum percent parameter represents an upper limit\n\t\t\ton the number of tasks in a service that are allowed in the RUNNING
or\n\t\t\t\tPENDING
state during a deployment, as a percentage of the desired\n\t\t\tnumber of tasks (rounded down to the nearest integer), and while any container instances\n\t\t\tare in the DRAINING
state if the service contains tasks using the\n\t\t\tEC2 launch type. This parameter enables you to define the deployment batch\n\t\t\tsize. For example, if your service has a desired number of four tasks and a maximum\n\t\t\tpercent value of 200%, the scheduler may start four new tasks before stopping the four\n\t\t\tolder tasks (provided that the cluster resources required to do this are available). The\n\t\t\tdefault value for maximum percent is 200%.
If a service is using the blue/green (CODE_DEPLOY
) or\n\t\t\t\tEXTERNAL
deployment types and tasks that use the EC2\n\t\t\tlaunch type, the maximum percent value is set to the\n\t\t\tdefault value and is used to define the upper limit on the number of the tasks in the\n\t\t\tservice that remain in the RUNNING
state while the container instances are\n\t\t\tin the DRAINING
state. If the tasks in the service use the\n\t\t\tFargate launch type, the maximum percent value is not used, although it is\n\t\t\treturned when describing your service.
If a service is using the rolling update (ECS
) deployment type, the\n\t\t\t\tmaximumPercent
parameter represents an upper limit on the number of\n\t\t\tyour service's tasks that are allowed in the RUNNING
or\n\t\t\t\tPENDING
state during a deployment, as a percentage of the\n\t\t\t\tdesiredCount
(rounded down to the nearest integer). This parameter\n\t\t\tenables you to define the deployment batch size. For example, if your service is using\n\t\t\tthe REPLICA
service scheduler and has a desiredCount
of four\n\t\t\ttasks and a maximumPercent
value of 200%, the scheduler may start four new\n\t\t\ttasks before stopping the four older tasks (provided that the cluster resources required\n\t\t\tto do this are available). The default maximumPercent
value for a service\n\t\t\tusing the REPLICA
service scheduler is 200%.
If a service is using either the blue/green (CODE_DEPLOY
) or\n\t\t\t\tEXTERNAL
deployment types and tasks that use the EC2\n\t\t\tlaunch type, the maximum percent value is set to the\n\t\t\tdefault value and is used to define the upper limit on the number of the tasks in the\n\t\t\tservice that remain in the RUNNING
state while the container instances are\n\t\t\tin the DRAINING
state. If the tasks in the service use the\n\t\t\tFargate launch type, the maximum percent value is not used, although it is\n\t\t\treturned when describing your service.
If a service is using the rolling update (ECS
) deployment type, the\n\t\t\t\tminimum healthy percent represents a lower limit on\n\t\t\tthe number of tasks in a service that must remain in the RUNNING
state\n\t\t\tduring a deployment, as a percentage of the desired number of tasks (rounded up to the\n\t\t\tnearest integer), and while any container instances are in the DRAINING
\n\t\t\tstate if the service contains tasks using the EC2 launch type. This\n\t\t\tparameter enables you to deploy without using additional cluster capacity. For example,\n\t\t\tif your service has a desired number of four tasks and a minimum healthy percent of 50%,\n\t\t\tthe scheduler may stop two existing tasks to free up cluster capacity before starting\n\t\t\ttwo new tasks. Tasks for services that do not use a load balancer\n\t\t\tare considered healthy if they're in the RUNNING
state; tasks for services\n\t\t\tthat do use a load balancer are considered healthy if they're in\n\t\t\tthe RUNNING
state and they're reported as healthy by the load balancer. The\n\t\t\tdefault value for minimum healthy percent is 100%.
If a service is using the blue/green (CODE_DEPLOY
) or\n\t\t\t\tEXTERNAL
deployment types and tasks that use the EC2\n\t\t\tlaunch type, the minimum healthy percent value is set\n\t\t\tto the default value and is used to define the lower limit on the number of the tasks in\n\t\t\tthe service that remain in the RUNNING
state while the container instances\n\t\t\tare in the DRAINING
state. If the tasks in the service use the\n\t\t\tFargate launch type, the minimum healthy percent value is not used,\n\t\t\talthough it is returned when describing your service.
If a service is using the rolling update (ECS
) deployment type, the\n\t\t\t\tminimumHealthyPercent
represents a lower limit on the number of your\n\t\t\tservice's tasks that must remain in the RUNNING
state during a deployment,\n\t\t\tas a percentage of the desiredCount
(rounded up to the nearest integer).\n\t\t\tThis parameter enables you to deploy without using additional cluster capacity. For\n\t\t\texample, if your service has a desiredCount
of four tasks and a\n\t\t\t\tminimumHealthyPercent
of 50%, the service scheduler may stop two\n\t\t\texisting tasks to free up cluster capacity before starting two new tasks.
For services that do not use a load balancer, the following\n\t\t\tshould be noted:
\n\t\tA service is considered healthy if all essential containers within the tasks\n\t\t\t\t\tin the service pass their health checks.
\n\t\t\tIf a task has no essential containers with a health check defined, the service\n\t\t\t\t\tscheduler will wait for 40 seconds after a task reaches a RUNNING
\n\t\t\t\t\tstate before the task is counted towards the minimum healthy percent\n\t\t\t\t\ttotal.
If a task has one or more essential containers with a health check defined,\n\t\t\t\t\tthe service scheduler will wait for the task to reach a healthy status before\n\t\t\t\t\tcounting it towards the minimum healthy percent total. A task is considered\n\t\t\t\t\thealthy when all essential containers within the task have passed their health\n\t\t\t\t\tchecks. The amount of time the service scheduler can wait for is determined by\n\t\t\t\t\tthe container health check settings.
\n\t\t\tFor services are that do use a load balancer, the following\n\t\t\tshould be noted:
\n\t\tIf a task has no essential containers with a health check defined, the service\n\t\t\t\t\tscheduler will wait for the load balancer target group health check to return a\n\t\t\t\t\thealthy status before counting the task towards the minimum healthy percent\n\t\t\t\t\ttotal.
\n\t\t\tIf a task has an essential container with a health check defined, the service\n\t\t\t\t\tscheduler will wait for both the task to reach a healthy status and the load\n\t\t\t\t\tbalancer target group health check to return a healthy status before counting\n\t\t\t\t\tthe task towards the minimum healthy percent total.
\n\t\t\tIf a service is using either the blue/green (CODE_DEPLOY
) or\n\t\t\t\tEXTERNAL
deployment types and is running tasks that use the\n\t\t\tEC2 launch type, the minimum healthy\n\t\t\t\tpercent value is set to the default value and is used to define the lower\n\t\t\tlimit on the number of the tasks in the service that remain in the RUNNING
\n\t\t\tstate while the container instances are in the DRAINING
state. If a service\n\t\t\tis using either the blue/green (CODE_DEPLOY
) or EXTERNAL
\n\t\t\tdeployment types and is running tasks that use the Fargate launch type,\n\t\t\tthe minimum healthy percent value is not used, although it is returned when describing\n\t\t\tyour service.
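 Editorial sketch (not part of the generated model): the minimum healthy percent and maximum percent described above are supplied through the `deploymentConfiguration` of `CreateService`/`UpdateService`. A minimal example with the AWS SDK for JavaScript v3; the region, cluster, service, and task definition names are placeholders. ```ts import { ECSClient, CreateServiceCommand } from "@aws-sdk/client-ecs"; const client = new ECSClient({ region: "us-east-1" }); // placeholder region // With desiredCount = 4 and minimumHealthyPercent = 50, the scheduler may stop // two existing tasks to free capacity before starting two replacement tasks. await client.send( new CreateServiceCommand({ cluster: "my-cluster", // placeholder serviceName: "web", // placeholder taskDefinition: "web-task:1", // placeholder desiredCount: 4, deploymentConfiguration: { minimumHealthyPercent: 50, maximumPercent: 200, }, }) ); ``` 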
The deployment controller to use for the service. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The deployment controller to use for the service. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#DeploymentControllerType": { @@ -2763,7 +2763,7 @@ "containerInstance": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The container instance ID or full ARN of the container instance to deregister.\n\t\t\tThe ARN contains the arn:aws:ecs
namespace, followed by the Region of the container instance, the Amazon Web Services account ID of the container instance owner, the container-instance
namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID
.
The container instance ID or full ARN of the container instance to deregister. For\n\t\t\tmore information about the ARN format, see Amazon Resource Name (ARN) in the Amazon ECS Developer Guide.
", "smithy.api#required": {} } }, @@ -3080,6 +3080,51 @@ } ], "minDelay": 15 + }, + "ServicesStable": { + "acceptors": [ + { + "state": "failure", + "matcher": { + "output": { + "path": "failures[].reason", + "expected": "MISSING", + "comparator": "anyStringEquals" + } + } + }, + { + "state": "failure", + "matcher": { + "output": { + "path": "services[].status", + "expected": "DRAINING", + "comparator": "anyStringEquals" + } + } + }, + { + "state": "failure", + "matcher": { + "output": { + "path": "services[].status", + "expected": "INACTIVE", + "comparator": "anyStringEquals" + } + } + }, + { + "state": "success", + "matcher": { + "output": { + "path": "length(services[?!(length(deployments) == `1` && runningCount == desiredCount)]) == `0`", + "expected": "true", + "comparator": "booleanEquals" + } + } + } + ], + "minDelay": 15 } } } @@ -3292,7 +3337,7 @@ } ], "traits": { - "smithy.api#documentation": "Describes a specified task or tasks.
", + "smithy.api#documentation": "Describes a specified task or tasks.
\n\t\tCurrently, stopped tasks appear in the returned results for at least one hour.
", "smithy.waiters#waitable": { "TasksRunning": { "acceptors": [ @@ -3491,7 +3536,7 @@ "containerInstance": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The container instance ID or full ARN of the container instance.\n\t\t\tThe ARN contains the arn:aws:ecs
namespace, followed by the Region of the container instance, the Amazon Web Services account ID of the container instance owner, the container-instance
namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID
.
The container instance ID or full ARN of the container instance. For more\n\t\t\tinformation about the ARN format, see Amazon Resource Name (ARN) in the Amazon ECS Developer Guide.
" } }, "cluster": { @@ -3575,13 +3620,13 @@ "accessPointId": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The Amazon EFS access point ID to use. If an access point is specified, the root directory\n\t\t\tvalue specified in the EFSVolumeConfiguration
must either be omitted or set\n\t\t\tto /
which will enforce the path set on the EFS access point. If an access\n\t\t\tpoint is used, transit encryption must be enabled in the\n\t\t\t\tEFSVolumeConfiguration
. For more information, see Working with Amazon\n\t\t\t\tEFS Access Points in the Amazon Elastic File System User Guide.
The Amazon EFS access point ID to use. If an access point is specified, the root directory\n\t\t\tvalue specified in the EFSVolumeConfiguration
must either be omitted or set\n\t\t\tto /
which will enforce the path set on the EFS access point. If an access\n\t\t\tpoint is used, transit encryption must be enabled in the\n\t\t\t\tEFSVolumeConfiguration
. For more information, see Working with Amazon\n\t\t\t\tEFS access points in the Amazon Elastic File System User Guide.
Determines whether to use the Amazon ECS task IAM role defined in a task definition when\n\t\t\tmounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the\n\t\t\t\tEFSVolumeConfiguration
. If this parameter is omitted, the default value\n\t\t\tof DISABLED
is used. For more information, see Using\n\t\t\t\tAmazon EFS Access Points in the Amazon Elastic Container Service Developer Guide.
Determines whether to use the Amazon ECS task IAM role defined in a task definition when\n\t\t\tmounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the\n\t\t\t\tEFSVolumeConfiguration
. If this parameter is omitted, the default value\n\t\t\tof DISABLED
is used. For more information, see Using\n\t\t\t\tAmazon EFS access points in the Amazon Elastic Container Service Developer Guide.
Determines whether to use encryption for Amazon EFS data in transit between the Amazon ECS\n\t\t\thost and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization\n\t\t\tis used. If this parameter is omitted, the default value of DISABLED
is\n\t\t\tused. For more information, see Encrypting Data in Transit in\n\t\t\tthe Amazon Elastic File System User Guide.
Determines whether to use encryption for Amazon EFS data in transit between the Amazon ECS host\n\t\t\tand the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is\n\t\t\tused. If this parameter is omitted, the default value of DISABLED
is used.\n\t\t\tFor more information, see Encrypting data in transit in\n\t\t\tthe Amazon Elastic File System User Guide.
The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS\n\t\t\tserver. If you do not specify a transit encryption port, it will use the port selection\n\t\t\tstrategy that the Amazon EFS mount helper uses. For more information, see EFS Mount\n\t\t\t\tHelper in the Amazon Elastic File System User Guide.
" + "smithy.api#documentation": "The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS\n\t\t\tserver. If you do not specify a transit encryption port, it will use the port selection\n\t\t\tstrategy that the Amazon EFS mount helper uses. For more information, see EFS mount\n\t\t\t\thelper in the Amazon Elastic File System User Guide.
" } }, "authorizationConfig": { @@ -3655,7 +3700,7 @@ } }, "traits": { - "smithy.api#documentation": "This parameter is specified when you're using an Amazon Elastic File System file system for task\n\t\t\tstorage. For more information, see Amazon EFS Volumes in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "This parameter is specified when you're using an Amazon Elastic File System file system for task\n\t\t\tstorage. For more information, see Amazon EFS volumes in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#EnvironmentFile": { @@ -3715,7 +3760,7 @@ } }, "traits": { - "smithy.api#documentation": "The amount of ephemeral storage to allocate for the task. This parameter is used to\n\t\t\texpand the total amount of ephemeral storage available, beyond the default amount, for\n\t\t\ttasks hosted on Fargate. For more information, see Fargate task\n\t\t\t\tstorage in the Amazon ECS User Guide for Fargate.
\n\t\tThis parameter is only supported for tasks hosted on Fargate using Linux\n\t\t\t\tplatform version 1.4.0
or later. This parameter is not supported for\n\t\t\t\tWindows containers on Fargate.
The amount of ephemeral storage to allocate for the task. This parameter is used to\n\t\t\texpand the total amount of ephemeral storage available, beyond the default amount, for\n\t\t\ttasks hosted on Fargate. For more information, see Fargate task\n\t\t\t\tstorage in the Amazon ECS User Guide for Fargate.
\n\t\tThis parameter is only supported for tasks hosted on Fargate using\n\t\t\t\tLinux platform version 1.4.0
or later. This parameter is not supported\n\t\t\t\tfor Windows containers on Fargate.
Runs a command remotely on a container within a task.
" + "smithy.api#documentation": "Runs a command remotely on a container within a task.
 \n\t\tIf you use a condition key in your IAM policy to refine the conditions for the policy\n\t\t\tstatement, for example, limit the actions to a specific cluster, you receive an\n\t\t\t\tAccessDeniedException 
when there is a mismatch between the condition\n\t\t\tkey value and the corresponding parameter value.
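 Editorial sketch (not part of the model file): invoking ExecuteCommand against a running container. The cluster, task ID, and container name are placeholders, and ECS Exec must already be enabled for the task. ```ts import { ECSClient, ExecuteCommandCommand } from "@aws-sdk/client-ecs"; const client = new ECSClient({ region: "us-east-1" }); // placeholder region const { session } = await client.send( new ExecuteCommandCommand({ cluster: "my-cluster", // placeholder task: "0123456789abcdef0", // placeholder task ID container: "app", // placeholder container name command: "/bin/sh -c 'cat /etc/os-release'", interactive: true, }) ); // session?.streamUrl points at the SSM Session Manager endpoint used to // exchange command input and output with the managed agent. ``` 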
The authorization credential option to use. The authorization credential options can\n\t\t\tbe provided using either the Amazon Resource Name (ARN) of an Secrets Manager secret or SSM\n\t\t\tParameter Store parameter. The ARNs refer to the stored credentials.
", + "smithy.api#documentation": "The authorization credential option to use. The authorization credential options can\n\t\t\tbe provided using either the Amazon Resource Name (ARN) of an Secrets Manager secret or SSM Parameter Store\n\t\t\tparameter. The ARN refers to the stored credentials.
", "smithy.api#required": {} } }, @@ -3931,7 +3976,7 @@ } }, "traits": { - "smithy.api#documentation": "The authorization configuration details for Amazon FSx for Windows File Server file system. See FSxWindowsFileServerVolumeConfiguration in the Amazon Elastic\n\t\t\t\tContainer Service API Reference.
\n\t\tFor more information and the input format, see Amazon FSx for Windows File Server Volumes\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The authorization configuration details for Amazon FSx for Windows File Server file system. See FSxWindowsFileServerVolumeConfiguration in the Amazon ECS API\n\t\t\t\tReference.
\n\t\tFor more information and the input format, see Amazon FSx for Windows File Server Volumes\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#FSxWindowsFileServerVolumeConfiguration": { @@ -3960,7 +4005,7 @@ } }, "traits": { - "smithy.api#documentation": "This parameter is specified when you're using Amazon FSx for Windows File Server file system for task\n\t\t\tstorage.
\n\t\tFor more information and the input format, see Amazon FSx for Windows File Server Volumes\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "This parameter is specified when you're using Amazon FSx for Windows File Server file system for task\n\t\t\tstorage.
\n\t\tFor more information and the input format, see Amazon FSx for Windows File Server volumes\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#Failure": { @@ -4008,12 +4053,12 @@ "options": { "target": "com.amazonaws.ecs#FirelensConfigurationOptionsMap", "traits": { - "smithy.api#documentation": "The options to use when configuring the log router. This field is optional and can be\n\t\t\tused to specify a custom configuration file or to add additional metadata, such as the\n\t\t\ttask, task definition, cluster, and container instance details to the log event. If\n\t\t\tspecified, the syntax to use is\n\t\t\t\t\"options\":{\"enable-ecs-log-metadata\":\"true|false\",\"config-file-type:\"s3|file\",\"config-file-value\":\"arn:aws:s3:::mybucket/fluent.conf|filepath\"}
.\n\t\t\tFor more information, see Creating\n\t\t\t\ta Task Definition that Uses a FireLens Configuration in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
Tasks hosted on Fargate only support the file
configuration file\n\t\t\t\ttype.
The options to use when configuring the log router. This field is optional and can be\n\t\t\tused to specify a custom configuration file or to add additional metadata, such as the\n\t\t\ttask, task definition, cluster, and container instance details to the log event. If\n\t\t\tspecified, the syntax to use is\n\t\t\t\t\"options\":{\"enable-ecs-log-metadata\":\"true|false\",\"config-file-type:\"s3|file\",\"config-file-value\":\"arn:aws:s3:::mybucket/fluent.conf|filepath\"}
.\n\t\t\tFor more information, see Creating\n\t\t\t\ta task definition that uses a FireLens configuration in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
Tasks hosted on Fargate only support the file
configuration file\n\t\t\t\ttype.
The FireLens configuration for the container. This is used to specify and configure a\n\t\t\tlog router for container logs. For more information, see Custom Log Routing\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The FireLens configuration for the container. This is used to specify and configure a\n\t\t\tlog router for container logs. For more information, see Custom log routing\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#FirelensConfigurationOptionsMap": { @@ -4082,7 +4127,7 @@ } }, "traits": { - "smithy.api#documentation": "An object representing a container health check. Health check parameters that are\n\t\t\tspecified in a container definition override any Docker health checks that exist in the\n\t\t\tcontainer image (such as those specified in a parent image or from the image's\n\t\t\tDockerfile).
\n\t\tThe Amazon ECS container agent only monitors and reports on the health\n\t\t\t\tchecks specified in the task definition. Amazon ECS does not monitor\n\t\t\t\tDocker health checks that are embedded in a container image and not\n\t\t\t\tspecified in the container definition. Health check parameters that\n\t\t\t\tare specified in a container definition override any Docker health\n\t\t\t\tchecks that exist in the container image.
\n\t\tYou can view the health status of both individual containers and a task with the\n\t\t\tDescribeTasks API operation or when viewing the task details in the console.
\n\t\tThe following describes the possible healthStatus
values for a\n\t\t\tcontainer:
\n HEALTHY
-The container health check has passed\n\t\t\t\t\tsuccessfully.
\n UNHEALTHY
-The container health check has failed.
\n UNKNOWN
-The container health check is being evaluated or\n\t\t\t\t\tthere's no container health check defined.
The following describes the possible healthStatus
values for a task. The\n\t\t\tcontainer health check status of nonessential containers do not have an effect on the\n\t\t\thealth status of a task.
\n HEALTHY
-All essential containers within the task have\n\t\t\t\t\tpassed their health checks.
\n UNHEALTHY
-One or more essential containers have failed\n\t\t\t\t\ttheir health check.
\n UNKNOWN
-The essential containers within the task are still\n\t\t\t\t\thaving their health checks evaluated or there are no container health checks\n\t\t\t\t\tdefined.
If a task is run manually, and not as part of a service, the task will continue its\n\t\t\tlifecycle regardless of its health status. For tasks that are part of a service, if the\n\t\t\ttask reports as unhealthy then the task will be stopped and the service scheduler will\n\t\t\treplace it.
\n\t\tThe following are notes about container health check support:
\n\t\tContainer health checks require version 1.17.0 or greater of the Amazon ECS\n\t\t\t\t\tcontainer agent. For more information, see Updating the\n\t\t\t\t\t\tAmazon ECS Container Agent.
\n\t\t\tContainer health checks are supported for Fargate tasks if\n\t\t\t\t\tyou're using platform version 1.1.0 or greater. For more information, see Fargate\n\t\t\t\t\t\tPlatform Versions.
\n\t\t\tContainer health checks aren't supported for tasks that are part of a service\n\t\t\t\t\tthat's configured to use a Classic Load Balancer.
\n\t\t\tAn object representing a container health check. Health check parameters that are\n\t\t\tspecified in a container definition override any Docker health checks that exist in the\n\t\t\tcontainer image (such as those specified in a parent image or from the image's\n\t\t\tDockerfile).
\n\t\tThe Amazon ECS container agent only monitors and reports on the health checks specified\n\t\t\t\tin the task definition. Amazon ECS does not monitor Docker health checks that are\n\t\t\t\tembedded in a container image and not specified in the container definition. Health\n\t\t\t\tcheck parameters that are specified in a container definition override any Docker\n\t\t\t\thealth checks that exist in the container image.
\n\t\tYou can view the health status of both individual containers and a task with the\n\t\t\tDescribeTasks API operation or when viewing the task details in the console.
\n\t\tThe following describes the possible healthStatus
values for a\n\t\t\tcontainer:
\n HEALTHY
-The container health check has passed\n\t\t\t\t\tsuccessfully.
\n UNHEALTHY
-The container health check has failed.
\n UNKNOWN
-The container health check is being evaluated or\n\t\t\t\t\tthere's no container health check defined.
The following describes the possible healthStatus
values for a task. The\n\t\t\tcontainer health check status of nonessential containers do not have an effect on the\n\t\t\thealth status of a task.
\n HEALTHY
-All essential containers within the task have\n\t\t\t\t\tpassed their health checks.
\n UNHEALTHY
-One or more essential containers have failed\n\t\t\t\t\ttheir health check.
\n UNKNOWN
-The essential containers within the task are still\n\t\t\t\t\thaving their health checks evaluated or there are no container health checks\n\t\t\t\t\tdefined.
If a task is run manually, and not as part of a service, the task will continue its\n\t\t\tlifecycle regardless of its health status. For tasks that are part of a service, if the\n\t\t\ttask reports as unhealthy then the task will be stopped and the service scheduler will\n\t\t\treplace it.
\n\t\tThe following are notes about container health check support:
\n\t\tContainer health checks require version 1.17.0 or greater of the Amazon ECS\n\t\t\t\t\tcontainer agent. For more information, see Updating the\n\t\t\t\t\t\tAmazon ECS container agent.
\n\t\t\tContainer health checks are supported for Fargate tasks if\n\t\t\t\t\tyou're using platform version 1.1.0
or greater. For more\n\t\t\t\t\tinformation, see Fargate\n\t\t\t\t\t\tplatform versions.
Container health checks aren't supported for tasks that are part of a service\n\t\t\t\t\tthat's configured to use a Classic Load Balancer.
\n\t\t\tThe full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or\n\t\t\ttask set.
\n\t\tA target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. If you're using a\n\t\t\tClassic Load Balancer, omit the target group ARN.
\n\t\tFor services using the ECS
deployment controller, you can specify one or\n\t\t\tmultiple target groups. For more information, see Registering Multiple Target Groups with a Service in\n\t\t\tthe Amazon Elastic Container Service Developer Guide.
For services using the CODE_DEPLOY
deployment controller, you're required\n\t\t\tto define two target groups for the load balancer. For more information, see Blue/Green Deployment with CodeDeploy in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
If your service's task definition uses the awsvpc
network mode, you\n\t\t\t\tmust choose ip
as the target type, not instance
. Do this\n\t\t\t\twhen creating your target groups because tasks that use the awsvpc
\n\t\t\t\tnetwork mode are associated with an elastic network interface, not an Amazon EC2\n\t\t\t\tinstance. This network mode is required for the Fargate launch\n\t\t\t\ttype.
The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or\n\t\t\ttask set.
\n\t\tA target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. If you're using a\n\t\t\tClassic Load Balancer, omit the target group ARN.
\n\t\tFor services using the ECS
deployment controller, you can specify one or\n\t\t\tmultiple target groups. For more information, see Registering multiple target groups with a service in\n\t\t\tthe Amazon Elastic Container Service Developer Guide.
For services using the CODE_DEPLOY
deployment controller, you're required\n\t\t\tto define two target groups for the load balancer. For more information, see Blue/green deployment with CodeDeploy in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
If your service's task definition uses the awsvpc
network mode, you\n\t\t\t\tmust choose ip
as the target type, not instance
. Do this\n\t\t\t\twhen creating your target groups because tasks that use the awsvpc
\n\t\t\t\tnetwork mode are associated with an elastic network interface, not an Amazon EC2\n\t\t\t\tinstance. This network mode is required for the Fargate launch\n\t\t\t\ttype.
The load balancer configuration to use with a service or task set.
\n\t\tFor specific notes and restrictions regarding the use of load balancers with services\n\t\t\tand task sets, see the CreateService and CreateTaskSet actions.
\n\t\tWhen you add, update, or remove a load balancer configuration, Amazon ECS starts a new deployment\n\t\t\twith the updated Elastic Load Balancing configuration. This causes tasks to register to and deregister\n\t\t\tfrom load balancers.
\n\t\tWe recommend that you verify this on a test environment before you update the Elastic Load Balancing\n\t\t\tconfiguration.
\n\t\tA service-linked role is required for services that use multiple target groups. For\n\t\t\tmore information, see Service-linked\n\t\t\t\troles in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The load balancer configuration to use with a service or task set.
\n\t\tFor specific notes and restrictions regarding the use of load balancers with services\n\t\t\tand task sets, see the CreateService and CreateTaskSet actions.
\n\t\tWhen you add, update, or remove a load balancer configuration, Amazon ECS starts a new\n\t\t\tdeployment with the updated Elastic Load Balancing configuration. This causes tasks to register to and\n\t\t\tderegister from load balancers.
\n\t\tWe recommend that you verify this on a test environment before you update the Elastic Load Balancing\n\t\t\tconfiguration.
\n\t\tA service-linked role is required for services that use multiple target groups. For\n\t\t\tmore information, see Using\n\t\t\t\tservice-linked roles in the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#LoadBalancers": { @@ -5203,7 +5248,7 @@ "secretOptions": { "target": "com.amazonaws.ecs#SecretList", "traits": { - "smithy.api#documentation": "The secrets to pass to the log configuration. For more information, see Specifying\n\t\t\t\tSensitive Data in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The secrets to pass to the log configuration. For more information, see Specifying\n\t\t\t\tsensitive data in the Amazon Elastic Container Service Developer Guide.
" } } }, @@ -5387,7 +5432,7 @@ } }, "traits": { - "smithy.api#documentation": "The managed scaling settings for the Auto Scaling group capacity provider.
\n\t\tWhen managed scaling is enabled, Amazon ECS manages the scale-in and scale-out actions of\n\t\t\tthe Auto Scaling group. Amazon ECS manages a target tracking scaling policy using an Amazon ECS\n\t\t\tmanaged CloudWatch metric with the specified targetCapacity
value as the target\n\t\t\tvalue for the metric. For more information, see Using Managed Scaling in the Amazon Elastic Container Service Developer Guide.
If managed scaling is disabled, the user must manage the scaling of the Auto Scaling\n\t\t\tgroup.
" + "smithy.api#documentation": "The managed scaling settings for the Auto Scaling group capacity provider.
\n\t\tWhen managed scaling is enabled, Amazon ECS manages the scale-in and scale-out actions of\n\t\t\tthe Auto Scaling group. Amazon ECS manages a target tracking scaling policy using an Amazon ECS\n\t\t\tmanaged CloudWatch metric with the specified targetCapacity
value as the target\n\t\t\tvalue for the metric. For more information, see Using managed scaling in the Amazon Elastic Container Service Developer Guide.
If managed scaling is disabled, the user must manage the scaling of the Auto Scaling\n\t\t\tgroup.
" } }, "com.amazonaws.ecs#ManagedScalingInstanceWarmupPeriod": { @@ -5684,7 +5729,7 @@ } }, "traits": { - "smithy.api#documentation": "An object representing a constraint on task placement. For more information, see\n\t\t\t\tTask Placement Constraints in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
\n\t\tIf you're using the Fargate launch type, task placement constraints\n\t\t\t\taren't supported.
\n\t\tAn object representing a constraint on task placement. For more information, see\n\t\t\t\tTask placement constraints in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
\n\t\tIf you're using the Fargate launch type, task placement constraints\n\t\t\t\taren't supported.
\n\t\tThe task placement strategy for a task or service. For more information, see Task Placement Strategies in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The task placement strategy for a task or service. For more information, see Task placement strategies in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#PlacementStrategyType": { @@ -5931,7 +5976,7 @@ } ], "traits": { - "smithy.api#documentation": "Modifies an account setting. Account settings are set on a per-Region basis.
\n\t\tIf you change the account setting for the root user, the default settings for all of\n\t\t\tthe IAM users and roles that no individual account setting was specified are reset for.\n\t\t\tFor more information, see Account\n\t\t\t\tSettings in the Amazon Elastic Container Service Developer Guide.
\n\t\tWhen serviceLongArnFormat
, taskLongArnFormat
, or\n\t\t\t\tcontainerInstanceLongArnFormat
are specified, the Amazon Resource Name\n\t\t\t(ARN) and resource ID format of the resource type for a specified IAM user, IAM role, or\n\t\t\tthe root user for an account is affected. The opt-in and opt-out account setting must be\n\t\t\tset for each Amazon ECS resource separately. The ARN and resource ID format of a resource is\n\t\t\tdefined by the opt-in status of the IAM user or role that created the resource. You must\n\t\t\tturn on this setting to use Amazon ECS features such as resource tagging.
When awsvpcTrunking
is specified, the elastic network interface (ENI)\n\t\t\tlimit for any new container instances that support the feature is changed. If\n\t\t\t\tawsvpcTrunking
is enabled, any new container instances that support the\n\t\t\tfeature are launched have the increased ENI limits available to them. For more\n\t\t\tinformation, see Elastic Network\n\t\t\t\tInterface Trunking in the Amazon Elastic Container Service Developer Guide.
When containerInsights
is specified, the default setting indicating\n\t\t\twhether CloudWatch Container Insights is enabled for your clusters is changed. If\n\t\t\t\tcontainerInsights
is enabled, any new clusters that are created will\n\t\t\thave Container Insights enabled unless you disable it during cluster creation. For more\n\t\t\tinformation, see CloudWatch\n\t\t\t\tContainer Insights in the Amazon Elastic Container Service Developer Guide.
Modifies an account setting. Account settings are set on a per-Region basis.
\n\t\tIf you change the account setting for the root user, the default settings for all of\n\t\t\tthe IAM users and roles that no individual account setting was specified are reset for.\n\t\t\tFor more information, see Account\n\t\t\t\tSettings in the Amazon Elastic Container Service Developer Guide.
\n\t\tWhen serviceLongArnFormat
, taskLongArnFormat
, or\n\t\t\t\tcontainerInstanceLongArnFormat
are specified, the Amazon Resource Name\n\t\t\t(ARN) and resource ID format of the resource type for a specified IAM user, IAM role, or\n\t\t\tthe root user for an account is affected. The opt-in and opt-out account setting must be\n\t\t\tset for each Amazon ECS resource separately. The ARN and resource ID format of a resource\n\t\t\tis defined by the opt-in status of the IAM user or role that created the resource. You\n\t\t\tmust turn on this setting to use Amazon ECS features such as resource tagging.
When awsvpcTrunking
is specified, the elastic network interface (ENI)\n\t\t\tlimit for any new container instances that support the feature is changed. If\n\t\t\t\tawsvpcTrunking
is enabled, any new container instances that support the\n\t\t\tfeature are launched have the increased ENI limits available to them. For more\n\t\t\tinformation, see Elastic Network\n\t\t\t\tInterface Trunking in the Amazon Elastic Container Service Developer Guide.
When containerInsights
is specified, the default setting indicating\n\t\t\twhether CloudWatch Container Insights is enabled for your clusters is changed. If\n\t\t\t\tcontainerInsights
is enabled, any new clusters that are created will\n\t\t\thave Container Insights enabled unless you disable it during cluster creation. For more\n\t\t\tinformation, see CloudWatch\n\t\t\t\tContainer Insights in the Amazon Elastic Container Service Developer Guide.
The ARN of the principal, which can be an IAM user, IAM role, or the root user. If you\n\t\t\tspecify the root user, it modifies the account setting for all IAM users, IAM roles, and\n\t\t\tthe root user of the account unless an IAM user or role explicitly overrides these\n\t\t\tsettings. If this field is omitted, the setting is changed only for the authenticated\n\t\t\tuser.
\n\t\tFederated users assume the account setting of the root user and can't have\n\t\t\t\texplicit account settings set for them.
\n\t\tThe ARN of the principal, which can be an IAM user, IAM role, or the root user. If\n\t\t\tyou specify the root user, it modifies the account setting for all IAM users, IAM roles,\n\t\t\tand the root user of the account unless an IAM user or role explicitly overrides these\n\t\t\tsettings. If this field is omitted, the setting is changed only for the authenticated\n\t\t\tuser.
\n\t\tFederated users assume the account setting of the root user and can't have\n\t\t\t\texplicit account settings set for them.
\n\t\tThe Amazon Resource Name (ARN) of the secret containing the private repository\n\t\t\tcredentials.
\n\t\tWhen you use the Amazon ECS API, CLI, or Amazon Web Services SDK, if the secret exists in the same\n\t\t\t\tRegion as the task that you're launching then you can use either the full ARN or the\n\t\t\t\tname of the secret. When you use the Amazon Web Services Management Console, you must specify the full ARN of the\n\t\t\t\tsecret.
\n\t\tThe Amazon Resource Name (ARN) of the secret containing the private repository\n\t\t\tcredentials.
\n\t\tWhen you use the Amazon ECS API, CLI, or Amazon Web Services SDK, if the secret exists in the same\n\t\t\t\tRegion as the task that you're launching then you can use either the full ARN or\n\t\t\t\tthe name of the secret. When you use the Amazon Web Services Management Console, you must specify the full ARN\n\t\t\t\tof the secret.
\n\t\tStarts a new task using the specified task definition.
\n\t\tYou can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places\n\t\t\ttasks using placement constraints and placement strategies. For more information, see\n\t\t\t\tScheduling Tasks in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
\n\t\tAlternatively, you can use StartTask to use your own scheduler or\n\t\t\tplace tasks manually on specific container instances.
\n\t\tThe Amazon ECS API follows an eventual consistency model. This is because of the distributed\n\t\t\tnature of the system supporting the API. This means that the result of an API command\n\t\t\tyou run that affects your Amazon ECS resources might not be immediately visible to all\n\t\t\tsubsequent commands you run. Keep this in mind when you carry out an API command that\n\t\t\timmediately follows a previous API command.
\n\t\tTo manage eventual consistency, you can do the following:
\n\t\tConfirm the state of the resource before you run a command to modify it. Run\n\t\t\t\t\tthe DescribeTasks command using an exponential backoff algorithm to ensure that\n\t\t\t\t\tyou allow enough time for the previous command to propagate through the system.\n\t\t\t\t\tTo do this, run the DescribeTasks command repeatedly, starting with a couple of\n\t\t\t\t\tseconds of wait time and increasing gradually up to five minutes of wait\n\t\t\t\t\ttime.
\n\t\t\tAdd wait time between subsequent commands, even if the DescribeTasks command\n\t\t\t\t\treturns an accurate response. Apply an exponential backoff algorithm starting\n\t\t\t\t\twith a couple of seconds of wait time, and increase gradually up to about five\n\t\t\t\t\tminutes of wait time.
\n\t\t\tStarts a new task using the specified task definition.
\n\t\tYou can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places\n\t\t\ttasks using placement constraints and placement strategies. For more information, see\n\t\t\t\tScheduling Tasks in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
\n\t\tAlternatively, you can use StartTask to use your own scheduler or\n\t\t\tplace tasks manually on specific container instances.
\n\t\tThe Amazon ECS API follows an eventual consistency model. This is because of the\n\t\t\tdistributed nature of the system supporting the API. This means that the result of an\n\t\t\tAPI command you run that affects your Amazon ECS resources might not be immediately visible\n\t\t\tto all subsequent commands you run. Keep this in mind when you carry out an API command\n\t\t\tthat immediately follows a previous API command.
\n\t\tTo manage eventual consistency, you can do the following:
\n\t\tConfirm the state of the resource before you run a command to modify it. Run\n\t\t\t\t\tthe DescribeTasks command using an exponential backoff algorithm to ensure that\n\t\t\t\t\tyou allow enough time for the previous command to propagate through the system.\n\t\t\t\t\tTo do this, run the DescribeTasks command repeatedly, starting with a couple of\n\t\t\t\t\tseconds of wait time and increasing gradually up to five minutes of wait\n\t\t\t\t\ttime.
\n\t\t\tAdd wait time between subsequent commands, even if the DescribeTasks command\n\t\t\t\t\treturns an accurate response. Apply an exponential backoff algorithm starting\n\t\t\t\t\twith a couple of seconds of wait time, and increase gradually up to about five\n\t\t\t\t\tminutes of wait time.
\n\t\t\tDetermines whether to use the execute command functionality for the containers in\n\t\t\tthis task. If true
, this enables execute command functionality on all\n\t\t\tcontainers in the task.
If true
, then the task definition must have a task role, or you must\n\t\t\tprovide one as an override.
Determines whether to use the execute command functionality for the containers in this\n\t\t\ttask. If true
, this enables execute command functionality on all containers\n\t\t\tin the task.
If true
, then the task definition must have a task role, or you must\n\t\t\tprovide one as an override.
The family
and revision
(family:revision
) or\n\t\t\tfull ARN of the task definition to run. If a revision
isn't specified,\n\t\t\tthe latest ACTIVE
revision is used.
When you create an IAM policy for run-task, you can set the resource to be the latest task definition revision, or a specific revision.
\n\t\tThe full ARN value must match the value that you specified as the Resource
of\n\t\t\tthe IAM principal's permissions policy.
When you specify the policy resource as the latest task definition version (by setting the\n\t\t\t\tResource
in the policy to\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName
),\n\t\t\tthen set this value to\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName
.
When you specify the policy resource as a specific task definition version (by setting the\n\t\t\t\tResource
in the policy to\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:1
or\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:*
),\n\t\t\tthen set this value to\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:1
.
For more information, see Policy Resources for Amazon ECS in the Amazon Elastic Container Service\n\t\t\tdeveloper Guide.
", + "smithy.api#documentation": "The family
and revision
(family:revision
) or\n\t\t\tfull ARN of the task definition to run. If a revision
isn't specified,\n\t\t\tthe latest ACTIVE
revision is used.
When you create an IAM policy for run-task, you can set the resource to be the latest\n\t\t\ttask definition revision, or a specific revision.
\n\t\tThe full ARN value must match the value that you specified as the\n\t\t\t\tResource
of the IAM principal's permissions policy.
When you specify the policy resource as the latest task definition version (by setting\n\t\t\tthe Resource
in the policy to\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName
),\n\t\t\tthen set this value to\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName
.
When you specify the policy resource as a specific task definition version (by setting\n\t\t\tthe Resource
in the policy to\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:1
or\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:*
),\n\t\t\tthen set this value to\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:1
.
For more information, see Policy Resources for Amazon ECS in the Amazon Elastic Container Service developer Guide.
", "smithy.api#required": {} } } @@ -6699,7 +6744,7 @@ "cpuArchitecture": { "target": "com.amazonaws.ecs#CPUArchitecture", "traits": { - "smithy.api#documentation": "The CPU architecture.
\n\t\tYou can run your Linux tasks on an ARM-based platform by setting the value to ARM64
. This option is avaiable\n\t\t\tfor tasks that run on Linuc Amazon EC2 instance or Linux containers on Fargate.
The CPU architecture.
\n\t\tYou can run your Linux tasks on an ARM-based platform by setting the value to\n\t\t\t\tARM64
. This option is avaiable for tasks that run on Linux Amazon EC2\n\t\t\tinstance or Linux containers on Fargate.
The secret to expose to the container. The supported values are either the full ARN of\n\t\t\tthe Secrets Manager secret or the full ARN of the parameter in the SSM Parameter Store.
\n\t\tFor information about the require Identity and Access Management permissions, see Required IAM permissions for Amazon ECS secrets (for Secrets Manager) or Required IAM permissions for Amazon ECS secrets (for Systems Manager Parameter store) in the Amazon Elastic Container Service Developer Guide.
\n\t\tIf the SSM Parameter Store parameter exists in the same Region as the task\n\t\t\t\tyou're launching, then you can use either the full ARN or name of the parameter. If\n\t\t\t\tthe parameter exists in a different Region, then the full ARN must be\n\t\t\t\tspecified.
\n\t\tThe secret to expose to the container. The supported values are either the full ARN\n\t\t\tof the Secrets Manager secret or the full ARN of the parameter in the SSM Parameter\n\t\t\tStore.
\n\t\tFor information about the require Identity and Access Management permissions, see Required IAM permissions for Amazon ECS secrets (for Secrets Manager) or Required IAM permissions for Amazon ECS secrets (for Systems Manager Parameter\n\t\t\tstore) in the Amazon Elastic Container Service Developer Guide.
\n\t\tIf the SSM Parameter Store parameter exists in the same Region as the task\n\t\t\t\tyou're launching, then you can use either the full ARN or name of the parameter.\n\t\t\t\tIf the parameter exists in a different Region, then the full ARN must be\n\t\t\t\tspecified.
\n\t\tAn object representing the secret to expose to your container. Secrets can be exposed\n\t\t\tto a container in the following ways:
\n\t\tTo inject sensitive data into your containers as environment variables, use\n\t\t\t\t\tthe secrets
container definition parameter.
To reference sensitive information in the log configuration of a container,\n\t\t\t\t\tuse the secretOptions
container definition parameter.
For more information, see Specifying\n\t\t\t\tSensitive Data in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "An object representing the secret to expose to your container. Secrets can be exposed\n\t\t\tto a container in the following ways:
\n\t\tTo inject sensitive data into your containers as environment variables, use\n\t\t\t\t\tthe secrets
container definition parameter.
To reference sensitive information in the log configuration of a container,\n\t\t\t\t\tuse the secretOptions
container definition parameter.
For more information, see Specifying\n\t\t\t\tsensitive data in the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#SecretList": { @@ -6826,7 +6871,7 @@ "serviceArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The ARN that identifies the service. The ARN contains the arn:aws:ecs
namespace, followed by the Region of the service, the Amazon Web Services account ID of the service owner, the service
namespace, and then the service name. For example, arn:aws:ecs:region:012345678910:service/my-service
.
The ARN that identifies the service. For more information about the ARN format,\n\t\t\tsee Amazon Resource Name (ARN) in the Amazon ECS Developer Guide.
" } }, "serviceName": { @@ -6970,7 +7015,7 @@ "schedulingStrategy": { "target": "com.amazonaws.ecs#SchedulingStrategy", "traits": { - "smithy.api#documentation": "The scheduling strategy to use for the service. For more information, see Services.
\n\t\tThere are two service scheduler strategies available.
\n\t\t\n REPLICA
-The replica scheduling strategy places and\n\t\t\t\t\tmaintains the desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement\n\t\t\t\t\tdecisions.
\n DAEMON
-The daemon scheduling strategy deploys exactly one task on each\n\t\t\t\t\tactive container instance. This task meets all of the task placement constraints\n\t\t\t\t\tthat you specify in your cluster. The service scheduler also evaluates the task\n\t\t\t\t\tplacement constraints for running tasks. It stop tasks that don't meet the\n\t\t\t\t\tplacement constraints.
Fargate tasks don't support the DAEMON
\n\t\t\t\t\t\tscheduling strategy.
The scheduling strategy to use for the service. For more information, see Services.
\n\t\tThere are two service scheduler strategies available.
\n\t\t\n REPLICA
-The replica scheduling strategy places and\n\t\t\t\t\tmaintains the desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement\n\t\t\t\t\tdecisions.
\n DAEMON
-The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance. This task meets all of the task\n\t\t\t\t\tplacement constraints that you specify in your cluster. The service scheduler\n\t\t\t\t\talso evaluates the task placement constraints for running tasks. It stop tasks\n\t\t\t\t\tthat don't meet the placement constraints.
Fargate tasks don't support the DAEMON
\n\t\t\t\t\t\tscheduling strategy.
The details for the service registry.
\n\t\tEach service may be associated with one service registry. Multiple service registries for\n\t\t\teach service are not supported.
\n\t\tWhen you add, update, or remove the service registries configuration, Amazon ECS starts a\n\t\t\tnew deployment. New tasks are registered and deregistered to the updated service\n\t\t\tregistry configuration.
" + "smithy.api#documentation": "The details for the service registry.
\n\t\tEach service may be associated with one service registry. Multiple service registries\n\t\t\tfor each service are not supported.
\n\t\tWhen you add, update, or remove the service registries configuration, Amazon ECS starts a\n\t\t\tnew deployment. New tasks are registered and deregistered to the updated service\n\t\t\tregistry configuration.
" } }, "com.amazonaws.ecs#Services": { @@ -7143,7 +7188,7 @@ "streamUrl": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "A URL\n\t\t\t\t\t\tto the managed agent on the container that the SSM Session Manager client\n\t\t\tuses to send commands and receive output from the container.
" + "smithy.api#documentation": "A URL to the managed agent on the container that the SSM Session Manager client uses\n\t\t\tto send commands and receive output from the container.
" } }, "tokenValue": { @@ -7841,7 +7886,7 @@ } }, "traits": { - "smithy.api#documentation": "The execute command cannot run. This error can be caused by any of the following\n\t\t\tconfiguration issues:
\n\t\tIncorrect IAM permissions
\nThe SSM agent is not installed or is not running
\nThere is an interface Amazon VPC endpoint for Amazon ECS, but there is not one for for Systems Manager Session Manager
\nFor information about how to troubleshoot the\n\t\t\tissues, see Troubleshooting issues with ECS Exec in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
", + "smithy.api#documentation": "The execute command cannot run. This error can be caused by any of the following\n\t\t\tconfiguration issues:
\n\t\tIncorrect IAM permissions
\n\t\t\tThe SSM agent is not installed or is not running
\n\t\t\tThere is an interface Amazon VPC endpoint for Amazon ECS, but there is not one for\n\t\t\t\t\tfor Systems Manager Session Manager
\n\t\t\tFor information about how to troubleshoot the issues, see Troubleshooting issues with ECS\n\t\t\t\tExec in the Amazon Elastic Container Service Developer Guide.
", "smithy.api#error": "client" } }, @@ -9181,7 +9226,7 @@ } ], "traits": { - "smithy.api#documentation": "Updating the task placement strategies and constraints on an Amazon ECS service remains\n\t\t\t\tin preview and is a Beta Service as defined by and subject to the Beta Service\n\t\t\t\tParticipation Service Terms located at https://aws.amazon.com/service-terms (\"Beta Terms\"). These Beta Terms\n\t\t\t\tapply to your participation in this preview.
\n\t\tModifies the parameters of a service.
\n\t\tFor services using the rolling update (ECS
) you can update the desired count,\n\t\t\tdeployment configuration, network configuration, load balancers, service registries,\n\t\t\tenable ECS managed tags option, propagate tags option, task placement constraints and\n\t\t\tstrategies, and task definition. When you update any of these parameters, Amazon ECS starts\n\t\t\tnew tasks with the new configuration.
For services using the blue/green (CODE_DEPLOY
) deployment controller, only the\n\t\t\tdesired count, deployment configuration, health check grace period, task placement\n\t\t\tconstraints and strategies, enable ECS managed tags option, and propagate tags can be\n\t\t\tupdated using this API. If the network configuration, platform version, task definition,\n\t\t\tor load balancer need to be updated, create a new CodeDeploy deployment. For more\n\t\t\tinformation, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired count,\n\t\t\ttask placement constraints and strategies, health check grace period, enable ECS managed\n\t\t\ttags option, and propagate tags option, using this API. If the launch type, load\n\t\t\tbalancer, network configuration, platform version, or task definition need to be\n\t\t\tupdated, create a new task set For more information, see CreateTaskSet.
\n\t\tYou can add to or subtract from the number of instantiations of a task definition in a\n\t\t\tservice by specifying the cluster that the service is running in and a new\n\t\t\t\tdesiredCount
parameter.
If you have updated the Docker image of your application, you can create a new task\n\t\t\tdefinition with that image and deploy it to your service. The service scheduler uses the\n\t\t\tminimum healthy percent and maximum percent parameters (in the service's deployment\n\t\t\tconfiguration) to determine the deployment strategy.
\n\t\tIf your updated Docker image uses the same tag as what is in the existing task\n\t\t\t\tdefinition for your service (for example, my_image:latest
), you don't\n\t\t\t\tneed to create a new revision of your task definition. You can update the service\n\t\t\t\tusing the forceNewDeployment
option. The new tasks launched by the\n\t\t\t\tdeployment pull the current image/tag combination from your repository when they\n\t\t\t\tstart.
You can also update the deployment configuration of a service. When a deployment is\n\t\t\ttriggered by updating the task definition of a service, the service scheduler uses the\n\t\t\tdeployment configuration parameters, minimumHealthyPercent
and\n\t\t\t\tmaximumPercent
, to determine the deployment strategy.
If minimumHealthyPercent
is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount
temporarily during a deployment. For example, if\n\t\t\t\t\t\tdesiredCount
is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. Tasks for\n\t\t\t\t\tservices that don't use a load balancer are considered healthy if they're in the\n\t\t\t\t\t\tRUNNING
state. Tasks for services that use a load balancer are\n\t\t\t\t\tconsidered healthy if they're in the RUNNING
state and are reported\n\t\t\t\t\tas healthy by the load balancer.
The maximumPercent
parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during a deployment. You can use it to define the\n\t\t\t\t\tdeployment batch size. For example, if desiredCount
is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four older tasks\n\t\t\t\t\t(provided that the cluster resources required to do this are available).
When UpdateService stops a task during a deployment, the equivalent\n\t\t\tof docker stop
is issued to the containers running in the task. This\n\t\t\tresults in a SIGTERM
and a 30-second timeout. After this,\n\t\t\t\tSIGKILL
is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM
gracefully and exits within 30 seconds from\n\t\t\treceiving it, no SIGKILL
is sent.
When the service scheduler launches new tasks, it determines task placement in your\n\t\t\tcluster with the following logic.
\n\t\tDetermine which of the container instances in your cluster can support your\n\t\t\t\t\tservice's task definition. For example, they have the required CPU, memory,\n\t\t\t\t\tports, and container instance attributes.
\n\t\t\tBy default, the service scheduler attempts to balance tasks across\n\t\t\t\t\tAvailability Zones in this manner even though you can choose a different\n\t\t\t\t\tplacement strategy.
\n\t\t\t\tSort the valid container instances by the fewest number of running\n\t\t\t\t\t\t\ttasks for this service in the same Availability Zone as the instance.\n\t\t\t\t\t\t\tFor example, if zone A has one running service task and zones B and C\n\t\t\t\t\t\t\teach have zero, valid container instances in either zone B or C are\n\t\t\t\t\t\t\tconsidered optimal for placement.
\n\t\t\t\t\tPlace the new service task on a valid container instance in an optimal\n\t\t\t\t\t\t\tAvailability Zone (based on the previous steps), favoring container\n\t\t\t\t\t\t\tinstances with the fewest number of running tasks for this\n\t\t\t\t\t\t\tservice.
\n\t\t\t\t\tWhen the service scheduler stops running tasks, it attempts to maintain balance across\n\t\t\tthe Availability Zones in your cluster using the following logic:
\n\t\tSort the container instances by the largest number of running tasks for this\n\t\t\t\t\tservice in the same Availability Zone as the instance. For example, if zone A\n\t\t\t\t\thas one running service task and zones B and C each have two, container\n\t\t\t\t\tinstances in either zone B or C are considered optimal for termination.
\n\t\t\tStop the task on a container instance in an optimal Availability Zone (based\n\t\t\t\t\ton the previous steps), favoring container instances with the largest number of\n\t\t\t\t\trunning tasks for this service.
\n\t\t\tYou must have a service-linked role when you update any of the following service properties.\n\t\t\tIf you specified a custom IAM role when you created the service, Amazon ECS automatically\n\t\t\treplaces the roleARN associated with the service with the ARN of your service-linked\n\t\t\trole. For more information, see Service-linked\n\t\t\t\troles in the Amazon Elastic Container Service Developer Guide.
\n\t\t\n loadBalancers,
\n
\n serviceRegistries
\n
Modifies the parameters of a service.
\n\t\tFor services using the rolling update (ECS
) you can update the desired\n\t\t\tcount, deployment configuration, network configuration, load balancers, service\n\t\t\tregistries, enable ECS managed tags option, propagate tags option, task placement\n\t\t\tconstraints and strategies, and task definition. When you update any of these\n\t\t\tparameters, Amazon ECS starts new tasks with the new configuration.
For services using the blue/green (CODE_DEPLOY
) deployment controller,\n\t\t\tonly the desired count, deployment configuration, health check grace period, task\n\t\t\tplacement constraints and strategies, enable ECS managed tags option, and propagate tags\n\t\t\tcan be updated using this API. If the network configuration, platform version, task\n\t\t\tdefinition, or load balancer need to be updated, create a new CodeDeploy deployment. For more\n\t\t\tinformation, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired\n\t\t\tcount, task placement constraints and strategies, health check grace period, enable ECS\n\t\t\tmanaged tags option, and propagate tags option, using this API. If the launch type, load\n\t\t\tbalancer, network configuration, platform version, or task definition need to be\n\t\t\tupdated, create a new task set For more information, see CreateTaskSet.
\n\t\tYou can add to or subtract from the number of instantiations of a task definition in a\n\t\t\tservice by specifying the cluster that the service is running in and a new\n\t\t\t\tdesiredCount
parameter.
If you have updated the Docker image of your application, you can create a new task\n\t\t\tdefinition with that image and deploy it to your service. The service scheduler uses the\n\t\t\tminimum healthy percent and maximum percent parameters (in the service's deployment\n\t\t\tconfiguration) to determine the deployment strategy.
\n\t\tIf your updated Docker image uses the same tag as what is in the existing task\n\t\t\t\tdefinition for your service (for example, my_image:latest
), you don't\n\t\t\t\tneed to create a new revision of your task definition. You can update the service\n\t\t\t\tusing the forceNewDeployment
option. The new tasks launched by the\n\t\t\t\tdeployment pull the current image/tag combination from your repository when they\n\t\t\t\tstart.
You can also update the deployment configuration of a service. When a deployment is\n\t\t\ttriggered by updating the task definition of a service, the service scheduler uses the\n\t\t\tdeployment configuration parameters, minimumHealthyPercent
and\n\t\t\t\tmaximumPercent
, to determine the deployment strategy.
If minimumHealthyPercent
is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount
temporarily during a deployment. For example, if\n\t\t\t\t\t\tdesiredCount
is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. Tasks for\n\t\t\t\t\tservices that don't use a load balancer are considered healthy if they're in the\n\t\t\t\t\t\tRUNNING
state. Tasks for services that use a load balancer are\n\t\t\t\t\tconsidered healthy if they're in the RUNNING
state and are reported\n\t\t\t\t\tas healthy by the load balancer.
The maximumPercent
parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during a deployment. You can use it to define the\n\t\t\t\t\tdeployment batch size. For example, if desiredCount
is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four older tasks\n\t\t\t\t\t(provided that the cluster resources required to do this are available).
When UpdateService stops a task during a deployment, the equivalent\n\t\t\tof docker stop is issued to the containers running in the task. This\n\t\t\tresults in a SIGTERM and a 30-second timeout. After this,\n\t\t\t\tSIGKILL is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM gracefully and exits within 30 seconds from\n\t\t\treceiving it, no SIGKILL is sent.
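As an illustration of that shutdown contract, a container entrypoint can trap SIGTERM and exit within the 30-second window; a minimal Python sketch (the work loop is a placeholder):

```python
import signal
import sys
import time

shutting_down = False

def handle_sigterm(signum, frame):
    # SIGTERM arrives first (the docker stop equivalent); finish up and exit
    # before the 30-second timeout so SIGKILL is never sent.
    global shutting_down
    shutting_down = True

signal.signal(signal.SIGTERM, handle_sigterm)

while not shutting_down:
    time.sleep(1)  # placeholder for the container's real work

# drain connections / flush state here, then exit cleanly
sys.exit(0)
```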
When the service scheduler launches new tasks, it determines task placement in your\n\t\t\tcluster with the following logic.
\n\t\tDetermine which of the container instances in your cluster can support your\n\t\t\t\t\tservice's task definition. For example, they have the required CPU, memory,\n\t\t\t\t\tports, and container instance attributes.
\n\t\t\tBy default, the service scheduler attempts to balance tasks across\n\t\t\t\t\tAvailability Zones in this manner even though you can choose a different\n\t\t\t\t\tplacement strategy.
\n\t\t\t\tSort the valid container instances by the fewest number of running\n\t\t\t\t\t\t\ttasks for this service in the same Availability Zone as the instance.\n\t\t\t\t\t\t\tFor example, if zone A has one running service task and zones B and C\n\t\t\t\t\t\t\teach have zero, valid container instances in either zone B or C are\n\t\t\t\t\t\t\tconsidered optimal for placement.
\n\t\t\t\t\tPlace the new service task on a valid container instance in an optimal\n\t\t\t\t\t\t\tAvailability Zone (based on the previous steps), favoring container\n\t\t\t\t\t\t\tinstances with the fewest number of running tasks for this\n\t\t\t\t\t\t\tservice.
\n\t\t\t\t\tWhen the service scheduler stops running tasks, it attempts to maintain balance across\n\t\t\tthe Availability Zones in your cluster using the following logic:
\n\t\tSort the container instances by the largest number of running tasks for this\n\t\t\t\t\tservice in the same Availability Zone as the instance. For example, if zone A\n\t\t\t\t\thas one running service task and zones B and C each have two, container\n\t\t\t\t\tinstances in either zone B or C are considered optimal for termination.
\n\t\t\tStop the task on a container instance in an optimal Availability Zone (based\n\t\t\t\t\ton the previous steps), favoring container instances with the largest number of\n\t\t\t\t\trunning tasks for this service.
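A minimal sketch of the launch-placement and stop-balancing rules described in the two lists above, assuming a hypothetical per-instance view of running tasks (illustrative only, not the scheduler's actual implementation):

```python
from collections import defaultdict

# Hypothetical cluster view: (instance_id, availability_zone, tasks_for_this_service)
instances = [
    ("i-aaa", "us-east-1a", 1),
    ("i-bbb", "us-east-1b", 0),
    ("i-ccc", "us-east-1c", 0),
]

# Count this service's tasks per Availability Zone.
tasks_per_az = defaultdict(int)
for _, az, tasks in instances:
    tasks_per_az[az] += tasks

# Launch: pick a valid instance in the AZ with the fewest service tasks,
# favoring the instance with the fewest tasks.
launch_target = min(instances, key=lambda i: (tasks_per_az[i[1]], i[2]))

# Stop: pick an instance in the AZ with the most service tasks,
# favoring the instance with the most tasks.
stop_target = max(instances, key=lambda i: (tasks_per_az[i[1]], i[2]))

print("launch on:", launch_target[0], "stop on:", stop_target[0])
```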
\n\t\t\tYou must have a service-linked role when you update any of the following service\n\t\t\t\tproperties. If you specified a custom IAM role when you created the service, Amazon ECS\n\t\t\t\tautomatically replaces the roleARN associated with the service with the ARN of your\n\t\t\t\tservice-linked role. For more information, see Service-linked roles in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
loadBalancers
serviceRegistries
Determines whether to turn on Amazon ECS managed tags for the tasks in the service. For more\n\t\t\tinformation, see Tagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.
\n\t\tOnly tasks launched after the update will reflect the update. To update the tags on\n\t\t\tall tasks, set forceNewDeployment to true, so that Amazon ECS\n\t\t\tstarts new tasks with the updated tags.
Determines whether to turn on Amazon ECS managed tags for the tasks in the service. For\n\t\t\tmore information, see Tagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.
\n\t\tOnly tasks launched after the update will reflect the update. To update the tags on\n\t\t\tall tasks, set forceNewDeployment to true, so that Amazon ECS\n\t\t\tstarts new tasks with the updated tags.
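A hedged boto3 sketch of turning on managed tags and forcing a redeployment so the tags apply to all tasks (names are placeholders, and the parameters assume an SDK version that exposes them on UpdateService):

```python
import boto3

ecs = boto3.client("ecs")

# Turn on ECS managed tags, propagate tags from the service, and force a
# new deployment so the running tasks are replaced with tagged ones.
ecs.update_service(
    cluster="default",
    service="my-service",
    enableECSManagedTags=True,
    propagateTags="SERVICE",
    forceNewDeployment=True,
)
```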
A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the\n\t\t\tcontainer name, and the container port to access from the load balancer. The container\n\t\t\tname is as it appears in a container definition.
\n\t\tWhen you add, update, or remove a load balancer configuration, Amazon ECS starts new tasks with\n\t\t\tthe updated Elastic Load Balancing configuration, and then stops the old tasks when the new tasks are\n\t\t\trunning.
\n\t\tFor services that use rolling updates, you can add, update, or remove Elastic Load Balancing target groups.\n\t\t\tYou can update from a single target group to multiple target groups and from multiple\n\t\t\ttarget groups to a single target group.
\n\t\tFor services that use blue/green deployments, you can update Elastic Load Balancing target groups by using\n\t\t\tCreateDeployment through CodeDeploy. Note that multiple target groups\n\t\t\tare not supported for blue/green deployments. For more information see Register\n\t\t\t\tmultiple target groups with a service in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
For services that use the external deployment controller, you can add, update, or remove\n\t\t\tload balancers by using CreateTaskSet.\n\t\t\tNote that multiple target groups are not supported for external deployments. For more\n\t\t\tinformation see Register\n\t\t\t\tmultiple target groups with a service in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
\n\t\tYou can remove existing loadBalancers by passing an empty list.
A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the\n\t\t\tcontainer name, and the container port to access from the load balancer. The container\n\t\t\tname is as it appears in a container definition.
\n\t\tWhen you add, update, or remove a load balancer configuration, Amazon ECS starts new tasks\n\t\t\twith the updated Elastic Load Balancing configuration, and then stops the old tasks when the new tasks\n\t\t\tare running.
\n\t\tFor services that use rolling updates, you can add, update, or remove Elastic Load Balancing target\n\t\t\tgroups. You can update from a single target group to multiple target groups and from\n\t\t\tmultiple target groups to a single target group.
\n\t\tFor services that use blue/green deployments, you can update Elastic Load Balancing target groups by\n\t\t\tusing CreateDeployment through CodeDeploy. Note that multiple target groups\n\t\t\tare not supported for blue/green deployments. For more information see Register\n\t\t\t\tmultiple target groups with a service in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
For services that use the external deployment controller, you can add, update, or\n\t\t\tremove load balancers by using CreateTaskSet.\n\t\t\tNote that multiple target groups are not supported for external deployments. For more\n\t\t\tinformation see Register\n\t\t\t\tmultiple target groups with a service in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
\n\t\tYou can remove existing loadBalancers by passing an empty list.
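A hedged boto3 sketch of swapping the target group for a rolling-update service and of detaching all load balancers with an empty list (the ARNs and names are placeholders, and the loadBalancers parameter on UpdateService assumes an SDK version that includes it):

```python
import boto3

ecs = boto3.client("ecs")

# Point the service at a different target group.
ecs.update_service(
    cluster="default",
    service="my-service",
    loadBalancers=[
        {
            "targetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:123456789012:"
                              "targetgroup/my-tg/0123456789abcdef",
            "containerName": "web",
            "containerPort": 80,
        }
    ],
)

# Remove every load balancer by passing an empty list.
ecs.update_service(cluster="default", service="my-service", loadBalancers=[])
```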
The details for the service discovery registries to assign to this service. For more\n\t\t\tinformation, see Service\n\t\t\t\tDiscovery.
\n\t\tWhen you add, update, or remove the service registries configuration, Amazon ECS starts new tasks\n\t\t\twith the updated service registries configuration, and then stops the old tasks when the\n\t\t\tnew tasks are running.
\n\t\tYou can remove existing serviceRegistries by passing an empty\n\t\t\tlist.
The details for the service discovery registries to assign to this service. For more\n\t\t\tinformation, see Service\n\t\t\t\tDiscovery.
\n\t\tWhen you add, update, or remove the service registries configuration, Amazon ECS starts new\n\t\t\ttasks with the updated service registries configuration, and then stops the old tasks\n\t\t\twhen the new tasks are running.
\n\t\tYou can remove existing serviceRegistries by passing an empty\n\t\t\tlist.
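Similarly, a hedged sketch of updating or clearing the service discovery registries (the Cloud Map service ARN is a placeholder, and the serviceRegistries parameter on UpdateService assumes an SDK version that includes it):

```python
import boto3

ecs = boto3.client("ecs")

# Replace the service registry configuration.
ecs.update_service(
    cluster="default",
    service="my-service",
    serviceRegistries=[
        {"registryArn": "arn:aws:servicediscovery:us-east-1:123456789012:service/srv-0123456789abcdef"}
    ],
)

# Remove all service registries by passing an empty list.
ecs.update_service(cluster="default", service="my-service", serviceRegistries=[])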
Type of additional resource.
" + } + }, + "Content": { + "target": "com.amazonaws.wellarchitected#Urls", + "traits": { + "smithy.api#documentation": "The URLs for additional resources, either helpful resources or improvement plans. Up to five additional URLs can be specified.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The choice level additional resources.
" + } + }, + "com.amazonaws.wellarchitected#AdditionalResourcesList": { + "type": "list", + "member": { + "target": "com.amazonaws.wellarchitected#AdditionalResources" + } + }, "com.amazonaws.wellarchitected#Answer": { "type": "structure", "members": { @@ -186,6 +227,9 @@ "input": { "target": "com.amazonaws.wellarchitected#AssociateLensesInput" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.wellarchitected#AccessDeniedException" @@ -282,6 +326,12 @@ "traits": { "smithy.api#documentation": "The choice level improvement plan.
" } + }, + "AdditionalResources": { + "target": "com.amazonaws.wellarchitected#AdditionalResourcesList", + "traits": { + "smithy.api#documentation": "The additional resources for a choice. A choice can have up to two additional resources: one of type HELPFUL_RESOURCE
, \n one of type IMPROVEMENT_PLAN
, or both.
The ID assigned to the share invitation.
" } + }, + "Tags": { + "target": "com.amazonaws.wellarchitected#TagMap", + "traits": { + "smithy.api#documentation": "The tags assigned to the lens.
" + } } }, "traits": { @@ -2229,7 +2297,7 @@ "com.amazonaws.wellarchitected#LensAlias": { "type": "string", "traits": { - "smithy.api#documentation": "The alias of the lens, for example, serverless
.
Each lens is identified by its LensSummary$LensAlias.
", + "smithy.api#documentation": "The alias of the lens.
\nFor Amazon Web Services official lenses, this is either the lens alias,\n such as serverless
, or the lens ARN, such as arn:aws:wellarchitected:us-west-2::lens/serverless
.
For custom lenses, this is the lens ARN, such as arn:aws:wellarchitected:us-east-1:123456789012:lens/my-lens
.
Each lens is identified by its LensSummary$LensAlias.
", "smithy.api#length": { "min": 1, "max": 128 @@ -3385,7 +3453,7 @@ } ], "traits": { - "smithy.api#documentation": "List the tags for a resource.
", + "smithy.api#documentation": "List the tags for a resource.
\nThe WorkloadArn parameter can be either a workload ARN or a custom lens ARN.
\nAdds one or more tags to the specified resource.
", + "smithy.api#documentation": "Adds one or more tags to the specified resource.
\nThe WorkloadArn parameter can be either a workload ARN or a custom lens ARN.
\nDeletes specified tags from a resource.
\nTo specify multiple tags, use separate tagKeys parameters, for example:
\n\n DELETE /tags/WorkloadArn?tagKeys=key1&tagKeys=key2
\n
Deletes specified tags from a resource.
\nThe WorkloadArn parameter can be either a workload ARN or a custom lens ARN.
\nTo specify multiple tags, use separate tagKeys parameters, for example:
\n\n DELETE /tags/WorkloadArn?tagKeys=key1&tagKeys=key2
\n
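For reference, a hedged boto3 sketch of the same multi-key delete through the SDK rather than the raw REST call (the ARN is a placeholder; per the note above, the WorkloadArn parameter also accepts a custom lens ARN):

```python
import boto3

wellarchitected = boto3.client("wellarchitected")

# Remove two tags in a single call.
wellarchitected.untag_resource(
    WorkloadArn="arn:aws:wellarchitected:us-east-1:123456789012:workload/abcdef0123456789",
    TagKeys=["key1", "key2"],
)
```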