From 16800bd3b836242a1b87bef56ad53571d7d4c4b5 Mon Sep 17 00:00:00 2001 From: John Preston Date: Sat, 4 Apr 2020 15:36:17 +0100 Subject: [PATCH] Refactoring compute and its CLI (#8) Refactoring compute and its CLI tested and working. --- ecs_composex/cli.py | 27 +- ecs_composex/common/__init__.py | 51 +++- ecs_composex/common/cfn_conditions.py | 6 + ecs_composex/common/cfn_params.py | 10 + ecs_composex/compute/__init__.py | 30 ++- ecs_composex/compute/cli.py | 107 ++++---- ecs_composex/compute/cluster_conditions.py | 22 -- ecs_composex/compute/cluster_params.py | 80 ------ ecs_composex/compute/cluster_template.py | 115 -------- ecs_composex/compute/compute_conditions.py | 7 + ecs_composex/compute/compute_params.py | 48 ++++ ecs_composex/compute/compute_template.py | 147 ++++++++++ ecs_composex/compute/hosts_template.py | 297 +++++++++------------ ecs_composex/compute/spot_fleet.py | 232 ++++++++-------- ecs_composex/root.py | 139 +++++----- examples/services_with_queues.yml | 1 - setup.py | 1 + tests/services_with_queues.yml | 3 +- 18 files changed, 657 insertions(+), 666 deletions(-) delete mode 100644 ecs_composex/compute/cluster_conditions.py delete mode 100644 ecs_composex/compute/cluster_params.py delete mode 100644 ecs_composex/compute/cluster_template.py create mode 100644 ecs_composex/compute/compute_conditions.py create mode 100644 ecs_composex/compute/compute_params.py create mode 100644 ecs_composex/compute/compute_template.py diff --git a/ecs_composex/cli.py b/ecs_composex/cli.py index bc098b1e6..f93415e23 100644 --- a/ecs_composex/cli.py +++ b/ecs_composex/cli.py @@ -6,8 +6,10 @@ import sys import argparse import warnings +import json from boto3 import session -from ecs_composex.common import LOG +from ecs_composex import XFILE_DEST +from ecs_composex.common import LOG, write_template_to_file from ecs_composex.common.aws import ( BUCKET_NAME, CURATED_AZS ) @@ -17,14 +19,13 @@ APP_SUBNETS_T, PUBLIC_SUBNETS_T, STORAGE_SUBNETS_T, VPC_ID_T, VPC_MAP_ID_T ) -from ecs_composex.ecs.ecs_params import LAUNCH_TYPE_T -from ecs_composex.compute.cluster_params import ( - CLUSTER_NAME_T, USE_FLEET_T -) +from ecs_composex.common.cfn_params import USE_FLEET_T +from ecs_composex.compute.compute_params import CLUSTER_NAME_T def validate_vpc_input(args): - """Function to validate the VPC arguments are all present + """ + Function to validate the VPC arguments are all present :param args: Parser arguments :type args: dict @@ -56,6 +57,7 @@ def validate_cluster_input(args): """Function to validate the cluster arguments :param args: Parser arguments + :raise: KeyError """ if not KEYISSET('CreateCluster', args) and not KEYISSET(CLUSTER_NAME_T, args): raise KeyError(f"You must provide an ECS Cluster name if you do not want ECS ComposeX to create one for you") @@ -66,7 +68,7 @@ def main(): parser = argparse.ArgumentParser() # Generic settings parser.add_argument( - '-f', '--docker-compose-file', dest='ComposeXFile', + '-f', '--docker-compose-file', dest=XFILE_DEST, required=True, help="Path to the Docker compose file" ) parser.add_argument( @@ -131,17 +133,13 @@ def main(): help='Override/Provide ECS Cluster name' ) # COMPUTE SETTINGS - parser.add_argument( - '--use-fargate', required=False, default=False, action='store_true', - dest=LAUNCH_TYPE_T, help="If you run Fargate only, no EC2 will be created" - ) parser.add_argument( '--use-spot-fleet', required=False, default=False, action='store_true', dest=USE_FLEET_T, help="Runs spotfleet for EC2. 
If used in combination "
              "of --use-fargate, it will create an additional SpotFleet"
     )
     parser.add_argument(
-        '--create-launch-template', dest='CreateLaunchTemplate', action='store_true',
+        '--add-compute-resources', dest='AddComputeResources', action='store_true',
         help='Whether you want to create a launch template to create EC2 resources for'
         ' to expand the ECS Cluster and run containers on EC2 instances you might have access to.'
     )
@@ -158,7 +156,10 @@ def main():
     validate_cluster_input(vars(args))
     print("Arguments: " + str(args._))
-    generate_full_template(**vars(args))
+    templates_and_params = generate_full_template(**vars(args))
+    write_template_to_file(templates_and_params[0], args.output_file)
+    with open(f"{args.output_file.split('.')[0]}.params.json", 'w') as params_fd:
+        params_fd.write(json.dumps(templates_and_params[1], indent=4))


 if __name__ == "__main__":
diff --git a/ecs_composex/common/__init__.py b/ecs_composex/common/__init__.py
index c872dbeb8..48712d513 100644
--- a/ecs_composex/common/__init__.py
+++ b/ecs_composex/common/__init__.py
@@ -21,7 +21,6 @@
 from ecs_composex.common import cfn_params
 from ecs_composex.common import cfn_conditions

-
 DATE = dt.utcnow().isoformat()
 DATE_PREFIX = dt.utcnow().strftime('%Y/%m/%d/%H%M')
 NONALPHANUM = re.compile(r'[\W]+')
@@ -237,6 +236,56 @@ def setup_logging():
     return the_logger


+def write_template_to_file(template, file_path):
+    """
+    Function to write the template to a specific path
+    :param template: the troposphere template
+    :type template: troposphere.Template
+    :param file_path: file path where to write the template rendered
+    :type file_path: str
+    """
+    regex = re.compile(r'(.yml|.yaml)$')
+    with open(file_path, 'w') as template_fd:
+        if regex.findall(file_path):
+            template_fd.write(template.to_yaml())
+        else:
+            template_fd.write(template.to_json())
+
+
+def build_default_stack_parameters(stack_params, **kwargs):
+    """
+    Function to check and define default parameters for the root stack from the CLI options
+    :param stack_params: list of parameters to add to and use for the root stack
+    :type stack_params: list
+    :param kwargs: extended arguments
+    :type kwargs: dict
+    """
+    if KEYISSET(cfn_params.USE_FLEET_T, kwargs):
+        build_parameters_file(stack_params, cfn_params.USE_FLEET_T, kwargs[cfn_params.USE_FLEET_T])
+
+
+def build_parameters_file(params: list, parameter_name: str, parameter_value):
+    """
+    Function to build arguments file to pass onto CFN.
+ Adds the parameter key/value so it can be written to file afterwards + + :param params: list of parameters + :type params: list + :param parameter_name: key of the parameter + :type parameter_name: str + :param parameter_value: value of the parameter + :type parameter_value: str or int or list + """ + if params is None: + params = [] + if isinstance(parameter_value, (int, float)): + parameter_value = str(parameter_value) + params.append({ + "ParameterKey": parameter_name, + "ParameterValue": parameter_value + }) + + def load_composex_file(file_path): """File to load and read the docker compose file diff --git a/ecs_composex/common/cfn_conditions.py b/ecs_composex/common/cfn_conditions.py index 724946d63..2e16da5a5 100644 --- a/ecs_composex/common/cfn_conditions.py +++ b/ecs_composex/common/cfn_conditions.py @@ -58,3 +58,9 @@ Condition(USE_SSM_EXPORTS_T), Condition(NOT_USE_CFN_EXPORTS_T) ) + +USE_SPOT_CON_T = "UseSpotFleetHostsCondition" +USE_SPOT_CON = Equals(Ref(cfn_params.USE_FLEET), "True") + +NOT_USE_SPOT_CON_T = "NotUseSpotFleetHostsCondition" +NOT_USE_SPOT_CON = Not(Condition(USE_SPOT_CON_T)) diff --git a/ecs_composex/common/cfn_params.py b/ecs_composex/common/cfn_params.py index 8937e3322..644a72522 100644 --- a/ecs_composex/common/cfn_params.py +++ b/ecs_composex/common/cfn_params.py @@ -50,3 +50,13 @@ USE_SSM_EXPORTS_T, Type='String', AllowedValues=['True', 'False'], Default='False' ) + +USE_FLEET_T = "UseSpotFleetHosts" +USE_FLEET = Parameter( + USE_FLEET_T, Type="String", Default="False", AllowedValues=["True", "False"] +) + +USE_ONDEMAND_T = "UseOnDemandHosts" +USE_ONDEMAND = Parameter( + USE_ONDEMAND_T, Type="String", Default="False", AllowedValues=["True", "False"] +) diff --git a/ecs_composex/compute/__init__.py b/ecs_composex/compute/__init__.py index 6046f09f9..7439bab98 100644 --- a/ecs_composex/compute/__init__.py +++ b/ecs_composex/compute/__init__.py @@ -9,12 +9,14 @@ import boto3 -from ecs_composex.common import KEYISSET, load_composex_file +from ecs_composex import XFILE_DEST +from ecs_composex.common import KEYISSET, load_composex_file, build_default_stack_parameters from ecs_composex.common.aws import get_curated_azs -from ecs_composex.compute.cluster_template import generate_cluster_template +from ecs_composex.common.tagging import generate_tags_parameters +from ecs_composex.compute.compute_template import generate_compute_template -def create_cluster_template(session=None, **kwargs): +def create_compute_stack(session=None, **kwargs): """ Function entrypoint for CLI. 
:param session: boto3 session to override API calls with @@ -23,20 +25,22 @@ def create_cluster_template(session=None, **kwargs): :return: cluster template :rtype: troposphere.Template """ + tags_params = () + stack_params = [] compose_content = None - if KEYISSET('ComposeXFile', kwargs): - compose_content = load_composex_file(kwargs['ComposeXFile']) - - azs = [] - if not KEYISSET('AwsAzs', kwargs): - if KEYISSET('AwsRegion', kwargs): - azs = get_curated_azs(region=kwargs['AwsRegion']) + if KEYISSET(XFILE_DEST, kwargs): + compose_content = load_composex_file(kwargs[XFILE_DEST]) + tags_params = generate_tags_parameters(compose_content) + if not KEYISSET("AwsAzs", kwargs): + if KEYISSET("AwsRegion", kwargs): + azs = get_curated_azs(region=kwargs["AwsRegion"]) elif session is None: session = boto3.session.Session() azs = get_curated_azs(session=session) else: azs = get_curated_azs() - else: - azs = kwargs['AwsAzs'] - return generate_cluster_template(azs, compose_content, **kwargs) + azs = kwargs["AwsAzs"] + template = generate_compute_template(azs, compose_content, tags_params, **kwargs) + build_default_stack_parameters(stack_params, **kwargs) + return template, stack_params diff --git a/ecs_composex/compute/cli.py b/ecs_composex/compute/cli.py index 2be37bc98..0cc3c8193 100644 --- a/ecs_composex/compute/cli.py +++ b/ecs_composex/compute/cli.py @@ -8,16 +8,16 @@ import argparse import sys +import json from boto3 import session - -from ecs_composex.compute import create_cluster_template +from ecs_composex import XFILE_DEST +from ecs_composex.common import write_template_to_file from ecs_composex.common.aws import CURATED_AZS, BUCKET_NAME from ecs_composex.ecs.ecs_params import CLUSTER_NAME_T -from ecs_composex.vpc.vpc_params import ( - VPC_ID_T, APP_SUBNETS_T, - PUBLIC_SUBNETS_T -) +from ecs_composex.vpc.vpc_params import VPC_ID_T, APP_SUBNETS_T +from ecs_composex.compute import create_compute_stack +from ecs_composex.common.cfn_params import USE_FLEET_T def root_parser(): @@ -26,64 +26,69 @@ def root_parser(): """ parser = argparse.ArgumentParser() parser.add_argument( - '-f', '--docker-compose-file', required=False + "-f", + "--docker-compose-file", + required=False, + dest=XFILE_DEST, + help="Optionally use the YAML ComposeX file to add options and settings", ) parser.add_argument( - '-o', '--output-file', required=True, help="Output file" + "-o", + "--output-file", + required=True, + help="Output file. 
Extension determines the file format", ) # AWS SETTINGS parser.add_argument( - '--region', required=False, default=session.Session().region_name, - dest='AwsRegion', + "--region", + required=False, + default=session.Session().region_name, + dest="AwsRegion", help="Specify the region you want to build for" - "default use default region from config or environment vars" + "default use default region from config or environment vars", ) parser.add_argument( - '--az', dest='AwsAzs', action='append', required=False, default=CURATED_AZS, - help="List AZs you want to deploy to specifically within the region" + "--az", + dest="AwsAzs", + action="append", + required=False, + default=CURATED_AZS, + help="List AZs you want to deploy to specifically within the region", ) parser.add_argument( - '-b', '--bucket-name', type=str, required=False, default=BUCKET_NAME, - help='Bucket name to upload the templates to', dest='BucketName' + "-b", + "--bucket-name", + type=str, + required=False, + default=BUCKET_NAME, + help="Bucket name to upload the templates to", + dest="BucketName", ) # VPC SETTINGS parser.add_argument( - '--create-vpc', required=False, default=False, action='store_true', - help="Create a VPC for this deployment", dest='CreateVpc' - ) - parser.add_argument( - '--vpc-cidr', required=False, default='192.168.36.0/22', dest='VpcCidr', - help="Specify the VPC CIDR if you use --create-vpc" - ) - parser.add_argument( - '--vpc-id', dest=VPC_ID_T, required=False, type=str, - help='Specify VPC ID when not creating one' - ) - parser.add_argument( - '--public-subnets', required=False, dest=PUBLIC_SUBNETS_T, action='append', - help="List of Subnet IDs to use for the cluster when not creating VPC" + "--vpc-id", + dest=VPC_ID_T, + required=False, + type=str, + help="Specify VPC ID when not creating one", ) parser.add_argument( - '--app-subnets', required=False, dest=APP_SUBNETS_T, action='append', - help="List of Subnet IDs to use for the cluster when not creating VPC" - ) - parser.add_argument( - '--storage-subnets', required=False, dest=APP_SUBNETS_T, action='append', - help="List of Subnet IDs to use for the cluster when not creating VPC" + "--hosts-subnets", + required=False, + dest=APP_SUBNETS_T, + action="append", + help="List of Subnet IDs to use for the cluster when not creating VPC", ) # CLUSTER SETTINGS + parser.add_argument("--cluster-name", dest=CLUSTER_NAME_T, required=False) parser.add_argument( - '--create-cluster', required=False, default=False, action='store_true', - help="Create an ECS Cluster for this deployment", dest='CreateCluster' - ) - parser.add_argument( - '--cluster-name', dest=CLUSTER_NAME_T, required=False - ) - parser.add_argument( - '--use-spot-fleet', required=False, default=False, action='store_true', - dest='UseSpotFleet', + "--use-spot-fleet", + required=False, + default=False, + action="store_true", + dest=USE_FLEET_T, help="Runs spotfleet for EC2. 
If used in combination " - "of --use-fargate, it will create an additional SpotFleet" + "of --use-fargate, it will create an additional SpotFleet", ) return parser @@ -91,15 +96,13 @@ def root_parser(): def main(): """Console script for ecs_composex.""" parser = root_parser() - parser.add_argument('_', nargs='*') + parser.add_argument("_", nargs="*") args = parser.parse_args() - template = create_cluster_template(**vars(args)) - with open(args.output_file, 'w') as tpl_fd: - if args.output_file.endswith('.yml') or args.output_file.endswith('.yaml'): - tpl_fd.write(template.to_yaml()) - else: - tpl_fd.write(template.to_json()) + template_params = create_compute_stack(**vars(args)) + write_template_to_file(template_params[0], args.output_file) + with open(f"{args.output_file.split('.')[0]}.params.json", 'w') as params_fd: + params_fd.write(json.dumps(template_params[1], indent=4)) return 0 diff --git a/ecs_composex/compute/cluster_conditions.py b/ecs_composex/compute/cluster_conditions.py deleted file mode 100644 index ffaf4e01d..000000000 --- a/ecs_composex/compute/cluster_conditions.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- - -from troposphere import ( - Condition, Equals, Ref, Not -) -from ecs_composex.compute import cluster_params -USE_SPOT_CON_T = 'UseSpotFleetHostsCondition' -USE_SPOT_CON = Equals( - Ref(cluster_params.USE_FLEET), - 'True' -) - -NOT_USE_SPOT_CON_T = 'NotUseSpotFleetHostsCondition' -NOT_USE_SPOT_CON = Not( - Condition(USE_SPOT_CON_T) -) - -MAX_IS_MIN_T = 'CapacityMaxIsMinCondition' -MAX_IS_MIN = Equals( - Ref(cluster_params.MAX_CAPACITY), - 0 -) diff --git a/ecs_composex/compute/cluster_params.py b/ecs_composex/compute/cluster_params.py deleted file mode 100644 index 8f69fbffc..000000000 --- a/ecs_composex/compute/cluster_params.py +++ /dev/null @@ -1,80 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -Cluster parameters for CFN -This is a crucial part as all the titles, maked `_T` are string which are then used the same way -across all imports, which gives consistency for CFN to use the same names, -which it heavily relies onto. 
- -You can change the names *values* so you like so long as you keep it [a-zA-Z0-9] -""" -from troposphere import Parameter -from ecs_composex.ecs.ecs_params import CLUSTER_NAME_T - -HOST_ROLE_T = 'EcsHostsRole' -HOST_PROFILE_T = 'EcsHostsInstanceProfile' -NODES_SG_T = 'EcsHostsSg' - -CLUSTER_NAME = Parameter( - CLUSTER_NAME_T, - Type='String' -) - -USE_FLEET_T = 'UseSpotFleetHosts' -USE_FLEET = Parameter( - USE_FLEET_T, - Type='String', - Default='False', - AllowedValues=['True', 'False'] -) - -USE_ONDEMAND_T = 'UseOnDemandHosts' -USE_ONDEMAND = Parameter( - USE_ONDEMAND_T, - Type='String', - Default='False', - AllowedValues=['True', 'False'] -) - -ECS_AMI_ID_T = 'EcsAmiId' -ECS_AMI_ID = Parameter( - ECS_AMI_ID_T, - Type='AWS::SSM::Parameter::Value', - Default='/aws/service/ecs/optimized-ami/amazon-linux-2/recommended/image_id' -) - -MAX_CAPACITY_T = 'EcsMaxCapacity' -MAX_CAPACITY = Parameter( - MAX_CAPACITY_T, - Type='Number', - Default=1 -) - -MIN_CAPACITY_T = 'EcsMinCapacity' -MIN_CAPACITY = Parameter( - MIN_CAPACITY_T, - Type='Number', - MinValue=1, - Default=1 -) - -TARGET_CAPACITY_T = 'EcsTargetCapacity' -TARGET_CAPACITY = Parameter( - TARGET_CAPACITY_T, - Type='Number', - Default=MIN_CAPACITY.Default -) - -LAUNCH_TEMPLATE_ID_T = 'LaunchTemplateId' -LAUNCH_TEMPLATE_ID = Parameter( - LAUNCH_TEMPLATE_ID_T, - Type='String', - Description='ID Of the Launch Template' -) - -LAUNCH_TEMPLATE_VersionNumber_T = 'LaunchTemplateVersionNumber' -LAUNCH_TEMPLATE_VersionNumber = Parameter( - LAUNCH_TEMPLATE_VersionNumber_T, - Type='Number', - Description='VersionNumber Of the Launch Template' -) diff --git a/ecs_composex/compute/cluster_template.py b/ecs_composex/compute/cluster_template.py deleted file mode 100644 index 96644f661..000000000 --- a/ecs_composex/compute/cluster_template.py +++ /dev/null @@ -1,115 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -Main module generating the ECS Cluster template. -""" - -from troposphere import Ref, If, GetAtt -from troposphere.cloudformation import Stack -from ecs_composex.compute import cluster_params, cluster_conditions -from ecs_composex.compute.hosts_template import add_hosts_resources -from ecs_composex.compute.spot_fleet import generate_spot_fleet_template, DEFAULT_SPOT_CONFIG -from ecs_composex.common import build_template, KEYISSET, LOG -from ecs_composex.common import cfn_conditions -from ecs_composex.common.cfn_params import ROOT_STACK_NAME, ROOT_STACK_NAME_T -from ecs_composex.common.templates import upload_template -from ecs_composex.vpc import vpc_params -from ecs_composex.ecs.ecs_params import CLUSTER_NAME - - -def add_spotfleet_stack(template, region_azs, compose_content, launch_template, **kwargs): - """ - Function to build the spotfleet stack and add it to the Cluster parent template - - :param launch_template: the launch template - :type launch_template: troposphere.ec2.LaunchTemplate - :param template: parent cluster template - :type template: troposphere.Template - :param compose_content: docker / composex file content - :type compose_content: dict - :param region_azs: List of AWS Azs i.e. ['eu-west-1a', 'eu-west-1b'] - :type region_azs: list - - :returns: void - """ - spot_config = None - if KEYISSET('configs', compose_content): - configs = compose_content['configs'] - if KEYISSET('spot_config', configs): - spot_config = configs['spot_config'] - - if spot_config: - kwargs.update({'spot_config': spot_config}) - else: - LOG.warn('No spot_config set in configs of ComposeX File. 
Setting to defaults') - kwargs.update({'spot_config': DEFAULT_SPOT_CONFIG}) - fleet_template = generate_spot_fleet_template(region_azs, **kwargs) - fleet_template_url = upload_template( - fleet_template.to_json(), - kwargs['BucketName'], - 'spot_fleet.json' - ) - if not fleet_template_url: - LOG.warn('Fleet template URL not returned. Not adding SpotFleet to Cluster stack') - return - template.add_resource(Stack( - 'SpotFleet', - Condition=cluster_conditions.USE_SPOT_CON_T, - TemplateURL=fleet_template_url, - Parameters={ - ROOT_STACK_NAME_T: If( - cfn_conditions.USE_STACK_NAME_CON_T, - Ref('AWS::StackName'), - Ref(ROOT_STACK_NAME) - ), - cluster_params.LAUNCH_TEMPLATE_ID_T: Ref(launch_template), - cluster_params.LAUNCH_TEMPLATE_VersionNumber_T: GetAtt( - launch_template, 'LatestVersionNumber' - ), - cluster_params.MAX_CAPACITY_T: Ref(cluster_params.MAX_CAPACITY), - cluster_params.MIN_CAPACITY_T: Ref(cluster_params.MIN_CAPACITY), - cluster_params.TARGET_CAPACITY_T: Ref(cluster_params.TARGET_CAPACITY) - } - )) - - -def generate_cluster_template(region_azs, compose_content=None, **kwargs): - """Function that generates the ECS Cluster - - :param region_azs: List of AZs for hosts, i.e. ['eu-west-1', 'eu-west-b'] - :type region_azs: list - :param compose_content: Compose dictionary to parse for services etc. - :type compose_content: dict - - :return: ECS Cluster Template - :rtype: troposphere.Template - """ - template = build_template( - 'Cluster template generated by ECS Compose X', - [ - cluster_params.CLUSTER_NAME, - cluster_params.USE_FLEET, - cluster_params.USE_ONDEMAND, - cluster_params.ECS_AMI_ID, - cluster_params.TARGET_CAPACITY, - cluster_params.MIN_CAPACITY, - cluster_params.MAX_CAPACITY, - vpc_params.APP_SUBNETS, - vpc_params.VPC_ID, - CLUSTER_NAME - ] - ) - template.add_condition( - cluster_conditions.MAX_IS_MIN_T, - cluster_conditions.MAX_IS_MIN - ) - template.add_condition( - cluster_conditions.USE_SPOT_CON_T, - cluster_conditions.USE_SPOT_CON - ) - launch_template = add_hosts_resources(template) - add_spotfleet_stack( - template, region_azs, - compose_content, launch_template, **kwargs - ) - return template diff --git a/ecs_composex/compute/compute_conditions.py b/ecs_composex/compute/compute_conditions.py new file mode 100644 index 000000000..6dc4b990b --- /dev/null +++ b/ecs_composex/compute/compute_conditions.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- + +from troposphere import Condition, Equals, Ref, Not +from ecs_composex.compute import compute_params + +MAX_IS_MIN_T = "CapacityMaxIsMinCondition" +MAX_IS_MIN = Equals(Ref(compute_params.MAX_CAPACITY), 0) diff --git a/ecs_composex/compute/compute_params.py b/ecs_composex/compute/compute_params.py new file mode 100644 index 000000000..a7f6e0c09 --- /dev/null +++ b/ecs_composex/compute/compute_params.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- + +""" +Cluster parameters for CFN +This is a crucial part as all the titles, maked `_T` are string which are then used the same way +across all imports, which gives consistency for CFN to use the same names, +which it heavily relies onto. 
+ +You can change the names *values* so you like so long as you keep it [a-zA-Z0-9] +""" +from troposphere import Parameter +from ecs_composex.ecs.ecs_params import CLUSTER_NAME_T + +HOST_ROLE_T = "EcsHostsRole" +HOST_PROFILE_T = "EcsHostsInstanceProfile" +NODES_SG_T = "EcsHostsSg" + +CLUSTER_NAME = Parameter(CLUSTER_NAME_T, Type="String") + +ECS_AMI_ID_T = "EcsAmiId" +ECS_AMI_ID = Parameter( + ECS_AMI_ID_T, + Type="AWS::SSM::Parameter::Value", + Default="/aws/service/ecs/optimized-ami/amazon-linux-2/recommended/image_id", +) + +MAX_CAPACITY_T = "EcsMaxCapacity" +MAX_CAPACITY = Parameter(MAX_CAPACITY_T, Type="Number", Default=1) + +MIN_CAPACITY_T = "EcsMinCapacity" +MIN_CAPACITY = Parameter(MIN_CAPACITY_T, Type="Number", MinValue=1, Default=1) + +TARGET_CAPACITY_T = "EcsTargetCapacity" +TARGET_CAPACITY = Parameter( + TARGET_CAPACITY_T, Type="Number", Default=MIN_CAPACITY.Default +) + +LAUNCH_TEMPLATE_ID_T = "LaunchTemplateId" +LAUNCH_TEMPLATE_ID = Parameter( + LAUNCH_TEMPLATE_ID_T, Type="String", Description="ID Of the Launch Template" +) + +LAUNCH_TEMPLATE_VersionNumber_T = "LaunchTemplateVersionNumber" +LAUNCH_TEMPLATE_VersionNumber = Parameter( + LAUNCH_TEMPLATE_VersionNumber_T, + Type="Number", + Description="VersionNumber Of the Launch Template", +) diff --git a/ecs_composex/compute/compute_template.py b/ecs_composex/compute/compute_template.py new file mode 100644 index 000000000..d69edf4cc --- /dev/null +++ b/ecs_composex/compute/compute_template.py @@ -0,0 +1,147 @@ +# -*- coding: utf-8 -*- + +""" +Main module generating the ECS Cluster template. + +The root stack is to build the IAM Instance profile for the hosts that can be used for ASG or SpotFleet. +That way it is easy for anyone to deploy an instance in standalone if you wanted that. +""" + +from troposphere import Ref, If, GetAtt +from troposphere.cloudformation import Stack +from ecs_composex.compute import compute_params, compute_conditions +from ecs_composex.compute.hosts_template import add_hosts_resources +from ecs_composex.compute.spot_fleet import ( + generate_spot_fleet_template, + DEFAULT_SPOT_CONFIG, +) +from ecs_composex.common import ( + build_template, + KEYISSET, + LOG, + add_parameters, + write_template_to_file, +) +from ecs_composex.common import cfn_conditions +from ecs_composex.common.cfn_params import ROOT_STACK_NAME, ROOT_STACK_NAME_T, USE_FLEET, USE_ONDEMAND +from ecs_composex.common.templates import upload_template +from ecs_composex.vpc import vpc_params +from ecs_composex.ecs.ecs_params import CLUSTER_NAME +from ecs_composex.common.tagging import add_object_tags + + +def add_spotfleet_stack( + template, region_azs, compose_content, launch_template, tags=None, **kwargs +): + """ + Function to build the spotfleet stack and add it to the Cluster parent template + + :param launch_template: the launch template + :type launch_template: troposphere.ec2.LaunchTemplate + :param template: parent cluster template + :type template: troposphere.Template + :param compose_content: docker / composeX file content + :type compose_content: dict + :param region_azs: List of AWS Azs i.e. 
['eu-west-1a', 'eu-west-1b'] + :type region_azs: list + :param tags: tuple of tags to add to objects and the template + :type tags: tuple + """ + spot_config = None + parameters = { + ROOT_STACK_NAME_T: If( + cfn_conditions.USE_STACK_NAME_CON_T, + Ref("AWS::StackName"), + Ref(ROOT_STACK_NAME), + ), + compute_params.LAUNCH_TEMPLATE_ID_T: Ref(launch_template), + compute_params.LAUNCH_TEMPLATE_VersionNumber_T: GetAtt( + launch_template, "LatestVersionNumber" + ), + compute_params.MAX_CAPACITY_T: Ref(compute_params.MAX_CAPACITY), + compute_params.MIN_CAPACITY_T: Ref(compute_params.MIN_CAPACITY), + compute_params.TARGET_CAPACITY_T: Ref(compute_params.TARGET_CAPACITY), + } + if KEYISSET("configs", compose_content): + configs = compose_content["configs"] + if KEYISSET("spot_config", configs): + spot_config = configs["spot_config"] + + if spot_config: + kwargs.update({"spot_config": spot_config}) + else: + LOG.warn("No spot_config set in configs of ComposeX File. Setting to defaults") + kwargs.update({"spot_config": DEFAULT_SPOT_CONFIG}) + fleet_template = generate_spot_fleet_template(region_azs, **kwargs) + if tags and tags[0]: + add_parameters(fleet_template, tags[0]) + for tag in tags[0]: + parameters.update({tag.title: Ref(tag.title)}) + for resource in fleet_template.resources: + add_object_tags(fleet_template.resources[resource], tags[1]) + fleet_template_url = upload_template( + fleet_template.to_json(), kwargs["BucketName"], "spot_fleet.json" + ) + if not fleet_template_url: + LOG.warn( + "Fleet template URL not returned. Not adding SpotFleet to Cluster stack" + ) + return + write_template_to_file(fleet_template, "/tmp/spot_fleet.yml") + template.add_resource( + Stack( + "SpotFleet", + Condition=cfn_conditions.USE_SPOT_CON_T, + TemplateURL=fleet_template_url, + Parameters=parameters, + ) + ) + + +def generate_compute_template(region_azs, compose_content=None, tags=None, **kwargs): + """ + Function that generates the Compute resources to run ECS services on top of EC2 + + :param tags: tuple tags to add to the template as parameters and to objects as Tags + :type tags: tuple + :param region_azs: List of AZs for hosts, i.e. ['eu-west-1', 'eu-west-b'] + :type region_azs: list + :param compose_content: Compose dictionary to parse for services etc. 
+ :type compose_content: dict + + :return: ECS Cluster Template + :rtype: troposphere.Template + """ + if tags is None: + tags = () + template = build_template( + "Cluster template generated by ECS Compose X", + [ + USE_FLEET, + USE_ONDEMAND, + compute_params.ECS_AMI_ID, + compute_params.TARGET_CAPACITY, + compute_params.MIN_CAPACITY, + compute_params.MAX_CAPACITY, + vpc_params.APP_SUBNETS, + vpc_params.VPC_ID, + CLUSTER_NAME, + ], + ) + if tags and tags[0]: + LOG.info("adding tags") + add_parameters(template, tags[0]) + template.add_condition( + compute_conditions.MAX_IS_MIN_T, compute_conditions.MAX_IS_MIN + ) + template.add_condition( + cfn_conditions.USE_SPOT_CON_T, cfn_conditions.USE_SPOT_CON + ) + launch_template = add_hosts_resources(template) + add_spotfleet_stack( + template, region_azs, compose_content, launch_template, tags, **kwargs + ) + if tags and tags[1]: + for resource in template.resources: + add_object_tags(template.resources[resource], tags[1]) + return template diff --git a/ecs_composex/compute/hosts_template.py b/ecs_composex/compute/hosts_template.py index ee89ebb1b..b15d0b7f3 100644 --- a/ecs_composex/compute/hosts_template.py +++ b/ecs_composex/compute/hosts_template.py @@ -11,39 +11,23 @@ https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html """ -from troposphere import ( - cloudformation, Tags, Sub, - Base64, Ref, GetAtt, Join, If -) -from troposphere.ec2 import ( - Monitoring, - IamInstanceProfile -) +from troposphere import cloudformation, Tags, Sub, Base64, Ref, GetAtt, Join +from troposphere.ec2 import Monitoring, IamInstanceProfile from troposphere.ec2 import ( SecurityGroup, LaunchTemplate, LaunchTemplateData, TagSpecifications, EBSBlockDevice, - LaunchTemplateBlockDeviceMapping -) -from troposphere.iam import ( - Role, - Policy, - InstanceProfile + LaunchTemplateBlockDeviceMapping, ) +from troposphere.iam import Role, Policy, InstanceProfile -from ecs_composex.compute import cluster_params -from ecs_composex.compute.cluster_params import ( - HOST_PROFILE_T, - HOST_ROLE_T, - NODES_SG_T -) -from ecs_composex.common.cfn_conditions import USE_STACK_NAME_CON_T -from ecs_composex.common.cfn_params import ROOT_STACK_NAME_T, ROOT_STACK_NAME +from ecs_composex.compute import compute_params +from ecs_composex.compute.compute_params import HOST_PROFILE_T, HOST_ROLE_T, NODES_SG_T +from ecs_composex.ecs.ecs_params import CLUSTER_NAME_T from ecs_composex.iam import service_role_trust_policy from ecs_composex.vpc import vpc_params -from ecs_composex.ecs.ecs_params import CLUSTER_NAME_T def add_hosts_profile(template): @@ -57,52 +41,46 @@ def add_hosts_profile(template): :rtype: troposphere.iam.Role """ ecs_policy = Policy( - PolicyName='AllowEcsSpecific', + PolicyName="AllowEcsSpecific", PolicyDocument={ - 'Version': '2012-10-17', - 'Statement': [ + "Version": "2012-10-17", + "Statement": [ { - 'Effect': 'Allow', - 'Action': [ + "Effect": "Allow", + "Action": [ "ecs:RegisterContainerInstance", "ecs:UpdateContainerInstancesState", - "ecs:DeregisterContainerInstance" + "ecs:DeregisterContainerInstance", ], - 'Resource': [ + "Resource": [ Sub( "arn:${AWS::Partition}:ecs:${AWS::Region}:${AWS::AccountId}:" - f"cluster/${{{ROOT_STACK_NAME_T}}}" + f"cluster/${{{CLUSTER_NAME_T}}}" ) - ] + ], }, { - 'Effect': 'Allow', - 'Action': [ + "Effect": "Allow", + "Action": [ "ecs:StartTelemetrySession", "ecs:DiscoverPollEndpoint", "ecs:Submit*", - "ecs:Poll" + "ecs:Poll", ], - 'Resource': ['*'] - } - ] - } + "Resource": ["*"], + }, + ], + }, ) role = Role( 
HOST_ROLE_T, template=template, - AssumeRolePolicyDocument=service_role_trust_policy('ec2'), - ManagedPolicyArns=[ - "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM" - ], - Policies=[ecs_policy] + AssumeRolePolicyDocument=service_role_trust_policy("ec2"), + ManagedPolicyArns=["arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM"], + Policies=[ecs_policy], ) InstanceProfile( - HOST_PROFILE_T, - template=template, - Roles=[ - Ref(role) - ], + HOST_PROFILE_T, template=template, Roles=[Ref(role)], ) return role @@ -120,8 +98,8 @@ def add_hosts_security_group(template): return SecurityGroup( NODES_SG_T, template=template, - GroupDescription=Sub(f"Group for hosts in ${{{ROOT_STACK_NAME_T}}}"), - VpcId=Ref(vpc_params.VPC_ID) + GroupDescription=Sub(f"Group for hosts in ${{{CLUSTER_NAME_T}}}"), + VpcId=Ref(vpc_params.VPC_ID), ) @@ -155,173 +133,147 @@ def add_launch_template(template, hosts_sg): # ) launch_template = LaunchTemplate( - 'LaunchTemplate', + "LaunchTemplate", template=template, Metadata=cloudformation.Metadata( cloudformation.Init( cloudformation.InitConfigSets( - default=[ - 'vmpackages', - 'dockerconfig', - 'ecsconfig', - 'vmservices' - ] + default=["awspackages", "dockerconfig", "ecsconfig", "awsservices"] ), - vmpackages=cloudformation.InitConfig( - packages={ - 'yum': { - "awslogs": [], - "amazon-ssm-agent": [] - } - }, + awspackages=cloudformation.InitConfig( + packages={"yum": {"awslogs": [], "amazon-ssm-agent": []}}, commands={ - '001-check-packages': { - 'command': 'rpm -qa | grep amazon' - }, - '002-check-packages': { - 'command': 'rpm -qa | grep aws' - } - } + "001-check-packages": {"command": "rpm -qa | grep amazon"}, + "002-check-packages": {"command": "rpm -qa | grep aws"}, + }, ), - vmservices=cloudformation.InitConfig( + awsservices=cloudformation.InitConfig( services={ "sysvinit": { - "amazon-ssm-agent": { - "enabled": True, - "ensureRunning": True - } + "amazon-ssm-agent": {"enabled": True, "ensureRunning": True} } } ), dockerconfig=cloudformation.InitConfig( commands={ - '001-stop-docker': { - 'command': 'systemctl stop docker' - }, - '098-reload-systemd': { - 'command': 'systemctl daemon-reload' - } + "001-stop-docker": {"command": "systemctl stop docker"}, + "098-reload-systemd": {"command": "systemctl daemon-reload"}, }, files={ - '/etc/sysconfig/docker': { - 'owner': 'root', - 'group': 'root', - 'mode': '644', - 'content': Join( - '\n', + "/etc/sysconfig/docker": { + "owner": "root", + "group": "root", + "mode": "644", + "content": Join( + "\n", [ - 'DAEMON_MAXFILES=1048576', + "DAEMON_MAXFILES=1048576", Join( - ' ', - [ - "OPTIONS=--default-ulimit nofile=1024:4096" - ] + " ", + ["OPTIONS=--default-ulimit nofile=1024:4096"], ), - 'DAEMON_PIDFILE_TIMEOUT=10', - '#EOF', - '' - ] - ) + "DAEMON_PIDFILE_TIMEOUT=10", + "#EOF", + "", + ], + ), } }, services={ - 'sysvinit': { - 'docker': { - 'enabled': True, - 'ensureRunning': True, - 'files': [ - '/etc/sysconfig/docker' - ], - 'commands': [ - '098-reload-systemd' - ] + "sysvinit": { + "docker": { + "enabled": True, + "ensureRunning": True, + "files": ["/etc/sysconfig/docker"], + "commands": ["098-reload-systemd"], } } - } + }, ), ecsconfig=cloudformation.InitConfig( files={ - '/etc/ecs/ecs.config': { - 'owner': 'root', - 'group': 'root', - 'mode': '644', - 'content': Join('\n', [ - Sub(f"ECS_CLUSTER=${{{CLUSTER_NAME_T}}}"), - 'ECS_ENABLE_TASK_IAM_ROLE=true', - 'ECS_ENABLE_SPOT_INSTANCE_DRAINING=true', - 'ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST=true', - 'ECS_ENABLE_CONTAINER_METADATA=true', - 
'ECS_ENABLE_UNTRACKED_IMAGE_CLEANUP=true', - 'ECS_UPDATES_ENABLED=true', - 'ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=15m', - 'ECS_IMAGE_CLEANUP_INTERVAL=10m', - 'ECS_NUM_IMAGES_DELETE_PER_CYCLE=100', - 'ECS_ENABLE_TASK_ENI=true', - 'ECS_AWSVPC_BLOCK_IMDS=true', - 'ECS_TASK_METADATA_RPS_LIMIT=300,400', - 'ECS_ENABLE_AWSLOGS_EXECUTIONROLE_OVERRIDE=true', - 'ECS_AVAILABLE_LOGGING_DRIVERS=["awslogs", "json-file"]', - '#EOF' - ]) + "/etc/ecs/ecs.config": { + "owner": "root", + "group": "root", + "mode": "644", + "content": Join( + "\n", + [ + Sub(f"ECS_CLUSTER=${{{CLUSTER_NAME_T}}}"), + "ECS_ENABLE_TASK_IAM_ROLE=true", + "ECS_ENABLE_SPOT_INSTANCE_DRAINING=true", + "ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST=true", + "ECS_ENABLE_CONTAINER_METADATA=true", + "ECS_ENABLE_UNTRACKED_IMAGE_CLEANUP=true", + "ECS_UPDATES_ENABLED=true", + "ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=15m", + "ECS_IMAGE_CLEANUP_INTERVAL=10m", + "ECS_NUM_IMAGES_DELETE_PER_CYCLE=100", + "ECS_ENABLE_TASK_ENI=true", + "ECS_AWSVPC_BLOCK_IMDS=true", + "ECS_TASK_METADATA_RPS_LIMIT=300,400", + "ECS_ENABLE_AWSLOGS_EXECUTIONROLE_OVERRIDE=true", + 'ECS_AVAILABLE_LOGGING_DRIVERS=["awslogs", "json-file"]', + "#EOF", + ], + ), } }, commands={ - '0001-restartecs': { - 'command': 'systemctl --no-block restart ecs' + "0001-restartecs": { + "command": "systemctl --no-block restart ecs" } - } - ) + }, + ), ) ), LaunchTemplateData=LaunchTemplateData( BlockDeviceMappings=[ LaunchTemplateBlockDeviceMapping( - DeviceName='/dev/xvda', - Ebs=EBSBlockDevice( - DeleteOnTermination=True, - Encrypted=True - ) + DeviceName="/dev/xvda", + Ebs=EBSBlockDevice(DeleteOnTermination=True, Encrypted=True), ) ], - ImageId=Ref(cluster_params.ECS_AMI_ID), - InstanceInitiatedShutdownBehavior='terminate', + ImageId=Ref(compute_params.ECS_AMI_ID), + InstanceInitiatedShutdownBehavior="terminate", IamInstanceProfile=IamInstanceProfile( Arn=Sub(f"${{{HOST_PROFILE_T}.Arn}}") ), TagSpecifications=[ TagSpecifications( - ResourceType='instance', + ResourceType="instance", Tags=Tags( - Name=Sub(f"EcsNodes-${{{ROOT_STACK_NAME_T}}}"), - StackName=Ref('AWS::StackName'), - StackId=Ref('AWS::StackId') - ) + Name=Sub(f"EcsNodes-${{{CLUSTER_NAME_T}}}"), + StackName=Ref("AWS::StackName"), + StackId=Ref("AWS::StackId"), + ), ) ], - InstanceType='m5a.large', + InstanceType="m5a.large", Monitoring=Monitoring(Enabled=True), - SecurityGroupIds=[ - GetAtt(hosts_sg, 'GroupId') - ], - UserData=Base64(Join('\n', [ - '#!/usr/bin/env bash', - 'export PATH=$PATH:/opt/aws/bin', - 'cfn-init -v || yum install aws-cfn-bootstrap -y', - Sub(f'cfn-init --region ${{AWS::Region}} -r LaunchTemplate -s ${{AWS::StackName}}'), - # 'if [ $? -ne 0 ]; then', - # Sub(f'cfn-signal -e 1 -r "Failed to bootstrap" \'${{{wait_handle.title}}}\''), - # 'halt', - # 'else', - # Sub(f'cfn-signal -e 0 -r "Successfully bootstrapped" \'${{{wait_handle.title}}}\''), - # 'fi', - '# EOF' - ])) + SecurityGroupIds=[GetAtt(hosts_sg, "GroupId")], + UserData=Base64( + Join( + "\n", + [ + "#!/usr/bin/env bash", + "export PATH=$PATH:/opt/aws/bin", + "cfn-init -v || yum install aws-cfn-bootstrap -y", + Sub( + f"cfn-init --region ${{AWS::Region}} -r LaunchTemplate -s ${{AWS::StackName}}" + ), + # 'if [ $? 
-ne 0 ]; then', + # Sub(f'cfn-signal -e 1 -r "Failed to bootstrap" \'${{{wait_handle.title}}}\''), + # 'halt', + # 'else', + # Sub(f'cfn-signal -e 0 -r "Successfully bootstrapped" \'${{{wait_handle.title}}}\''), + # 'fi', + "# EOF", + ], + ) + ), ), - LaunchTemplateName=If( - USE_STACK_NAME_CON_T, - Ref('AWS::StackName'), - Ref(ROOT_STACK_NAME) - ) + LaunchTemplateName=Ref(CLUSTER_NAME_T), ) return launch_template @@ -331,10 +283,9 @@ def add_hosts_resources(template): :param template: the ecs_cluster template to add the hosts config to :type template: troposphere.Template - :param cluster: the cluster to add the hosts to - :type cluster: troposphere.ecs.Cluster - :returns: launch_template + :return: launch_template + :rtype: troposphere.ec2.LaunchTemplate """ hosts_sg = add_hosts_security_group(template) add_hosts_profile(template) diff --git a/ecs_composex/compute/spot_fleet.py b/ecs_composex/compute/spot_fleet.py index 4daf8ca92..1aa7c5b7c 100644 --- a/ecs_composex/compute/spot_fleet.py +++ b/ecs_composex/compute/spot_fleet.py @@ -4,49 +4,40 @@ Functions to add to the Cluster template when people want to use SpotFleet for their ECS Cluster. """ -from troposphere import ( - Ref, Sub, GetAtt, Select, If, Split -) +from troposphere import Ref, Sub, GetAtt, Select, If, Split from troposphere.ec2 import ( - SpotFleet, SpotFleetRequestConfigData, + SpotFleet, + SpotFleetRequestConfigData, LaunchTemplate, LaunchTemplateConfigs, LaunchTemplateSpecification, - LaunchTemplateOverrides + LaunchTemplateOverrides, ) from troposphere.applicationautoscaling import ( - ScalableTarget, StepAdjustment, + ScalableTarget, + StepAdjustment, StepScalingPolicyConfiguration, - ScalingPolicy + ScalingPolicy, ) -from troposphere.cloudwatch import ( - Alarm, - MetricDimension as CwMetricDimension -) +from troposphere.cloudwatch import Alarm, MetricDimension as CwMetricDimension from troposphere.iam import Role from ecs_composex.common import LOG, build_template from ecs_composex.iam import service_role_trust_policy from ecs_composex.vpc import vpc_params -from ecs_composex.compute import cluster_params, cluster_conditions +from ecs_composex.compute import compute_params, compute_conditions DEFAULT_SPOT_CONFIG = { - 'use_spot': True, - 'bid_price': 0.42, - 'spot_instance_types': { - 'm5a.xlarge': { - 'weight': 3 - }, - 'm5a.2xlarge': { - 'weight': 7 - }, - 'm5a.4xlarge': { - 'weight': 15 - } - } + "use_spot": True, + "bid_price": 0.42, + "spot_instance_types": { + "m5a.xlarge": {"weight": 3}, + "m5a.2xlarge": {"weight": 7}, + "m5a.4xlarge": {"weight": 15}, + }, } @@ -60,28 +51,39 @@ def add_fleet_role(template): :rtype: troposphere.iam.Role """ role = Role( - 'IamFleetRole', + "IamFleetRole", template=template, - AssumeRolePolicyDocument=service_role_trust_policy('spotfleet'), + AssumeRolePolicyDocument=service_role_trust_policy("spotfleet"), ManagedPolicyArns=[ - 'arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetAutoscaleRole', - 'arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetTaggingRole' - ] + "arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetAutoscaleRole", + "arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetTaggingRole", + ], ) return role def define_overrides(region_azs, lt_id, lt_version, spot_config): """Function to generate Overrides for the SpotFleet - + :param region_azs: Availability Zones of the region to create the hosts into + :type region_azs: list + :param lt_id: Launch template ID + :param lt_version: Launch Template Version + :param spot_config: SpotFleet configuration 
for pricing and instance types + :return: configs + :type: list """ if isinstance(lt_id, LaunchTemplate): template_id = Ref(lt_id) - template_version = GetAtt(lt_id, 'LatestVersionNumber') + template_version = GetAtt(lt_id, "LatestVersionNumber") elif not isinstance(lt_id, Ref) or not isinstance(lt_version, Ref): raise TypeError( - f"The launch template and/or version are of type", lt_id, lt_version, - "Expected", LaunchTemplate, "Or", Ref + f"The launch template and/or version are of type", + lt_id, + lt_version, + "Expected", + LaunchTemplate, + "Or", + Ref, ) else: template_id = lt_id @@ -92,21 +94,22 @@ def define_overrides(region_azs, lt_id, lt_version, spot_config): LOG.debug(spot_config) for count, az in enumerate(region_azs): LOG.debug(az) - for itype in spot_config['spot_instance_types']: + for itype in spot_config["spot_instance_types"]: overrides.append( LaunchTemplateOverrides( - SubnetId=Select(count, Split(',', vpc_params.APP_SUBNETS_IMPORT)), - WeightedCapacity=spot_config['spot_instance_types'][itype]['weight'], - InstanceType=itype + SubnetId=Select(count, Split(",", vpc_params.APP_SUBNETS_IMPORT)), + WeightedCapacity=spot_config["spot_instance_types"][itype][ + "weight" + ], + InstanceType=itype, ) ) configs.append( LaunchTemplateConfigs( LaunchTemplateSpecification=LaunchTemplateSpecification( - LaunchTemplateId=template_id, - Version=template_version + LaunchTemplateId=template_id, Version=template_version ), - Overrides=overrides + Overrides=overrides, ) ) return configs @@ -129,55 +132,49 @@ def add_scaling_policies(template, spot_fleet, role): f"SpotFleetScalingTarget{spot_fleet.title}", template=template, MaxCapacity=If( - cluster_conditions.MAX_IS_MIN_T, - Ref(cluster_params.MIN_CAPACITY), - Ref(cluster_params.MAX_CAPACITY) + compute_conditions.MAX_IS_MIN_T, + Ref(compute_params.MIN_CAPACITY), + Ref(compute_params.MAX_CAPACITY), ), - MinCapacity=Ref(cluster_params.MIN_CAPACITY), - ResourceId=Sub(f'spot-fleet-request/${{{spot_fleet.title}}}'), - RoleARN=GetAtt(role, 'Arn'), - ServiceNamespace='ec2', - ScalableDimension='ec2:spot-fleet-request:TargetCapacity' + MinCapacity=Ref(compute_params.MIN_CAPACITY), + ResourceId=Sub(f"spot-fleet-request/${{{spot_fleet.title}}}"), + RoleARN=GetAtt(role, "Arn"), + ServiceNamespace="ec2", + ScalableDimension="ec2:spot-fleet-request:TargetCapacity", ) scale_in_policy = ScalingPolicy( f"FleetScalingIn{spot_fleet.title}", template=template, - PolicyName=Sub(f'${{{spot_fleet.title}}}ScalingIn'), - PolicyType='StepScaling', + PolicyName=Sub(f"${{{spot_fleet.title}}}ScalingIn"), + PolicyType="StepScaling", ScalingTargetId=Ref(target), StepScalingPolicyConfiguration=StepScalingPolicyConfiguration( - AdjustmentType='ChangeInCapacity', + AdjustmentType="ChangeInCapacity", Cooldown=300, - MetricAggregationType='Average', + MetricAggregationType="Average", StepAdjustments=[ - StepAdjustment( - MetricIntervalLowerBound=10, - ScalingAdjustment='-1' - ) - ] - ) + StepAdjustment(MetricIntervalLowerBound=10, ScalingAdjustment="-1") + ], + ), ) scale_out_policy = ScalingPolicy( f"FleetScalingOut{spot_fleet.title}", template=template, - PolicyName=Sub(f'${{{spot_fleet.title}}}CpuAverageScaleOut'), - PolicyType='StepScaling', + PolicyName=Sub(f"${{{spot_fleet.title}}}CpuAverageScaleOut"), + PolicyType="StepScaling", ScalingTargetId=Ref(target), StepScalingPolicyConfiguration=StepScalingPolicyConfiguration( - AdjustmentType='ChangeInCapacity', + AdjustmentType="ChangeInCapacity", Cooldown=300, - MetricAggregationType='Average', + 
MetricAggregationType="Average", StepAdjustments=[ - StepAdjustment( - MetricIntervalLowerBound=10, - ScalingAdjustment='1' - ) - ] - ) + StepAdjustment(MetricIntervalLowerBound=10, ScalingAdjustment="1") + ], + ), ) - return (scale_in_policy, scale_out_policy) + return scale_in_policy, scale_out_policy def define_default_cw_alarms(template, spot_fleet, scaling_set): @@ -198,51 +195,37 @@ def define_default_cw_alarms(template, spot_fleet, scaling_set): f"LowCpuAverageAlarm{spot_fleet.title}", template=template, ActionsEnabled=True, - AlarmActions=[ - Ref(scaling_set[0]) - ], + AlarmActions=[Ref(scaling_set[0])], AlarmDescription=Sub(f"LOW CPU USAGE FOR ${{{spot_fleet.title}}}"), - AlarmName=Sub(f'CPU_AVG_LOW_${{{spot_fleet.title}}}'), - ComparisonOperator='LessThanOrEqualToThreshold', - Dimensions=[ - CwMetricDimension( - Name='FleetRequestId', - Value=Ref(spot_fleet) - ) - ], + AlarmName=Sub(f"CPU_AVG_LOW_${{{spot_fleet.title}}}"), + ComparisonOperator="LessThanOrEqualToThreshold", + Dimensions=[CwMetricDimension(Name="FleetRequestId", Value=Ref(spot_fleet))], EvaluationPeriods=5, Period=60, - Namespace='AWS/EC2Spot', - MetricName='CPUUtilization', - Statistic='Average', - Threshold='25', - Unit='Percent', - TreatMissingData='notBreaching' + Namespace="AWS/EC2Spot", + MetricName="CPUUtilization", + Statistic="Average", + Threshold="25", + Unit="Percent", + TreatMissingData="notBreaching", ) Alarm( - f'HighCpuAverageAlarm{spot_fleet.title}', + f"HighCpuAverageAlarm{spot_fleet.title}", ActionsEnabled=True, - AlarmActions=[ - Ref(scaling_set[1]) - ], + AlarmActions=[Ref(scaling_set[1])], AlarmDescription=Sub(f"LOW CPU USAGE FOR ${{{spot_fleet.title}}}"), - AlarmName=Sub(f'CPU_AVG_HIGH_${{{spot_fleet.title}}}'), - ComparisonOperator='GreaterThanOrEqualToThreshold', - Dimensions=[ - CwMetricDimension( - Name='FleetRequestId', - Value=Ref(spot_fleet) - ) - ], + AlarmName=Sub(f"CPU_AVG_HIGH_${{{spot_fleet.title}}}"), + ComparisonOperator="GreaterThanOrEqualToThreshold", + Dimensions=[CwMetricDimension(Name="FleetRequestId", Value=Ref(spot_fleet))], EvaluationPeriods=5, Period=60, - Namespace='AWS/EC2Spot', - MetricName='CPUUtilization', - Statistic='Average', - Threshold='65', - Unit='Percent', - TreatMissingData='notBreaching' + Namespace="AWS/EC2Spot", + MetricName="CPUUtilization", + Statistic="Average", + Threshold="65", + Unit="Percent", + TreatMissingData="notBreaching", ) @@ -257,22 +240,22 @@ def define_spot_fleet(template, region_azs, lt_id, lt_version, **kwargs): :returns: NIL """ - configs = define_overrides(region_azs, lt_id, lt_version, kwargs['spot_config']) + configs = define_overrides(region_azs, lt_id, lt_version, kwargs["spot_config"]) role = add_fleet_role(template) fleet = SpotFleet( f"EcsClusterFleet", template=template, SpotFleetRequestConfigData=SpotFleetRequestConfigData( - AllocationStrategy='diversified', - ExcessCapacityTerminationPolicy='default', - IamFleetRole=GetAtt(role, 'Arn'), - InstanceInterruptionBehavior='terminate', - TargetCapacity=Ref(cluster_params.TARGET_CAPACITY), - Type='maintain', - SpotPrice=str(kwargs['spot_config']['bid_price']), + AllocationStrategy="diversified", + ExcessCapacityTerminationPolicy="default", + IamFleetRole=GetAtt(role, "Arn"), + InstanceInterruptionBehavior="terminate", + TargetCapacity=Ref(compute_params.TARGET_CAPACITY), + Type="maintain", + SpotPrice=str(kwargs["spot_config"]["bid_price"]), ReplaceUnhealthyInstances=True, LaunchTemplateConfigs=configs, - ) + ), ) scaling_set = add_scaling_policies(template, fleet, role) 
define_default_cw_alarms(template, fleet, scaling_set) @@ -283,20 +266,19 @@ def generate_spot_fleet_template(region_azs, **kwargs): Generates a standalone template for SpotFleet """ template = build_template( - 'Template For SpotFleet As Part Of EcsCluster', + "Template For SpotFleet As Part Of EcsCluster", [ - cluster_params.LAUNCH_TEMPLATE_ID, - cluster_params.LAUNCH_TEMPLATE_VersionNumber, - cluster_params.MIN_CAPACITY, - cluster_params.MAX_CAPACITY, - cluster_params.TARGET_CAPACITY - ] + compute_params.LAUNCH_TEMPLATE_ID, + compute_params.LAUNCH_TEMPLATE_VersionNumber, + compute_params.MIN_CAPACITY, + compute_params.MAX_CAPACITY, + compute_params.TARGET_CAPACITY, + ], ) template.add_condition( - cluster_conditions.MAX_IS_MIN_T, - cluster_conditions.MAX_IS_MIN + compute_conditions.MAX_IS_MIN_T, compute_conditions.MAX_IS_MIN ) - lt_id = Ref(cluster_params.LAUNCH_TEMPLATE_ID) - lt_version = Ref(cluster_params.LAUNCH_TEMPLATE_VersionNumber) + lt_id = Ref(compute_params.LAUNCH_TEMPLATE_ID) + lt_version = Ref(compute_params.LAUNCH_TEMPLATE_VersionNumber) define_spot_fleet(template, region_azs, lt_id, lt_version, **kwargs) return template diff --git a/ecs_composex/root.py b/ecs_composex/root.py index e35b309d1..3c53c0d23 100644 --- a/ecs_composex/root.py +++ b/ecs_composex/root.py @@ -1,31 +1,32 @@ # -*- coding: utf-8 -*- """Module to generate a full stack with VPC & Cluster.""" -import json - import boto3 from troposphere import Ref, GetAtt, If, Join from troposphere.cloudformation import Stack from troposphere.ecs import Cluster from ecs_composex import XFILE_DEST -from ecs_composex.common import LOG, add_parameters, validate_kwargs +from ecs_composex.common import ( + LOG, add_parameters, validate_kwargs, build_parameters_file, + build_default_stack_parameters +) from ecs_composex.common import ( build_template, KEYISSET, load_composex_file ) from ecs_composex.common.cfn_params import ( - ROOT_STACK_NAME_T + ROOT_STACK_NAME_T, + USE_FLEET, USE_FLEET_T, + USE_ONDEMAND, USE_ONDEMAND_T ) from ecs_composex.common.tagging import generate_tags_parameters, add_object_tags from ecs_composex.common.templates import upload_template -from ecs_composex.compute import create_cluster_template -from ecs_composex.compute.cluster_params import ( +from ecs_composex.compute import create_compute_stack +from ecs_composex.compute.compute_params import ( TARGET_CAPACITY_T, TARGET_CAPACITY, - MIN_CAPACITY_T, - USE_FLEET, USE_FLEET_T, - USE_ONDEMAND, USE_ONDEMAND_T + MIN_CAPACITY_T ) from ecs_composex.ecs import ecs_params, create_services_templates from ecs_composex.ecs.ecs_conditions import ( @@ -44,28 +45,6 @@ ROOT_CLUSTER_NAME = 'EcsCluster' -def build_parameters_file(params: list, parameter_name: str, parameter_value): - """ - Function to build arguments file to pass onto CFN. 
- Adds the parameter key/value so it can be written to file afterwards - - :param params: list of parameters - :type params: list - :param parameter_name: key of the parameter - :type parameter_name: str - :param parameter_value: value of the parameter - :type parameter_value: str or int or list - """ - if params is None: - params = [] - if isinstance(parameter_value, (int, float)): - parameter_value = str(parameter_value) - params.append({ - "ParameterKey": parameter_name, - "ParameterValue": parameter_value - }) - - def generate_vpc_parameters(template, params, **kwargs): """Function to add the VPC arguments to the root stack :param params: list of parameters @@ -109,6 +88,8 @@ def add_vpc_to_root(root_template, session, tags_params=None, **kwargs): :return: vpc_stack :rtype: troposphere.cloudformation.Stack """ + if tags_params is None: + tags_params = () vpc_template = create_vpc_template( session=session, **kwargs @@ -137,24 +118,38 @@ def add_vpc_to_root(root_template, session, tags_params=None, **kwargs): return vpc_stack -def add_cluster_to_root(root_template, params=None, vpc_stack=None, session=None, **kwargs): +def add_compute(root_template, dependencies, params, vpc_stack=None, tags=None, session=None, **kwargs): """ - Function to add Cluster stack to root one. + Function to add Cluster stack to root one. If any of the options related to compute resources are set in the CLI + then this function will generate and add the compute template to the root stack template + :param dependencies: list of dependencies that need created before creating the Compute stack + :type dependencies: list :param root_template: the root template :type root_template: troposphere.Template - :param params: - :param vpc_stack: - :param session: - :param kwargs: + :param params: list of parameters + :param vpc_stack: the VPC stack if any to pull the attributes from + :param session: override boto3 session + :param tags: tags to add to the stack + :type tags: tuple - :return: + :return: compute_stack, the Compute stack + :rtype: troposphere.cloudformation.Stack """ + create_compute = False + args = [USE_FLEET_T, USE_ONDEMAND_T, "AddComputeResources"] + for arg in args: + if KEYISSET(arg, kwargs): + create_compute = True + if not create_compute: + return if params is None: params = [] + if tags is None: + tags = () depends_on = [] root_template.add_parameter(TARGET_CAPACITY) - cluster_template = create_cluster_template(session, **kwargs) + compute_template = create_compute_stack(session, **kwargs) parameters = { ROOT_STACK_NAME_T: Ref('AWS::StackName'), TARGET_CAPACITY_T: Ref(TARGET_CAPACITY), @@ -162,6 +157,8 @@ def add_cluster_to_root(root_template, params=None, vpc_stack=None, session=None USE_FLEET_T: Ref(USE_FLEET), USE_ONDEMAND_T: Ref(USE_ONDEMAND) } + for tag in tags[0]: + parameters.update({tag.title: Ref(tag.title)}) if vpc_stack is not None: depends_on.append(vpc_stack) parameters.update({ @@ -169,29 +166,30 @@ def add_cluster_to_root(root_template, params=None, vpc_stack=None, session=None vpc_params.APP_SUBNETS_T: GetAtt(vpc_stack, f"Outputs.{vpc_params.APP_SUBNETS_T}"), }) else: - # Setup parameters for cluster stack without VPC pre-created - if not kwargs['AppSubnets']: - raise ValueError("No application subnets were provided to create the cluster") - elif params is None: + # Setup parameters for compute stack without VPC pre-created + if not kwargs[vpc_params.APP_SUBNETS_T]: + raise ValueError("No application subnets were provided to create the compute") + if params is None: raise 
TypeError("params is ", params, "expected", list) root_template.add_parameter(vpc_params.APP_SUBNETS) parameters.update({ vpc_params.APP_SUBNETS_T: Ref(vpc_params.APP_SUBNETS) }) - build_parameters_file(params, vpc_params.APP_SUBNETS_T, kwargs['AppSubnets']) + build_parameters_file(params, vpc_params.APP_SUBNETS_T, kwargs[vpc_params.APP_SUBNETS_T]) - cluster_stack = root_template.add_resource(Stack( + compute_stack = root_template.add_resource(Stack( 'Compute', TemplateURL=upload_template( - template_body=cluster_template.to_json(), + template_body=compute_template[0].to_json(), bucket_name=kwargs['BucketName'], - file_name='cluster.json', - session=session + file_name='compute.json', + session=session, ), - Parameters=parameters - ) - ) - return cluster_stack + Parameters=parameters, + DependsOn=dependencies + )) + dependencies.append('Compute') + return compute_stack def add_x_resources(template, session, tags=None, **kwargs): @@ -261,7 +259,6 @@ def add_services(template, depends, session, vpc_stack=None, **kwargs): vpc_params.VPC_ID_T: GetAtt(vpc_stack, f'Outputs.{vpc_params.VPC_ID_T}'), vpc_params.PUBLIC_SUBNETS_T: GetAtt(vpc_stack, f'Outputs.{vpc_params.PUBLIC_SUBNETS_T}'), vpc_params.APP_SUBNETS_T: GetAtt(vpc_stack, f'Outputs.{vpc_params.APP_SUBNETS_T}'), - }) else: parameters.update({ @@ -303,14 +300,19 @@ def add_ecs_cluster(template, depends_on=None): ) -def init_root_template(session, stack_params, tags=None, **kwargs): +def init_root_template(stack_params, tags=None): """ Function to initialize the root template - :param session: - :param tags_params: - :param kwargs: - :return: + + :param stack_params: stack parameters + :type stack_params: list + :param tags: tags and parameters to add to the template + :type tags: tuple + + :return: template + :rtype: troposphere.Template """ + template = build_template( 'Root template generated via ECS ComposeX', [USE_FLEET, USE_ONDEMAND, CLUSTER_NAME] @@ -334,16 +336,16 @@ def generate_full_template(session=None, **kwargs): if session is None: session = boto3.session.Session(region_name=kwargs['AwsRegion']) stack_params = [] + build_default_stack_parameters(stack_params, **kwargs) tags_params = generate_tags_parameters(load_composex_file(kwargs[XFILE_DEST])) - template = init_root_template(session, stack_params, tags_params, **kwargs) + template = init_root_template(stack_params, tags_params) LOG.debug(kwargs) validate_kwargs(['BucketName'], kwargs) - vpc_stack = None - depends = add_x_resources(template, session=session, **kwargs) + depends_on = add_x_resources(template, session=session, **kwargs) if KEYISSET('CreateVpc', kwargs): vpc_stack = add_vpc_to_root(template, session, tags_params[0], **kwargs) - depends.append(vpc_stack) + depends_on.append(vpc_stack) add_object_tags(vpc_stack, tags_params[1]) else: generate_vpc_parameters(template, stack_params, **kwargs) @@ -351,12 +353,11 @@ def generate_full_template(session=None, **kwargs): if KEYISSET(CLUSTER_NAME_T, kwargs): build_parameters_file(stack_params, CLUSTER_NAME_T, kwargs[CLUSTER_NAME_T]) if KEYISSET('CreateCluster', kwargs): - add_ecs_cluster(template, depends) - depends.append(ROOT_CLUSTER_NAME) - add_services(template, depends, session=session, vpc_stack=vpc_stack, **kwargs) + add_ecs_cluster(template, depends_on) + depends_on.append(ROOT_CLUSTER_NAME) + add_compute(template, depends_on, stack_params, vpc_stack, tags=tags_params, session=session, **kwargs) + add_services(template, depends_on, session=session, vpc_stack=vpc_stack, **kwargs) + for resource in 
template.resources: add_object_tags(template.resources[resource], tags_params[1]) - with open(kwargs['output_file'], 'w') as root_fd: - root_fd.write(template.to_json()) - with open(f"{kwargs['output_file'].split('.')[0]}.params.json", 'w') as params_fd: - params_fd.write(json.dumps(stack_params, indent=4)) + return template, stack_params diff --git a/examples/services_with_queues.yml b/examples/services_with_queues.yml index cbfdeb207..8717203b0 100644 --- a/examples/services_with_queues.yml +++ b/examples/services_with_queues.yml @@ -23,7 +23,6 @@ configs: composex: use_exports: false use_ssm: false - spot_config: bid_price: 0.42 use_spot: true diff --git a/setup.py b/setup.py index 6c8f2c4d9..5e0fd8153 100644 --- a/setup.py +++ b/setup.py @@ -39,6 +39,7 @@ entry_points={ 'console_scripts': [ 'ecs_composex=ecs_composex.cli:main', + 'ecs_composex-compute=ecs_composex.compute.cli:main', 'ecs_composex-vpc=ecs_composex.vpc.cli:main', 'ecs_composex-sqs=ecs_composex.sqs.cli:main' ] diff --git a/tests/services_with_queues.yml b/tests/services_with_queues.yml index 39ad417ea..98a042f3f 100644 --- a/tests/services_with_queues.yml +++ b/tests/services_with_queues.yml @@ -1,6 +1,5 @@ configs: - app01: ext_sources: - ipv4: 0.0.0.0/0 @@ -23,7 +22,6 @@ configs: composex: use_exports: false use_ssm: false - spot_config: bid_price: 0.42 use_spot: true @@ -34,6 +32,7 @@ configs: weight: 8 m5a.4xlarge: weight: 16 + # List like tags x-tags: - name: costcentre
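
A minimal sketch of how the helpers consolidated into ecs_composex.common by this patch fit together, assuming the package at this commit is installed; the template content, parameter values and output paths are illustrative only, not taken from the repository:

    import json

    from troposphere import Template

    from ecs_composex.common import build_parameters_file, write_template_to_file

    # Stand-in for the template normally returned by generate_full_template()
    # or create_compute_stack(), which now return a (template, stack_params) pair.
    template = Template()
    template.add_description("Example root template")

    stack_params = []
    # build_parameters_file() appends CFN-style entries to the list in place
    # and stringifies numeric values.
    build_parameters_file(stack_params, "UseSpotFleetHosts", "True")
    build_parameters_file(stack_params, "EcsTargetCapacity", 2)

    # write_template_to_file() renders YAML or JSON depending on the file extension.
    write_template_to_file(template, "/tmp/root.yml")

    # Same convention as the CLIs in this patch: the parameters file sits next to
    # the rendered template, named <output>.params.json.
    with open("/tmp/root.params.json", "w") as params_fd:
        params_fd.write(json.dumps(stack_params, indent=4))

The new ecs_composex-compute console script registered in setup.py wires create_compute_stack() to this same template-plus-parameters output flow.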