SQS Scaling #194

Merged
merged 7 commits on Oct 9, 2020
3 changes: 3 additions & 0 deletions ecs_composex/ecs/README.rst
@@ -27,6 +27,9 @@ Features that ECS ComposeX takes care of for you, if you needed to:
* Adds X-Ray side car when you need distributed tracing
* Calculates the compute requirements based on the docker-compose v3 declaration
* Supports to add IAM permission boundary for extended security precautions.
* Supports scaling definitions
* SQS-based step scaling
* Target tracking scaling for CPU/RAM

.. note::

41 changes: 40 additions & 1 deletion ecs_composex/ecs/SYNTAX.rst
@@ -146,8 +146,47 @@ which is used for cloudmap or the load-balancer to register the targets.
This is used for network healthchecks, not service healthcheck


scaling
^^^^^^^

This section allows you to define scaling for the ECS service.
For SQS-based step scaling, refer to the SQS documentation.

.. code-block:: yaml

    services:
      serviceA:
        x-configs:
          scaling:
            range: "1-10"
            target_scaling:
              cpu_target: 80

range
"""""

Range defines the minimum and maximum number of containers you will have running in the cluster.

.. code-block:: yaml

#Syntax
# range: "<min>-<max>"
# Example
range: "1-21"


allow_zero
"""""""""""

Boolean to allow scaling all the way down to 0 running containers. Perfect for cost savings and for a purely
event-driven architecture.

.. hint::

    If you set the range minimum above 0 and also set allow_zero to True, the minimum is overridden and set to 0.
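
For example (a sketch reusing serviceA from above), the following definition ends up with a minimum of 0 tasks even
though the range starts at 1:

.. code-block:: yaml

    services:
      serviceA:
        x-configs:
          scaling:
            range: "1-10"
            allow_zero: true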

target_scaling
^^^^^^^^^^^^^^
""""""""""""""

Allows you to define target scaling for the service based on CPU/RAM.
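
For instance, a sketch using the keys validated in ecs_service_config.py (cooldowns are in seconds):

.. code-block:: yaml

    services:
      serviceA:
        x-configs:
          scaling:
            range: "1-10"
            target_scaling:
              cpu_target: 75
              memory_target: 80
              scale_in_cooldown: 300
              scale_out_cooldown: 300
              disable_scale_in: false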

1 change: 0 additions & 1 deletion ecs_composex/ecs/ecs_container_config.py
@@ -35,7 +35,6 @@ def import_secrets(template, service, container, settings):
:param ecs_composex.common.settings.ComposeXSettings settings:
:return:
"""
print(service, type(service))
if not service.secrets:
return
if not keyisset("secrets", settings.compose_content):
158 changes: 158 additions & 0 deletions ecs_composex/ecs/ecs_scaling.py
@@ -0,0 +1,158 @@
# -*- coding: utf-8 -*-
# ECS ComposeX <https://github.com/lambda-my-aws/ecs_composex>
# Copyright (C) 2020 John Mille <john@lambda-my-aws.io>
# #
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# #
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# #
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""
Module to help generate target scaling policies for given alarms.
"""

import random
import string

from troposphere import Ref, AWS_NO_VALUE
from troposphere.applicationautoscaling import (
ScalingPolicy,
StepScalingPolicyConfiguration,
StepAdjustment,
)

from ecs_composex.common import LOG, keyisset
from ecs_composex.ecs.ecs_params import SERVICE_SCALING_TARGET


def generate_scaling_out_steps(steps, target):
"""

:param list steps:
:param tropsphere.applicationautoscaling.ScalingTarget target: The defined max in the Scalable Target
:return:
"""
unordered = []
allowed_keys = ["lower_bound", "upper_bound", "count"]
for step_def in steps:
if not all(key in allowed_keys for key in step_def.keys()):
raise KeyError(
"Step definition only allows", allowed_keys, "Got", step_def.keys()
)
if (
keyisset("upper_bound", step_def)
and step_def["lower_bound"] >= step_def["upper_bound"]
):
raise ValueError(
"The lower_bound value must strictly lower than the upper bound",
step_def,
)
unordered.append(step_def)
ordered = sorted(unordered, key=lambda i: i["lower_bound"])
if target and ordered[-1]["count"] > target.MaxCapacity:
LOG.warn(
f"The current maximum in your range is {target.MaxCapacity} whereas you defined {ordered[-1]['count']}"
" for step scaling. Adjusting to step scaling max."
)
setattr(target, "MaxCapacity", ordered[-1]["count"])
cfn_steps = []
pre_upper = 0
for step_def in ordered:
if pre_upper and not int(step_def["lower_bound"]) >= pre_upper:
raise ValueError(
f"The value for lower bound is {step_def['lower_bound']},"
f"which is higher than the previous upper_bound, {pre_upper}"
)
cfn_steps.append(
StepAdjustment(
MetricIntervalLowerBound=int(step_def["lower_bound"]),
MetricIntervalUpperBound=int(step_def["upper_bound"])
if keyisset("upper_bound", step_def)
else Ref(AWS_NO_VALUE),
ScalingAdjustment=int(step_def["count"]),
)
)
pre_upper = (
int(step_def["upper_bound"]) if keyisset("upper_bound", step_def) else None
)
if hasattr(cfn_steps[-1], "MetricIntervalUpperBound") and not isinstance(
getattr(cfn_steps[-1], "MetricIntervalUpperBound"), Ref
):
LOG.warn("The last upper bound shall not be set. Deleting value to comply}")
setattr(cfn_steps[-1], "MetricIntervalUpperBound", Ref(AWS_NO_VALUE))
return cfn_steps


def generate_alarm_scaling_out_policy(
service_name, service_template, scaling_def, scaling_source=None
):
"""
:param str service_name: The name of the service/family
:param troposphere.Template service_template:
:param dict scaling_def:
:param str scaling_source:
:return:
"""
if not keyisset("steps", scaling_def):
raise KeyError("No steps were defined in the scaling definition", scaling_def)
steps_definition = scaling_def["steps"]
length = 6
if not scaling_source:
scaling_source = "".join(
random.choice(string.ascii_lowercase) for _ in range(length)
)
scalable_target = service_template.resources[SERVICE_SCALING_TARGET]
step_adjustments = generate_scaling_out_steps(
steps_definition, target=scalable_target
)
policy = ScalingPolicy(
f"ScalingOutPolicy{scaling_source}{service_name}",
template=service_template,
PolicyName=f"ScalingOutPolicy{scaling_source}{service_name}",
PolicyType="StepScaling",
ScalingTargetId=Ref(SERVICE_SCALING_TARGET),
ServiceNamespace="ecs",
StepScalingPolicyConfiguration=StepScalingPolicyConfiguration(
AdjustmentType="ExactCapacity",
StepAdjustments=step_adjustments,
),
)
return policy


def reset_to_zero_policy(service_name, service_template, scaling_source=None):
"""

:return:
"""
length = 6
if not scaling_source:
scaling_source = "".join(
random.choice(string.ascii_lowercase) for _ in range(length)
)
policy = ScalingPolicy(
f"ScalingInPolicy{scaling_source}{service_name}",
template=service_template,
PolicyName=f"ScalingInPolicy{scaling_source}{service_name}",
PolicyType="StepScaling",
ScalingTargetId=Ref(SERVICE_SCALING_TARGET),
ServiceNamespace="ecs",
StepScalingPolicyConfiguration=StepScalingPolicyConfiguration(
AdjustmentType="ExactCapacity",
StepAdjustments=[
StepAdjustment(
MetricIntervalUpperBound=0,
ScalingAdjustment=0,
)
],
),
)
return policy
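
A minimal usage sketch (not part of this diff) of how the step definitions map to CloudFormation StepAdjustments,
assuming ecs_composex and troposphere are importable:

from ecs_composex.ecs.ecs_scaling import generate_scaling_out_steps

# Hypothetical step definition: metric 0-10 -> 1 task, 10-100 -> 5 tasks,
# 100 and above -> 10 tasks (the last step leaves upper_bound unset).
steps = [
    {"lower_bound": 0, "upper_bound": 10, "count": 1},
    {"lower_bound": 10, "upper_bound": 100, "count": 5},
    {"lower_bound": 100, "count": 10},
]

# With no scalable target to check against, the helper validates the keys and
# bounds, orders the steps and converts them into StepAdjustment objects.
for adjustment in generate_scaling_out_steps(steps, target=None):
    print(adjustment.to_dict())
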
13 changes: 6 additions & 7 deletions ecs_composex/ecs/ecs_service.py
@@ -405,13 +405,13 @@ def create_scalable_target(self):
"""
Method to automatically create a scalable target
"""
LOG.debug(self.config.target_scaling_config)
if self.config.target_scaling_config:
LOG.debug(self.config.scaling_range)
if self.config.scaling_range:
self.scalable_target = applicationautoscaling.ScalableTarget(
ecs_params.SERVICE_SCALING_TARGET,
template=self.template,
MaxCapacity=self.config.target_scaling_config["max"],
MinCapacity=self.config.target_scaling_config["min"],
MaxCapacity=self.config.scaling_range["max"],
MinCapacity=self.config.scaling_range["min"],
ScalableDimension="ecs:service:DesiredCount",
ServiceNamespace="ecs",
RoleARN=Sub(
@@ -423,11 +423,10 @@
f"service/${{{ecs_params.CLUSTER_NAME.title}}}/${{{self.ecs_service.title}.Name}}"
),
SuspendedState=applicationautoscaling.SuspendedState(
DynamicScalingInSuspended=self.config.target_scaling_config[
"disable_scale_in"
]
DynamicScalingInSuspended=False
),
)
if self.scalable_target and self.config.target_scaling_config:
if keyisset("cpu_target", self.config.target_scaling_config):
applicationautoscaling.ScalingPolicy(
"ServiceCpuTrackingPolicy",
59 changes: 35 additions & 24 deletions ecs_composex/ecs/ecs_service_config.py
@@ -204,12 +204,12 @@ class ServiceConfig(object):
"deploy",
"external_links",
]
target_scaling_key = "target_scaling"
scaling_key = "scaling"
required_keys = ["image"]

master_key = "x-configs"
composex_key = "composex"
valid_config_keys = ["network", "iam", "x-ray", "logging", target_scaling_key]
valid_config_keys = ["network", "iam", "x-ray", "logging", scaling_key]

network_defaults = {
"use_cloudmap": True,
@@ -240,6 +240,7 @@ def __init__(self, service, content, family_name=None):
self.use_appmesh = False
self.boundary = None
self.target_scaling_config = None
self.scaling_range = None
self.policies = []
self.managed_policies = []
self.container_start_condition = "START"
@@ -333,38 +334,27 @@ def add_managed_policies(self, policies):
policy_def = define_iam_policy(policy)
self.managed_policies.append(policy_def)

def init_target_scaling(self, config):
def set_target_scaling(self, config):
"""
Method to setup target scaling for the service.
Method to define target_scaling

:param dict config:
:return:
"""
LOG.debug("Setting target scaling")
scaling_configuration = {}
allowed_keys = {
"range": str,
"cpu_target": int,
"memory_target": int,
"lb_targets": int,
"scale_in_cooldown": int,
"scale_out_cooldown": int,
"disable_scale_in": bool,
}
if not all(key in list(allowed_keys.keys()) for key in config.keys()):
raise KeyError(
"Found invalid key. Got",
config,
"Allowed",
allowed_keys,
)
default_values = {
"scale_out_cooldown": 300,
"scale_in_cooldown": 300,
"disable_scale_in": False,
}
if not keyisset("range", config):
raise KeyError(
"Missing range property. Range should written as follows: {min}-{max}"
)
scaling_configuration = {}
for key in allowed_keys.keys():
if not keyisset(key, config) and keypresent(key, default_values):
scaling_configuration[key] = default_values[key]
@@ -379,15 +369,36 @@
"Expected",
allowed_keys[key],
)
scaling_configuration.update(
{
"max": int(config["range"].split("-")[-1]),
"min": int(config["range"].split("-")[0]),
}
)
LOG.debug(scaling_configuration)
self.target_scaling_config = scaling_configuration

def init_scaling(self, config):
"""
        Method to set up scaling (range, allow_zero, target_scaling) for the service.

        :param dict config:
        :return:
"""
LOG.debug("Setting target scaling")
allowed_keys = {"range": str, "target_scaling": dict, "allow_zero": bool}
if not all(key in list(allowed_keys.keys()) for key in config.keys()):
raise KeyError(
"Found invalid key. Got",
config,
"Allowed",
allowed_keys,
)
if not keyisset("range", config):
raise KeyError(
"Missing range property. Range should written as follows: {min}-{max}"
)
self.scaling_range = {
"max": int(config["range"].split("-")[-1]),
"min": int(config["range"].split("-")[0]),
}
if keyisset("allow_zero", config) and not self.scaling_range["min"] == 0:
self.scaling_range["min"] = 0
if keyisset("target_scaling", config):
self.set_target_scaling(config["target_scaling"])

def add_policies(self, policies):

for count, policy in enumerate(policies):