diff --git a/conf/clusters/prod.json b/conf/clusters/prod.json index 92bed88b6..661729734 100644 --- a/conf/clusters/prod.json +++ b/conf/clusters/prod.json @@ -18,16 +18,10 @@ } }, "kinesis_events": { - "enabled": true, - "batch_size": 100 + "batch_size": 100, + "enabled": true }, "stream_alert": { - "alert_processor": { - "current_version": "$LATEST", - "log_level": "info", - "memory": 128, - "timeout": 60 - }, "rule_processor": { "current_version": "$LATEST", "enable_metrics": true, diff --git a/conf/lambda.json b/conf/lambda.json index e38efff97..cb62742b9 100644 --- a/conf/lambda.json +++ b/conf/lambda.json @@ -1,10 +1,32 @@ { "alert_processor_config": { + "current_version": "$LATEST", "handler": "stream_alert.alert_processor.main.handler", + "log_level": "info", + "log_retention_days": 14, + "memory": 128, + "metric_alarms": { + "enabled": true, + "errors_alarm_evaluation_periods": 1, + "errors_alarm_period_secs": 120, + "errors_alarm_threshold": 0, + "throttles_alarm_evaluation_periods": 1, + "throttles_alarm_period_secs": 120, + "throttles_alarm_threshold": 0 + }, + "outputs": { + "aws-lambda": [], + "aws-s3": [] + }, "source_bucket": "PREFIX_GOES_HERE.streamalert.source", "source_current_hash": "", "source_object_key": "", - "third_party_libraries": [] + "third_party_libraries": [], + "timeout": 60, + "vpc_config": { + "security_group_ids": [], + "subnet_ids": [] + } }, "rule_processor_config": { "handler": "stream_alert.rule_processor.main.handler", diff --git a/conf/logs.json b/conf/logs.json index d56527926..d8ae5018d 100644 --- a/conf/logs.json +++ b/conf/logs.json @@ -1496,4 +1496,4 @@ } } } -} +} \ No newline at end of file diff --git a/conf/sources.json b/conf/sources.json index 240da5ccf..0b0f2f8a9 100644 --- a/conf/sources.json +++ b/conf/sources.json @@ -17,17 +17,17 @@ } }, "stream_alert_app": { - "prefix_cluster_duo_auth_sm-app-name_app": { + "prefix_cluster_box_admin_events_sm-app-name_app": { "logs": [ - "duo" + "box" ] }, - 
"prefix_cluster_onelogin-events-app-name_app": { + "prefix_cluster_duo_admin_sm-app-name_app": { "logs": [ - "onelogin" + "duo" ] }, - "prefix_cluster_duo_admin_sm-app-name_app": { + "prefix_cluster_duo_auth_sm-app-name_app": { "logs": [ "duo" ] @@ -37,9 +37,9 @@ "gsuite" ] }, - "prefix_cluster_box_admin_events_sm-app-name_app": { + "prefix_cluster_onelogin-events-app-name_app": { "logs": [ - "box" + "onelogin" ] } } diff --git a/stream_alert/rule_processor/sink.py b/stream_alert/rule_processor/sink.py index 2627de557..7c0a3b98b 100644 --- a/stream_alert/rule_processor/sink.py +++ b/stream_alert/rule_processor/sink.py @@ -14,6 +14,7 @@ limitations under the License. """ import json +import os import boto3 from botocore.exceptions import ClientError @@ -31,10 +32,8 @@ def __init__(self, env): env (dict): loaded dictionary containing environment information """ self.env = env - self.client_lambda = boto3.client('lambda', - region_name=self.env['lambda_region']) - self.function = self.env['lambda_function_name'].replace( - '_streamalert_rule_processor', '_streamalert_alert_processor') + self.client_lambda = boto3.client('lambda', region_name=self.env['lambda_region']) + self.function = os.environ['ALERT_PROCESSOR'] def sink(self, alerts): """Sink triggered alerts from the StreamRules engine. 
diff --git a/stream_alert/shared/metrics.py b/stream_alert/shared/metrics.py index ef52147c2..08037ee15 100644 --- a/stream_alert/shared/metrics.py +++ b/stream_alert/shared/metrics.py @@ -27,8 +27,7 @@ # The FUNC_PREFIXES dict acts as a simple map to a human-readable name # Add ATHENA_PARTITION_REFRESH_NAME: 'AthenaPartitionRefresh', to the # below when metrics are supported there -FUNC_PREFIXES = {ALERT_PROCESSOR_NAME: 'AlertProcessor', - RULE_PROCESSOR_NAME: 'RuleProcessor'} +FUNC_PREFIXES = {RULE_PROCESSOR_NAME: 'RuleProcessor'} try: ENABLE_METRICS = bool(int(os.environ.get('ENABLE_METRICS', 0))) diff --git a/stream_alert_cli/config.py b/stream_alert_cli/config.py index 6e1a4b415..ad26b28ca 100644 --- a/stream_alert_cli/config.py +++ b/stream_alert_cli/config.py @@ -161,17 +161,21 @@ def toggle_metrics(self, enabled, clusters, lambda_functions): metrics on (rule, alert, or athena) """ for function in lambda_functions: - if function == metrics.ATHENA_PARTITION_REFRESH_NAME: + if function == metrics.ALERT_PROCESSOR_NAME: + self.config['lambda']['alert_processor_config']['enable_metrics'] = enabled + + elif function == metrics.ATHENA_PARTITION_REFRESH_NAME: if 'athena_partition_refresh_config' in self.config['lambda']: self.config['lambda']['athena_partition_refresh_config'] \ ['enable_metrics'] = enabled else: LOGGER_CLI.error('No Athena configuration found; please initialize first.') - continue - for cluster in clusters: - self.config['clusters'][cluster]['modules']['stream_alert'] \ - [function]['enable_metrics'] = enabled + else: + # Rule processor - toggle for each cluster + for cluster in clusters: + self.config['clusters'][cluster]['modules']['stream_alert'] \ + [function]['enable_metrics'] = enabled self.write() @@ -240,7 +244,7 @@ def _add_metric_alarm_per_cluster(self, alarm_info, function_name): cluster.upper()) new_alarms = self._add_metric_alarm_config(alarm_settings, metric_alarms) - if new_alarms != False: + if new_alarms is not False: 
function_config['metric_alarms'] = new_alarms LOGGER_CLI.info('Successfully added \'%s\' metric alarm for the \'%s\' ' 'function to \'conf/clusters/%s.json\'.', @@ -259,7 +263,7 @@ def _alarm_exists(self, alarm_name): message = ('CloudWatch metric alarm names must be unique ' 'within each AWS account. Please remove this alarm ' 'so it can be updated or choose another name.') - funcs = {metrics.ALERT_PROCESSOR_NAME, metrics.RULE_PROCESSOR_NAME} + funcs = {metrics.RULE_PROCESSOR_NAME} for func in funcs: for cluster in self.config['clusters']: func_alarms = ( @@ -279,8 +283,8 @@ def _alarm_exists(self, alarm_name): if not metric_alarms: return False - # Check for athena metric alarms also, which are save in the global config - funcs.add(metrics.ATHENA_PARTITION_REFRESH_NAME) + # Check for functions saved in the global config. + funcs.update({metrics.ALERT_PROCESSOR_NAME, metrics.ATHENA_PARTITION_REFRESH_NAME}) for func in funcs: global_func_alarms = global_config['metric_alarms'].get(func, {}) @@ -313,15 +317,15 @@ def add_metric_alarm(self, alarm_info): # Do not continue if the user is trying to apply a metric alarm for an athena # metric to a specific cluster (since the athena function operates on all clusters) - if (alarm_info['metric_target'] != 'aggregate' - and metric_function == metrics.ATHENA_PARTITION_REFRESH_NAME): - LOGGER_CLI.error('Metrics for the athena function can only be applied ' + if (alarm_info['metric_target'] != 'aggregate' and metric_function in { + metrics.ALERT_PROCESSOR_NAME, metrics.ATHENA_PARTITION_REFRESH_NAME}): + LOGGER_CLI.error('Metrics for the athena and alert functions can only be applied ' 'to an aggregate metric target, not on a per-cluster basis.') return - # If the metric is related to either the rule processor or alert processor, we should + # If the metric is related to the rule processor, we should # check to see if any cluster has metrics enabled for that function before continuing - if (metric_function in 
{metrics.ALERT_PROCESSOR_NAME, metrics.RULE_PROCESSOR_NAME} and + if (metric_function == metrics.RULE_PROCESSOR_NAME and not any(self.config['clusters'][cluster]['modules']['stream_alert'][metric_function] .get('enable_metrics') for cluster in self.config['clusters'])): prompt = ('Metrics are not currently enabled for the \'{}\' function ' @@ -353,8 +357,8 @@ def add_metric_alarm(self, alarm_info): return # Add metric alarms for the aggregate metrics - these are added to the global config - if (alarm_info['metric_target'] == 'aggregate' - or metric_function == metrics.ATHENA_PARTITION_REFRESH_NAME): + if (alarm_info['metric_target'] == 'aggregate' or metric_function in { + metrics.ALERT_PROCESSOR_NAME, metrics.ATHENA_PARTITION_REFRESH_NAME}): global_config = self.config['global']['infrastructure']['monitoring'] metric_alarms = global_config.get('metric_alarms', {}) @@ -371,7 +375,7 @@ def add_metric_alarm(self, alarm_info): alarm_info['metric_name']) new_alarms = self._add_metric_alarm_config(alarm_settings, metric_alarms) - if new_alarms != False: + if new_alarms is not False: global_config['metric_alarms'][metric_function] = new_alarms LOGGER_CLI.info('Successfully added \'%s\' metric alarm to ' '\'conf/global.json\'.', alarm_settings['alarm_name']) @@ -543,7 +547,7 @@ def _config_reader(self, key, file_path, **kwargs): else: # For certain log types (csv), the order of the schema # must be retained. By loading as an OrderedDict, - # the configuration is gauaranteed to keep its order. + # the configuration is guaranteed to keep its order. 
if key == 'logs': self.config[key] = json.load(data, object_pairs_hook=OrderedDict) else: diff --git a/stream_alert_cli/helpers.py b/stream_alert_cli/helpers.py index 0aa8ae62f..c67e2c8d1 100644 --- a/stream_alert_cli/helpers.py +++ b/stream_alert_cli/helpers.py @@ -587,7 +587,7 @@ def get_context_from_config(cluster, config): prefix = config['global']['account']['prefix'] account = config['global']['account']['aws_account_id'] region = config['global']['account']['region'] - function_name = '{}_{}_streamalert_alert_processor'.format(prefix, cluster) + function_name = '{}_streamalert_alert_processor'.format(prefix) arn = 'arn:aws:lambda:{}:{}:function:{}:testing'.format( region, account, function_name) diff --git a/stream_alert_cli/manage_lambda/deploy.py b/stream_alert_cli/manage_lambda/deploy.py index 01cd90ec4..fcbce59db 100644 --- a/stream_alert_cli/manage_lambda/deploy.py +++ b/stream_alert_cli/manage_lambda/deploy.py @@ -36,7 +36,7 @@ def _publish_version(packages, config, clusters): Returns: bool: Result of Lambda version publishing """ - global_packages = {'athena_partition_refresh', 'threat_intel_downloader'} + global_packages = {'alert_processor', 'athena_partition_refresh', 'threat_intel_downloader'} for package in packages: if package.package_name in global_packages: @@ -67,7 +67,7 @@ def _create_and_upload(function_name, config, cluster=None): package_mapping = { 'alert': PackageMap( stream_alert_packages.AlertProcessorPackage, - {'module.stream_alert_{}'.format(cluster) for cluster in clusters}, + {'module.alert_processor_lambda'}, True), 'apps': PackageMap( stream_alert_packages.AppIntegrationPackage, diff --git a/stream_alert_cli/manage_lambda/rollback.py b/stream_alert_cli/manage_lambda/rollback.py index 0c7e3b563..1e11272e9 100644 --- a/stream_alert_cli/manage_lambda/rollback.py +++ b/stream_alert_cli/manage_lambda/rollback.py @@ -14,9 +14,85 @@ limitations under the License. 
""" from stream_alert_cli import helpers +from stream_alert_cli.logger import LOGGER_CLI from stream_alert_cli.terraform.generate import terraform_generate +def _decrement_version(lambda_config): + """Decrement the Lambda version, if possible. + + Args: + lambda_config (dict): Lambda function config with 'current_version' + + Returns: + True if the version was changed, False otherwise + """ + current_version = lambda_config['current_version'] + if current_version == '$LATEST': + return False + + int_version = int(current_version) + if int_version <= 1: + return False + + lambda_config['current_version'] = int_version - 1 + return True + + +def _try_decrement_version(lambda_config, function_name): + """Log a warning if the lambda version cannot be rolled back.""" + changed = _decrement_version(lambda_config) + if not changed: + LOGGER_CLI.warn('%s cannot be rolled back from version %s', + function_name, str(lambda_config['current_version'])) + return changed + + +def _rollback_alert(config): + """Decrement the current_version for the alert processor.""" + lambda_config = config['lambda']['alert_processor_config'] + if _try_decrement_version(lambda_config, 'alert_processor'): + return ['module.alert_processor_lambda'] + + +def _rollback_apps(config, clusters): + """Decrement the current_version for all of the apps functions in the given clusters.""" + tf_targets = [] + + for cluster in clusters: + apps_config = config['clusters'][cluster]['modules'].get('stream_alert_apps', {}) + for lambda_name, lambda_config in apps_config.iteritems(): + clustered_name = '{}_{}'.format(lambda_name, cluster) + if _try_decrement_version(lambda_config, clustered_name): + tf_targets.append('module.{}'.format(clustered_name)) + + return tf_targets + + +def _rollback_athena(config): + """Decrement the current_version for the Athena Partition Refresh function.""" + lambda_config = config['lambda'].get('athena_partition_refresh_config') + if lambda_config and 
_try_decrement_version(lambda_config, 'athena_partition_refresh'): + return ['module.stream_alert_athena'] + + +def _rollback_downloader(config): + """Decrement the current_version for the Threat Intel Downloader function.""" + lambda_config = config['lambda'].get('threat_intel_downloader_config') + if lambda_config and _try_decrement_version(lambda_config, 'threat_intel_downloader'): + return ['module.threat_intel_downloader'] + + +def _rollback_rule(config, clusters): + """Decrement the current_version for the Rule Processor in each of the given clusters""" + tf_targets = [] + for cluster in clusters: + lambda_config = config['clusters'][cluster]['modules']['stream_alert']['rule_processor'] + if _try_decrement_version(lambda_config, 'rule_processor_{}'.format(cluster)): + tf_targets.append('module.stream_alert_{}'.format(cluster)) + return tf_targets + + def rollback(options, config): """Rollback the current production AWS Lambda version by 1 @@ -25,32 +101,30 @@ Only rollsback if published version is greater than 1 """ clusters = options.clusters or config.clusters() + rollback_all = 'all' in options.processor + tf_targets = [] - if 'all' in options.processor: - lambda_functions = {'rule_processor', 'alert_processor', 'athena_partition_refresh'} - else: - lambda_functions = { - '{}_processor'.format(proc) - for proc in options.processor if proc != 'athena' - } - if 'athena' in options.processor: - lambda_functions.add('athena_partition_refresh') + if rollback_all or 'alert' in options.processor: + tf_targets.extend(_rollback_alert(config) or []) - for cluster in clusters: - for lambda_function in lambda_functions: - stream_alert_key = config['clusters'][cluster]['modules']['stream_alert'] - current_vers = stream_alert_key[lambda_function]['current_version'] - if current_vers != '$LATEST': - current_vers = int(current_vers) - if current_vers > 1: - new_vers = current_vers - 1 - 
config['clusters'][cluster]['modules']['stream_alert'][lambda_function][ - 'current_version'] = new_vers - config.write() - - targets = ['module.stream_alert_{}'.format(x) for x in config.clusters()] + if rollback_all or 'apps' in options.processor: + tf_targets.extend(_rollback_apps(config, clusters) or []) + + if rollback_all or 'athena' in options.processor: + tf_targets.extend(_rollback_athena(config) or []) + + if rollback_all or 'rule' in options.processor: + tf_targets.extend(_rollback_rule(config, clusters) or []) + + if rollback_all or 'threat_intel_downloader' in options.processor: + tf_targets.extend(_rollback_downloader(config) or []) + + if not tf_targets: # No changes made + return + + config.write() if not terraform_generate(config=config): return - helpers.tf_runner(targets=targets) + helpers.tf_runner(targets=sorted(tf_targets)) diff --git a/stream_alert_cli/terraform/_common.py b/stream_alert_cli/terraform/_common.py index 34d8db74a..208e046c2 100644 --- a/stream_alert_cli/terraform/_common.py +++ b/stream_alert_cli/terraform/_common.py @@ -27,3 +27,20 @@ class InvalidClusterName(Exception): def infinitedict(): """Create arbitrary levels of dictionary key/values""" return defaultdict(infinitedict) + + +def monitoring_topic_arn(config): + """Return the ARN of the monitoring SNS topic""" + infrastructure_config = config['global']['infrastructure'] + + topic_name = ( + DEFAULT_SNS_MONITORING_TOPIC + if infrastructure_config['monitoring'].get('create_sns_topic') + else infrastructure_config['monitoring']['sns_topic_name'] + ) + + return 'arn:aws:sns:{region}:{account_id}:{topic}'.format( + region=config['global']['account']['region'], + account_id=config['global']['account']['aws_account_id'], + topic=topic_name + ) diff --git a/stream_alert_cli/terraform/alert_processor.py b/stream_alert_cli/terraform/alert_processor.py new file mode 100644 index 000000000..c6ab5d0ba --- /dev/null +++ b/stream_alert_cli/terraform/alert_processor.py @@ -0,0 +1,112 @@ 
+""" +Copyright 2017-present, Airbnb Inc. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" +from stream_alert_cli.terraform._common import infinitedict, monitoring_topic_arn + + +def generate_alert_processor(config): + """Generate Terraform for the Alert Processor + + Args: + config (dict): The loaded config from the 'conf/' directory + + Example Alert Processor config: + "alert_processor_config": { + "current_version": "$LATEST", + "handler": "main.handler", + "log_level": "info", + "log_retention_days": 14, + "memory": 128, + "metric_alarms": { + "enabled": True, + "errors_alarm_threshold": 0, + "errors_alarm_evaluation_periods": 1, + "errors_alarm_period_secs": 120, + "throttles_alarm_threshold": 0, + "throttles_alarm_evaluation_periods": 1, + "throttles_alarm_period_secs": 120 + }, + "source_bucket": "BUCKET", + "source_object_key": "OBJECT_KEY", + "outputs": { + "aws-lambda": [ + "lambda_function_name" + ], + "aws-s3": [ + "s3.bucket.name" + ] + }, + "timeout": 10, + "vpc_config": { + "security_group_ids": [ + "sg-id" + ], + "subnet_ids": [ + "subnet-id" + ] + } + } + + Returns: + dict: Alert Processor dict to be marshaled to JSON + """ + prefix = config['global']['account']['prefix'] + alert_processor_config = config['lambda']['alert_processor_config'] + alarms_config = alert_processor_config.get('metric_alarms', {}) + outputs_config = alert_processor_config.get('outputs', {}) + vpc_config = alert_processor_config.get('vpc_config', {}) + + result = infinitedict() + + # Set variables 
for the IAM permissions module + result['module']['alert_processor_iam'] = { + 'source': 'modules/tf_alert_processor_iam', + 'account_id': config['global']['account']['aws_account_id'], + 'region': config['global']['account']['region'], + 'prefix': prefix, + 'role_id': '${module.alert_processor_lambda.role_id}', + 'kms_key_arn': '${aws_kms_key.stream_alert_secrets.arn}', + 'output_lambda_functions': outputs_config.get('aws-lambda', []), + 'output_s3_buckets': outputs_config.get('aws-s3', []) + } + + # Set variables for the Lambda module + lambda_module = { + 'source': 'modules/tf_lambda', + 'function_name': '{}_streamalert_alert_processor'.format(prefix), + 'description': 'StreamAlert Alert Processor', + 'handler': alert_processor_config['handler'], + 'memory_size_mb': alert_processor_config['memory'], + 'timeout_sec': alert_processor_config['timeout'], + 'source_bucket': alert_processor_config['source_bucket'], + 'source_object_key': alert_processor_config['source_object_key'], + 'environment_variables': { + 'LOGGER_LEVEL': alert_processor_config.get('log_level', 'info') + }, + 'vpc_subnet_ids': vpc_config.get('subnet_ids', []), + 'vpc_security_group_ids': vpc_config.get('security_group_ids', []), + 'aliased_version': alert_processor_config['current_version'], + 'log_retention_days': alert_processor_config.get('log_retention_days', 14) + } + + # Add metric alarms configuration + if alarms_config.get('enabled', True): + lambda_module['enable_metric_alarms'] = True + lambda_module['alarm_actions'] = [monitoring_topic_arn(config)] + for var_name, var_value in alarms_config.iteritems(): + if 'errors' in var_name or 'throttles' in var_name: + lambda_module[var_name] = var_value + else: + lambda_module['enable_metric_alarms'] = False + + result['module']['alert_processor_lambda'] = lambda_module + return result diff --git a/stream_alert_cli/terraform/generate.py b/stream_alert_cli/terraform/generate.py index ad17f920a..e6dab93b8 100644 --- 
a/stream_alert_cli/terraform/generate.py +++ b/stream_alert_cli/terraform/generate.py @@ -21,9 +21,12 @@ from stream_alert.shared.metrics import FUNC_PREFIXES from stream_alert_cli.logger import LOGGER_CLI from stream_alert_cli.terraform._common import ( + DEFAULT_SNS_MONITORING_TOPIC, InvalidClusterName, - infinitedict + infinitedict, + monitoring_topic_arn ) +from stream_alert_cli.terraform.alert_processor import generate_alert_processor from stream_alert_cli.terraform.app_integrations import generate_app_integrations from stream_alert_cli.terraform.athena import generate_athena from stream_alert_cli.terraform.cloudtrail import generate_cloudtrail @@ -40,10 +43,10 @@ from stream_alert_cli.terraform.s3_events import generate_s3_events from stream_alert_cli.terraform.threat_intel_downloader import generate_threat_intel_downloader -DEFAULT_SNS_MONITORING_TOPIC = 'stream_alert_monitoring' RESTRICTED_CLUSTER_NAMES = ('main', 'athena') TERRAFORM_VERSIONS = {'application': '~> 0.10.6', 'provider': {'aws': '~> 1.5.0'}} + def generate_s3_bucket(**kwargs): """Generate an S3 Bucket dict @@ -105,7 +108,7 @@ def generate_main(**kwargs): # Configure Terraform version requirement main_dict['terraform']['required_version'] = TERRAFORM_VERSIONS['application'] - # Setup the Backend dependencing on the deployment phase. + # Setup the Backend depending on the deployment phase. # When first setting up StreamAlert, the Terraform statefile # is stored locally. After the first dependencies are created, # this moves to S3. 
@@ -195,15 +198,7 @@ def generate_main(**kwargs): if not global_metrics: return main_dict - topic_name = (DEFAULT_SNS_MONITORING_TOPIC if infrastructure_config - ['monitoring'].get('create_sns_topic') else - infrastructure_config['monitoring'].get('sns_topic_name')) - - sns_topic_arn = 'arn:aws:sns:{region}:{account_id}:{topic}'.format( - region=config['global']['account']['region'], - account_id=config['global']['account']['aws_account_id'], - topic=topic_name - ) + sns_topic_arn = monitoring_topic_arn(config) formatted_alarms = {} # Add global metric alarms for the rule and alert processors @@ -386,10 +381,21 @@ def terraform_generate(config, init=False): message='Removing old Threat Intel Downloader Terraform file' ) + # Setup Alert Processor + generate_global_lambda_settings( + config, + config_name='alert_processor_config', + config_generate_func=generate_alert_processor, + tf_tmp_file='terraform/alert_processor.tf.json', + message='Removing old Alert Processor Terraform file' + ) + return True + def generate_global_lambda_settings(config, **kwargs): - """Generate settings of global Lambda funcitons, Athena and Threat Intel Downloader + """Generate settings for global Lambda functions + Args: config (dict): lambda function settings read from 'conf/' directory @@ -402,7 +408,7 @@ def generate_global_lambda_settings(config, **kwargs): config_name = kwargs.get('config_name') tf_tmp_file = kwargs.get('tf_tmp_file') if config_name and config['lambda'].get(config_name) and tf_tmp_file: - if config['lambda'].get(config_name)['enabled']: + if config['lambda'][config_name].get('enabled', True): generated_config = kwargs.get('config_generate_func')(config=config) if generated_config: with open(tf_tmp_file, 'w') as tf_file: diff --git a/stream_alert_cli/terraform/metrics.py b/stream_alert_cli/terraform/metrics.py index 5632d03f4..b330718ad 100644 --- a/stream_alert_cli/terraform/metrics.py +++ b/stream_alert_cli/terraform/metrics.py @@ -15,7 +15,7 @@ """ from 
stream_alert.shared import metrics from stream_alert_cli.logger import LOGGER_CLI -from stream_alert_cli.terraform._common import DEFAULT_SNS_MONITORING_TOPIC +from stream_alert_cli.terraform._common import monitoring_topic_arn def generate_cloudwatch_metric_filters(cluster_name, cluster_dict, config): @@ -108,15 +108,7 @@ def generate_cloudwatch_metric_alarms(cluster_name, cluster_dict, config): 'Invalid config: Make sure you declare global infrastructure options!') return - topic_name = (DEFAULT_SNS_MONITORING_TOPIC if infrastructure_config - ['monitoring'].get('create_sns_topic') else - infrastructure_config['monitoring'].get('sns_topic_name')) - - sns_topic_arn = 'arn:aws:sns:{region}:{account_id}:{topic}'.format( - region=config['global']['account']['region'], - account_id=config['global']['account']['aws_account_id'], - topic=topic_name - ) + sns_topic_arn = monitoring_topic_arn(config) cluster_dict['module']['stream_alert_{}'.format( cluster_name)]['sns_topic_arn'] = sns_topic_arn diff --git a/stream_alert_cli/terraform/monitoring.py b/stream_alert_cli/terraform/monitoring.py index 48bb776e6..47e95190c 100644 --- a/stream_alert_cli/terraform/monitoring.py +++ b/stream_alert_cli/terraform/monitoring.py @@ -14,6 +14,7 @@ limitations under the License. 
""" from stream_alert_cli.logger import LOGGER_CLI +from stream_alert_cli.terraform._common import monitoring_topic_arn def generate_monitoring(cluster_name, cluster_dict, config): @@ -43,7 +44,6 @@ def generate_monitoring(cluster_name, cluster_dict, config): prefix = config['global']['account']['prefix'] infrastructure_config = config['global'].get('infrastructure') monitoring_config = config['clusters'][cluster_name]['modules']['cloudwatch_monitoring'] - sns_topic_arn = None if not (infrastructure_config and 'monitoring' in infrastructure_config): LOGGER_CLI.error('Invalid config: Make sure you declare global infrastructure options!') @@ -53,16 +53,7 @@ def generate_monitoring(cluster_name, cluster_dict, config): LOGGER_CLI.info('CloudWatch Monitoring not enabled, skipping...') return True - if infrastructure_config['monitoring'].get('create_sns_topic'): - topic_name = 'stream_alert_monitoring' - - elif infrastructure_config['monitoring'].get('sns_topic_name'): - topic_name = infrastructure_config['monitoring']['sns_topic_name'] - - sns_topic_arn = 'arn:aws:sns:{region}:{account_id}:{topic}'.format( - region=config['global']['account']['region'], - account_id=config['global']['account']['aws_account_id'], - topic=topic_name) + sns_topic_arn = monitoring_topic_arn(config) cluster_dict['module']['cloudwatch_monitoring_{}'.format(cluster_name)] = { 'source': 'modules/tf_stream_alert_monitoring', @@ -73,10 +64,7 @@ def generate_monitoring(cluster_name, cluster_dict, config): if monitoring_config.get('lambda_alarms_enabled', True): cluster_dict['module']['cloudwatch_monitoring_{}'.format(cluster_name)].update({ - 'lambda_functions': [ - '{}_{}_streamalert_rule_processor'.format(prefix, cluster_name), - '{}_{}_streamalert_alert_processor'.format(prefix, cluster_name) - ], + 'lambda_functions': ['{}_{}_streamalert_rule_processor'.format(prefix, cluster_name)], 'lambda_alarms_enabled': True }) @@ -88,7 +76,7 @@ def generate_monitoring(cluster_name, cluster_dict, config): 
# Add support for custom settings for tweaking alarm thresholds, eval periods, and periods # Note: This does not strictly check for proper variable names, since there are so many. - # Instead, Terraform will error out if an imporper name is used. + # Instead, Terraform will error out if an improper name is used. # Also, every value in these settings should be a string, so cast for safety. for setting_name, setting_value in monitoring_config.get('settings', {}).iteritems(): cluster_dict['module']['cloudwatch_monitoring_{}'.format( diff --git a/stream_alert_cli/terraform/streamalert.py b/stream_alert_cli/terraform/streamalert.py index bf03e2687..e9df2f450 100644 --- a/stream_alert_cli/terraform/streamalert.py +++ b/stream_alert_cli/terraform/streamalert.py @@ -26,28 +26,6 @@ def generate_stream_alert(cluster_name, cluster_dict, config): JSON Input from the config: "stream_alert": { - "alert_processor": { - "current_version": "$LATEST", - "log_level": "info", - "memory": 128, - "outputs": { - "aws-lambda": [ - "lambda_function_name" - ], - "aws-s3": [ - "s3.bucket.name" - ] - }, - "timeout": 10, - "vpc_config": { - "security_group_ids": [ - "sg-id" - ], - "subnet_ids": [ - "subnet-id" - ] - } - }, "rule_processor": { "current_version": "$LATEST", "inputs": { @@ -73,7 +51,6 @@ def generate_stream_alert(cluster_name, cluster_dict, config): 'region': config['clusters'][cluster_name]['region'], 'prefix': account['prefix'], 'cluster': cluster_name, - 'kms_key_arn': '${aws_kms_key.stream_alert_secrets.arn}', 'rule_processor_enable_metrics': modules['stream_alert'] \ ['rule_processor'].get('enable_metrics', True), 'rule_processor_log_level': modules['stream_alert'] \ @@ -81,15 +58,7 @@ def generate_stream_alert(cluster_name, cluster_dict, config): 'rule_processor_memory': modules['stream_alert']['rule_processor']['memory'], 'rule_processor_timeout': modules['stream_alert']['rule_processor']['timeout'], 'rule_processor_version': 
modules['stream_alert']['rule_processor']['current_version'], - 'rule_processor_config': '${var.rule_processor_config}', - 'alert_processor_config': '${var.alert_processor_config}', - 'alert_processor_enable_metrics': modules['stream_alert'] \ - ['alert_processor'].get('enable_metrics', True), - 'alert_processor_log_level': modules['stream_alert'] \ - ['alert_processor'].get('log_level', 'info'), - 'alert_processor_memory': modules['stream_alert']['alert_processor']['memory'], - 'alert_processor_timeout': modules['stream_alert']['alert_processor']['timeout'], - 'alert_processor_version': modules['stream_alert']['alert_processor']['current_version'] + 'rule_processor_config': '${var.rule_processor_config}' } if (config['global'].get('threat_intel') @@ -98,19 +67,6 @@ def generate_stream_alert(cluster_name, cluster_dict, config): ['dynamodb_ioc_table'] = config['global']['threat_intel']['dynamodb_table'] cluster_dict['module']['stream_alert_{}'.format(cluster_name)] \ ['threat_intel_enabled'] = config['global']['threat_intel']['enabled'] - # Add Alert Processor output config from the loaded cluster file - output_config = modules['stream_alert']['alert_processor'].get('outputs') - if output_config: - # Mapping of Terraform input variables to output config variables - output_mapping = { - 'output_lambda_functions': 'aws-lambda', - 'output_s3_buckets': 'aws-s3' - } - for tf_key, output in output_mapping.items(): - if output in output_config: - cluster_dict['module']['stream_alert_{}'.format(cluster_name)].update({ - tf_key: modules['stream_alert']['alert_processor']['outputs'][output] - }) # Add Rule Processor input config from the loaded cluster file input_config = modules['stream_alert']['rule_processor'].get('inputs') @@ -124,13 +80,4 @@ def generate_stream_alert(cluster_name, cluster_dict, config): tf_key: input_config[input_key] }) - # Add the Alert Processor VPC config from the loaded cluster file - vpc_config = 
modules['stream_alert']['alert_processor'].get('vpc_config') - if vpc_config: - cluster_dict['module']['stream_alert_{}'.format(cluster_name)].update({ - 'alert_processor_vpc_enabled': True, - 'alert_processor_vpc_subnet_ids': vpc_config['subnet_ids'], - 'alert_processor_vpc_security_group_ids': vpc_config['security_group_ids'] - }) - return True diff --git a/stream_alert_cli/test.py b/stream_alert_cli/test.py index d6f8c4829..04c742369 100644 --- a/stream_alert_cli/test.py +++ b/stream_alert_cli/test.py @@ -924,6 +924,10 @@ def run_tests(options, context): options (namedtuple): CLI options (debug, processor, etc) context (namedtuple): A constructed aws context object """ + # The Rule Processor sink is instantiated with the alert processor from the environment: + os.environ['ALERT_PROCESSOR'] = '{}_streamalert_alert_processor'.format( + config['global']['account']['prefix']) + if options.debug: # TODO(jack): Currently there is no (clean) way to set # the logger formatter to provide more verbose diff --git a/terraform/modules/tf_alert_processor_iam/README.md b/terraform/modules/tf_alert_processor_iam/README.md new file mode 100644 index 000000000..393a0a546 --- /dev/null +++ b/terraform/modules/tf_alert_processor_iam/README.md @@ -0,0 +1,5 @@ +# Alert Processor Permissions +This module adds IAM permissions specific to the alert processor: + * Reading and decrypting output secrets + * Invoking Lambda outputs + * Writing to S3 outputs diff --git a/terraform/modules/tf_alert_processor_iam/main.tf b/terraform/modules/tf_alert_processor_iam/main.tf new file mode 100644 index 000000000..5e3fc3b8d --- /dev/null +++ b/terraform/modules/tf_alert_processor_iam/main.tf @@ -0,0 +1,111 @@ +// Permissions specific to the alert processor: decrypting secrets, sending alerts to outputs + +locals { + firehose_arn_prefix = "arn:aws:firehose:${var.region}:${var.account_id}" + lambda_arn_prefix = "arn:aws:lambda:${var.region}:${var.account_id}:function" +} + +// Allow the Alert 
Processor to retrieve and decrypt output secrets +resource "aws_iam_role_policy" "output_secrets" { + name = "DecryptOutputSecrets" + role = "${var.role_id}" + policy = "${data.aws_iam_policy_document.output_secrets.json}" +} + +data "aws_iam_policy_document" "output_secrets" { + // Allow decrypting output secrets + statement { + effect = "Allow" + + actions = [ + "kms:Decrypt", + "kms:DescribeKey", + ] + + resources = ["${var.kms_key_arn}"] + } + + // Allow retrieving encrypted output secrets + statement { + effect = "Allow" + actions = ["s3:GetObject"] + resources = ["arn:aws:s3:::${var.prefix}.streamalert.secrets/*"] + } +} + +// Allow the Alert Processor to send to default firehose and S3 outputs +resource "aws_iam_role_policy" "default_outputs" { + name = "SinkToDefaultOutputs" + role = "${var.role_id}" + policy = "${data.aws_iam_policy_document.default_outputs.json}" +} + +data "aws_iam_policy_document" "default_outputs" { + // Allow sending alerts to default firehose output + statement { + effect = "Allow" + actions = ["firehose:Put*"] + resources = ["${local.firehose_arn_prefix}:deliverystream/${var.prefix}_streamalert_alert_delivery"] + } + + // Allow saving alerts to the default .streamalerts bucket + statement { + effect = "Allow" + + actions = [ + "s3:ListBucket", + "s3:PutObject", + "s3:PutObjectAcl", + ] + + resources = [ + "arn:aws:s3:::${var.prefix}.streamalerts", + "arn:aws:s3:::${var.prefix}.streamalerts/*", + ] + } +} + +// Allow the Alert Processor to invoke the configured output Lambda functions +resource "aws_iam_role_policy" "invoke_lambda_outputs" { + count = "${length(var.output_lambda_functions)}" + name = "LambdaInvoke_${element(var.output_lambda_functions, count.index)}" + role = "${var.role_id}" + policy = "${element(data.aws_iam_policy_document.invoke_lambda_outputs.*.json, count.index)}" +} + +data "aws_iam_policy_document" "invoke_lambda_outputs" { + count = "${length(var.output_lambda_functions)}" + + statement { + effect = "Allow" 
+ actions = ["lambda:InvokeFunction"] + resources = ["${local.lambda_arn_prefix}:${element(var.output_lambda_functions, count.index)}"] + } +} + +// Allow the Alert Processor to write alerts to the configured output S3 buckets +resource "aws_iam_role_policy" "write_to_s3_outputs" { + count = "${length(var.output_s3_buckets)}" + name = "S3PutObject_${element(var.output_s3_buckets, count.index)}" + role = "${var.role_id}" + policy = "${element(data.aws_iam_policy_document.write_to_s3_outputs.*.json, count.index)}" +} + +data "aws_iam_policy_document" "write_to_s3_outputs" { + count = "${length(var.output_s3_buckets)}" + + statement { + effect = "Allow" + + actions = [ + "s3:ListBucket", + "s3:PutObject", + "s3:PutObjectAcl", + ] + + resources = [ + "arn:aws:s3:::${element(var.output_s3_buckets, count.index)}", + "arn:aws:s3:::${element(var.output_s3_buckets, count.index)}/*", + ] + } +} diff --git a/terraform/modules/tf_alert_processor_iam/variables.tf b/terraform/modules/tf_alert_processor_iam/variables.tf new file mode 100644 index 000000000..566977085 --- /dev/null +++ b/terraform/modules/tf_alert_processor_iam/variables.tf @@ -0,0 +1,31 @@ +variable "account_id" { + description = "12-digit AWS Account ID" +} + +variable "region" { + description = "AWS region identifier" +} + +variable "prefix" { + description = "Prefix for resource names" +} + +variable "role_id" { + description = "Alert processor IAM Role ID" +} + +variable "kms_key_arn" { + description = "KMS key ARN used for encrypting output secrets" +} + +variable "output_lambda_functions" { + type = "list" + default = [] + description = "Optional list of configured Lambda outputs (function names)" +} + +variable "output_s3_buckets" { + type = "list" + default = [] + description = "Optional list of configured S3 bucket outputs (bucket names)" +} diff --git a/terraform/modules/tf_stream_alert/README.md b/terraform/modules/tf_stream_alert/README.md index eb5d25d14..e864c52d7 100644 --- 
a/terraform/modules/tf_stream_alert/README.md +++ b/terraform/modules/tf_stream_alert/README.md @@ -1,17 +1,10 @@ -# Stream Alert Terraform Module -* This Terraform module creates the main AWS Lambda functions to match rules and send alerts. +# StreamAlert Terraform Module +This Terraform module creates the rule processor and its associated components: -## Components -* S3 buckets: - * StreamAlert Lambda source code. - * Bucket to store alerts from the Output processor. - -* AWS Lambda Functions: - * StreamAlert processor - * StreamAlert output processor - * Each with a ``production`` Lambda alias - -* IAM roles/policies +* CloudWatch log group, metric alarms, and metric filters +* IAM role and policies +* Lambda alias: "production" +* SNS topic subscriptions ## Example ``` @@ -25,91 +18,3 @@ module "stream_alert" { lambda_handler = "main.lambda_handler" } ``` - -## Inputs - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
PropertyDescriptionDefaultRequired
account_idYour AWS Account IDNoneTrue
alert_processor_vpc_enabledTo enable/disable placing the Alert Processor inside a VPCFalseFalse/td> -
alert_processor_vpc_subnet_idsThe subnet IDs to place the Alert Processor[]False
alert_processor_vpc_security_group_idsThe security group IDs to assign to the Alert Processor[]False
regionThe AWS region for your streamNoneTrue
lambda_source_bucket_nameThe name of the S3 bucket to store lambda deployment packagesNoneTrue
lambda_function_nameThe name of the stream alert lambda functionstream_alert_processorFalse
lambda_timeoutThe max runtime in seconds for the lambda function10False
lambda_memoryThe memory allocation in MB for the lambda function/aws/kinesisfirehose/stream_alertFalse
- -## Outputs - - - - - - - - - - - - - - - - - - - - - -
PropertyDescription
lambda_arnThe ARN of the StreamAlert lambda function
lambda_role_idThe ID of the StreamAlert IAM execution role
lambda_role_arnThe ARN of the StreamAlert IAM execution role
sns_topic_arnThe ARN of the SNS topic for operational monitoring
diff --git a/terraform/modules/tf_stream_alert/iam.tf b/terraform/modules/tf_stream_alert/iam.tf index 92e458eb6..974f60259 100644 --- a/terraform/modules/tf_stream_alert/iam.tf +++ b/terraform/modules/tf_stream_alert/iam.tf @@ -18,12 +18,9 @@ data "aws_iam_policy_document" "lambda_assume_role_policy" { } } -// IAM Role Policy: Allow the Rule Processor to write CloudWatch logs -resource "aws_iam_role_policy" "streamalert_rule_processor_cloudwatch" { - name = "CloudwatchWriteLogs" - role = "${aws_iam_role.streamalert_rule_processor_role.id}" - - policy = "${data.aws_iam_policy_document.alert_processor_cloudwatch.json}" +resource "aws_iam_role_policy_attachment" "stream_alert_rule_processor_cloudwatch" { + role = "${aws_iam_role.streamalert_rule_processor_role.id}" + policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" } // IAM Role Policy: Allow the Rule Processor to invoke the Alert Processor @@ -45,7 +42,7 @@ data "aws_iam_policy_document" "rule_processor_invoke_alert_proc" { # Use interpolation because of the different VPC/non vpc resources resources = [ - "arn:aws:lambda:${var.region}:${var.account_id}:function:${var.prefix}_${var.cluster}_streamalert_alert_processor", + "arn:aws:lambda:${var.region}:${var.account_id}:function:${var.prefix}_streamalert_alert_processor", ] } } @@ -98,197 +95,3 @@ data "aws_iam_policy_document" "streamalert_rule_processor_read_dynamodb" { ] } } - -// IAM Role: Alert Processor Execution Role -resource "aws_iam_role" "streamalert_alert_processor_role" { - name = "${var.prefix}_${var.cluster}_streamalert_alert_processor_role" - - assume_role_policy = "${data.aws_iam_policy_document.lambda_assume_role_policy.json}" -} - -// IAM Role Policy: Allow the Alert Processor to decrypt secrets -resource "aws_iam_role_policy" "streamalert_alert_processor_kms" { - name = "KmsDecryptSecrets" - role = "${aws_iam_role.streamalert_alert_processor_role.id}" - - policy = 
"${data.aws_iam_policy_document.rule_processor_kms_decrypt.json}" -} - -// IAM Policy Doc: KMS key permissions for decryption -data "aws_iam_policy_document" "rule_processor_kms_decrypt" { - statement { - effect = "Allow" - - actions = [ - "kms:Decrypt", - "kms:DescribeKey", - ] - - resources = [ - "${var.kms_key_arn}", - ] - } -} - -// IAM Role Policy: Allow the Alert Processor to write objects to S3. -// The default S3 bucket is also created by this module. -resource "aws_iam_role_policy" "streamalert_alert_processor_s3" { - name = "S3WriteAlertsDefault" - role = "${aws_iam_role.streamalert_alert_processor_role.id}" - - policy = "${data.aws_iam_policy_document.alert_processor_s3.json}" -} - -// IAM Policy Doc: Allow fetching of secrets and putting of alerts -data "aws_iam_policy_document" "alert_processor_s3" { - statement { - effect = "Allow" - - actions = [ - "s3:PutObject", - "s3:PutObjectAcl", - "s3:ListBucket", - ] - - resources = [ - "arn:aws:s3:::${var.prefix}.streamalerts/*", - ] - } - - statement { - effect = "Allow" - - actions = [ - "s3:GetObject", - ] - - resources = [ - "arn:aws:s3:::${var.prefix}.streamalert.secrets/*", - ] - } -} - -// IAM Role Policy: Allow the Alert Processor to write CloudWatch logs -resource "aws_iam_role_policy" "streamalert_alert_processor_cloudwatch" { - name = "CloudwatchWriteLogs" - role = "${aws_iam_role.streamalert_alert_processor_role.id}" - - policy = "${data.aws_iam_policy_document.alert_processor_cloudwatch.json}" -} - -// IAM Policy Doc: Allow creating log groups and events in any CloudWatch stream -data "aws_iam_policy_document" "alert_processor_cloudwatch" { - statement { - effect = "Allow" - - actions = [ - "logs:CreateLogGroup", - "logs:CreateLogStream", - "logs:PutLogEvents", - ] - - resources = [ - "*", - ] - } -} - -// IAM Role Policy: Allow the Alert Processor to invoke configured Lambda functions -resource "aws_iam_role_policy" "streamalert_alert_processor_lambda" { - count = 
"${length(var.output_lambda_functions)}" - name = "LambdaInvoke${count.index}" - role = "${aws_iam_role.streamalert_alert_processor_role.id}" - - policy = <, , -resource "aws_cloudwatch_log_metric_filter" "alert_processor_cw_metric_filters" { - count = "${length(var.alert_processor_metric_filters)}" - name = "${element(split(",", var.alert_processor_metric_filters[count.index]), 0)}" - pattern = "${element(split(",", var.alert_processor_metric_filters[count.index]), 1)}" - log_group_name = "${aws_cloudwatch_log_group.alert_processor.name}" - - metric_transformation { - name = "${element(split(",", var.alert_processor_metric_filters[count.index]), 0)}" - namespace = "${var.namespace}" - value = "${element(split(",", var.alert_processor_metric_filters[count.index]), 2)}" - } -} - // CloudWatch metric alarms that are created per-cluster // The split list is our way around poor tf support for lists of maps and is made up of: // , , , , diff --git a/terraform/modules/tf_stream_alert/sns.tf b/terraform/modules/tf_stream_alert/sns.tf index 703724efe..e7fafbb59 100644 --- a/terraform/modules/tf_stream_alert/sns.tf +++ b/terraform/modules/tf_stream_alert/sns.tf @@ -15,4 +15,5 @@ resource "aws_lambda_permission" "sns_inputs" { principal = "sns.amazonaws.com" source_arn = "${element(var.input_sns_topics, count.index)}" qualifier = "production" + depends_on = ["aws_lambda_alias.rule_processor_production"] } diff --git a/terraform/modules/tf_stream_alert/variables.tf b/terraform/modules/tf_stream_alert/variables.tf index 49cfc8134..fba44dc21 100644 --- a/terraform/modules/tf_stream_alert/variables.tf +++ b/terraform/modules/tf_stream_alert/variables.tf @@ -2,45 +2,6 @@ variable "account_id" { default = "" } -variable "alert_processor_config" { - type = "map" - default = {} -} - -variable "alert_processor_log_level" { - type = "string" - default = "info" -} - -variable "alert_processor_enable_metrics" { - default = false -} - -variable "alert_processor_version" {} - -variable 
"alert_processor_memory" {} - -variable "alert_processor_timeout" {} - -variable "alert_processor_vpc_enabled" { - default = false -} - -variable "alert_processor_vpc_subnet_ids" { - type = "list" - default = [] -} - -variable "alert_processor_vpc_security_group_ids" { - type = "list" - default = [] -} - -variable "alert_processor_metric_filters" { - type = "list" - default = [] -} - variable "cloudwatch_log_retention" { default = 60 } @@ -54,10 +15,6 @@ variable "input_sns_topics" { default = [] } -variable "kms_key_arn" { - type = "string" -} - variable "metric_alarms" { type = "list" default = [] @@ -68,16 +25,6 @@ variable "namespace" { default = "StreamAlert" } -variable "output_lambda_functions" { - type = "list" - default = [] -} - -variable "output_s3_buckets" { - type = "list" - default = [] -} - variable "prefix" { default = "" } diff --git a/terraform/variables.tf b/terraform/variables.tf index b90aeb088..75d4eaa76 100644 --- a/terraform/variables.tf +++ b/terraform/variables.tf @@ -3,26 +3,11 @@ variable "account" { default = {} } -variable "alert_processor_config" { - type = "map" - default = {} -} - -variable "alert_processor_lambda_config" { - type = "map" - default = {} -} - variable "stream_alert_apps_config" { type = "map" default = {} } -variable "alert_processor_versions" { - type = "map" - default = {} -} - variable "aws-lambda" { type = "map" default = {} diff --git a/tests/unit/conf/clusters/advanced.json b/tests/unit/conf/clusters/advanced.json index c992ce79c..831b77f05 100644 --- a/tests/unit/conf/clusters/advanced.json +++ b/tests/unit/conf/clusters/advanced.json @@ -43,27 +43,6 @@ } ], "stream_alert": { - "alert_processor": { - "current_version": "$LATEST", - "memory": 128, - "outputs": { - "aws-lambda": [ - "my-lambda-function:production" - ], - "aws-s3": [ - "my-s3-bucket.with.data" - ] - }, - "timeout": 25, - "vpc_config": { - "security_group_ids": [ - "sg-id-1" - ], - "subnet_ids": [ - "subnet-id-1" - ] - } - }, "rule_processor": { 
"current_version": "$LATEST", "inputs": { diff --git a/tests/unit/conf/clusters/test.json b/tests/unit/conf/clusters/test.json index cb12e1b48..b5b1bd889 100644 --- a/tests/unit/conf/clusters/test.json +++ b/tests/unit/conf/clusters/test.json @@ -23,11 +23,6 @@ } ], "stream_alert": { - "alert_processor": { - "current_version": "$LATEST", - "memory": 128, - "timeout": 25 - }, "rule_processor": { "current_version": "$LATEST", "enable_threat_intel": false, diff --git a/tests/unit/conf/clusters/trusted.json b/tests/unit/conf/clusters/trusted.json index 155f469d5..6bb7f57dc 100644 --- a/tests/unit/conf/clusters/trusted.json +++ b/tests/unit/conf/clusters/trusted.json @@ -26,11 +26,6 @@ } ], "stream_alert": { - "alert_processor": { - "current_version": "$LATEST", - "memory": 128, - "timeout": 25 - }, "rule_processor": { "current_version": "$LATEST", "enable_threat_intel": false, diff --git a/tests/unit/conf/lambda.json b/tests/unit/conf/lambda.json index 1bd78b076..5d9e16c52 100644 --- a/tests/unit/conf/lambda.json +++ b/tests/unit/conf/lambda.json @@ -1,6 +1,40 @@ { "alert_processor_config": { - "source_bucket": "unit.testing.source.bucket" + "current_version": "$LATEST", + "handler": "main.handler", + "log_level": "info", + "log_retention_days": 7, + "memory": 128, + "metric_alarms": { + "enabled": true, + "errors_alarm_evaluation_periods": 1, + "errors_alarm_period_secs": 2, + "errors_alarm_threshold": 3, + "throttles_alarm_evaluation_periods": 4, + "throttles_alarm_period_secs": 5, + "throttles_alarm_threshold": 6 + }, + "outputs": { + "aws-lambda": [ + "test-lambda-output" + ], + "aws-s3": [ + "test-s3-output" + ] + }, + "source_bucket": "unit.testing.streamalert.source", + "source_current_hash": "12345", + "source_object_key": "lambda/alert/source.zip", + "third_party_libraries": [], + "timeout": 60, + "vpc_config": { + "security_group_ids": [ + "sg-abc" + ], + "subnet_ids": [ + "subnet-123" + ] + } }, "athena_partition_refresh_config": { "current_version": 
"$LATEST", diff --git a/tests/unit/helpers/base.py b/tests/unit/helpers/base.py index acdf671c0..d92bea8bd 100644 --- a/tests/unit/helpers/base.py +++ b/tests/unit/helpers/base.py @@ -70,6 +70,9 @@ def __setitem__(self, key, new_value): self.config.__setitem__(key, new_value) self.write() + def clusters(self): + return self.config['clusters'].keys() + def get(self, key): return self.config.get(key) @@ -113,21 +116,14 @@ def basic_streamalert_config(): }, 'lambda': { 'alert_processor_config': { + 'current_version': '$LATEST', 'handler': 'stream_alert.alert_processor.main.handler', + 'memory': 128, 'source_bucket': 'unit-testing.streamalert.source', 'source_current_hash': '', 'source_object_key': '', - 'third_party_libraries': [] - }, - 'rule_processor_config': { - 'handler': 'stream_alert.rule_processor.main.handler', - 'source_bucket': 'unit-testing.streamalert.source', - 'source_current_hash': '', - 'source_object_key': '', - 'third_party_libraries': [ - 'jsonpath_rw', - 'netaddr' - ] + 'third_party_libraries': [], + 'timeout': 10 }, 'athena_partition_refresh_config': { 'current_version': '$LATEST', @@ -147,6 +143,40 @@ def basic_streamalert_config(): 'third_party_libraries': [], 'timeout': 60 }, + 'rule_processor_config': { + 'handler': 'stream_alert.rule_processor.main.handler', + 'source_bucket': 'unit-testing.streamalert.source', + 'source_current_hash': '', + 'source_object_key': '', + 'third_party_libraries': [ + 'jsonpath_rw', + 'netaddr' + ] + }, + 'threat_intel_downloader_config': { + 'autoscale': True, + 'current_version': '$LATEST', + 'enabled': True, + 'handler': 'stream_alert.threat_intel_downloader.main.handler', + 'interval': 'rate(1 day)', + 'ioc_filters': [], + 'ioc_keys': [], + 'ioc_types': [], + 'log_level': 'info', + 'max_read_capacity': 1000, + 'memory': 128, + 'min_read_capacity': 100, + 'source_bucket': 'unit-testing.streamalert.source', + 'source_current_hash': '', + 'source_object_key': '', + 'table_rcu': 1000, + 'table_wcu': 200, + 
'target_utilization': 70, + 'third_party_libraries': [ + 'requests' + ], + 'timeout': 120 + } }, 'clusters': { 'prod': { @@ -169,11 +199,6 @@ def basic_streamalert_config(): 'enabled': True }, 'stream_alert': { - 'alert_processor': { - 'current_version': '$LATEST', - 'memory': 128, - 'timeout': 10 - }, 'rule_processor': { 'current_version': '$LATEST', "enable_metrics": True, @@ -201,6 +226,36 @@ def basic_streamalert_config(): ] }, 'region': 'us-east-1' + }, + 'corp': { + 'id': 'corp', + 'modules': { + 'stream_alert': { + 'rule_processor': { + 'current_version': '$LATEST', + 'memory': 128, + 'timeout': 10 + } + }, + 'stream_alert_apps': { + 'box_collector': { + 'current_version': '$LATEST', + 'interval': 'rate(5 minutes)', + 'log_level': 'debug', + 'memory': 128, + 'timeout': 60, + 'type': 'box_admin_events' + }, + 'duo_admin_collector': { + 'current_version': '$LATEST', + 'interval': 'rate(30 minutes)', + 'memory': 128, + 'timeout': 40, + 'type': 'duo_admin' + } + } + }, + 'region': 'us-east-1' } } } diff --git a/tests/unit/stream_alert_cli/manage_lambda/__init__.py b/tests/unit/stream_alert_cli/manage_lambda/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unit/stream_alert_cli/manage_lambda/test_rollback.py b/tests/unit/stream_alert_cli/manage_lambda/test_rollback.py new file mode 100644 index 000000000..846971a56 --- /dev/null +++ b/tests/unit/stream_alert_cli/manage_lambda/test_rollback.py @@ -0,0 +1,121 @@ +"""Test ./manage.py lambda rollback functionality.""" +import unittest + +from mock import call, patch +from nose.tools import assert_equal + +from stream_alert_cli.manage_lambda import rollback +from tests.unit.helpers.base import basic_streamalert_config, MockCLIConfig + + +class MockOptions(object): + """Simple mock for the options parsed from the command line arguments.""" + + def __init__(self, clusters, processor): + self.clusters = clusters + self.processor = processor + + +@patch.object(rollback, 'LOGGER_CLI') 
+@patch.object(rollback, 'terraform_generate', return_value=True) +@patch.object(rollback.helpers, 'tf_runner') +class RollbackTest(unittest.TestCase): + """Test the config updates and Terraform targets affected during a Lambda rollback.""" + + def setUp(self): + self.config = MockCLIConfig(config=basic_streamalert_config()) + + # Find all function config sections (with 'current_version') + self.alert_config = self.config['lambda']['alert_processor_config'] + self.apps_config_box = ( + self.config['clusters']['corp']['modules']['stream_alert_apps']['box_collector']) + self.apps_config_duo = ( + self.config['clusters']['corp']['modules']['stream_alert_apps']['duo_admin_collector']) + self.athena_config = self.config['lambda']['athena_partition_refresh_config'] + self.downloader_config = self.config['lambda']['threat_intel_downloader_config'] + self.rule_config_prod = ( + self.config['clusters']['prod']['modules']['stream_alert']['rule_processor']) + self.rule_config_corp = ( + self.config['clusters']['corp']['modules']['stream_alert']['rule_processor']) + + self.func_configs = [ + self.alert_config, self.apps_config_box, self.apps_config_duo, self.athena_config, + self.downloader_config, self.rule_config_prod, self.rule_config_corp + ] + + def test_rollback_all(self, mock_runner, mock_generate, mock_logger): + """CLI - Lambda Rollback all""" + options = MockOptions(None, ['all']) + + for config in self.func_configs: + config['current_version'] = 3 + + rollback.rollback(options, self.config) + + # Verify that all of the versions were rolled back + for config in self.func_configs: + assert_equal(config['current_version'], 2) + + mock_logger.assert_not_called() + mock_generate.assert_called_once_with(config=self.config) + mock_runner.assert_called_once_with(targets=[ + 'module.alert_processor_lambda', + 'module.box_collector_corp', + 'module.duo_admin_collector_corp', + 'module.stream_alert_athena', + 'module.stream_alert_corp', + 'module.stream_alert_prod', + 
'module.threat_intel_downloader' + ]) + + def test_rollback_all_invalid(self, mock_runner, mock_generate, mock_logger): + """CLI - Lambda Rollback all invalid""" + options = MockOptions(None, ['all']) + + # Versions $LATEST and 1 cannot be rolled back. + self.alert_config['current_version'] = 1 + rollback.rollback(options, self.config) + + fmt = '%s cannot be rolled back from version %s' + mock_logger.assert_has_calls([ + call.warn(fmt, 'alert_processor', '1'), + call.warn(fmt, 'duo_admin_collector_corp', '$LATEST'), + call.warn(fmt, 'box_collector_corp', '$LATEST'), + call.warn(fmt, 'athena_partition_refresh', '$LATEST'), + call.warn(fmt, 'rule_processor_prod', '$LATEST'), + call.warn(fmt, 'rule_processor_corp', '$LATEST'), + call.warn(fmt, 'threat_intel_downloader_config', '$LATEST') + ], any_order=True) + + # We should have returned early - no Terraform actions necessary + mock_generate.assert_not_called() + mock_runner.assert_not_called() + + def test_rollback_alert_processor(self, mock_runner, mock_generate, mock_logger): + """CLI - Lambda Rollback global alert processor""" + options = MockOptions(None, ['alert']) + self.alert_config['current_version'] = 5 + + rollback.rollback(options, self.config) + + assert_equal(4, self.alert_config['current_version']) + mock_logger.assert_not_called() + mock_generate.assert_called_once_with(config=self.config) + mock_runner.assert_called_once_with(targets=['module.alert_processor_lambda']) + + def test_rollback_rule_single_cluster(self, mock_runner, mock_generate, mock_logger): + """CLI - Lambda Rollback rule processor in one cluster""" + options = MockOptions(['prod'], ['rule']) + + self.rule_config_corp['current_version'] = 2 + self.rule_config_prod['current_version'] = 2 + + rollback.rollback(options, self.config) + + # Only the prod rule processor should have been rolled back + assert_equal(2, self.rule_config_corp['current_version']) + assert_equal(1, self.rule_config_prod['current_version']) + + 
mock_logger.assert_not_called() + mock_generate.assert_called_once_with(config=self.config) + mock_runner.assert_called_once_with(targets=['module.stream_alert_prod']) diff --git a/tests/unit/stream_alert_cli/terraform/test_alert_processor.py b/tests/unit/stream_alert_cli/terraform/test_alert_processor.py new file mode 100644 index 000000000..9309b81d5 --- /dev/null +++ b/tests/unit/stream_alert_cli/terraform/test_alert_processor.py @@ -0,0 +1,122 @@ +""" +Copyright 2017-present, Airbnb Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" +import unittest + +from nose.tools import assert_equal + +from stream_alert_cli.config import CLIConfig +from stream_alert_cli.terraform import alert_processor + + +class TestAlertProcessor(unittest.TestCase): + """Test the Terraform generation for the alert processor""" + + def setUp(self): + """Create the CLIConfig and the expected template for these tests.""" + self.config = dict(CLIConfig(config_path='tests/unit/conf')) + self.alert_proc_config = self.config['lambda']['alert_processor_config'] + + def test_generate_all_options(self): + """CLI - Terraform Generate Alert Processor - All Options""" + result = alert_processor.generate_alert_processor(config=self.config) + expected = { + 'module': { + 'alert_processor_iam': { + 'account_id': '12345678910', + 'kms_key_arn': '${aws_kms_key.stream_alert_secrets.arn}', + 'output_lambda_functions': ['test-lambda-output'], + 'output_s3_buckets': ['test-s3-output'], + 'prefix': 'unit-testing', + 'region': 'us-west-1', + 'role_id': '${module.alert_processor_lambda.role_id}', + 'source': 'modules/tf_alert_processor_iam' + }, + 'alert_processor_lambda': { + 'alarm_actions': ['arn:aws:sns:us-west-1:12345678910:stream_alert_monitoring'], + 'aliased_version': '$LATEST', + 'description': 'StreamAlert Alert Processor', + 'enable_metric_alarms': True, + 'environment_variables': { + 'LOGGER_LEVEL': 'info' + }, + 'errors_alarm_evaluation_periods': 1, + 'errors_alarm_period_secs': 2, + 'errors_alarm_threshold': 3, + 'function_name': 'unit-testing_streamalert_alert_processor', + 'handler': 'main.handler', + 'log_retention_days': 7, + 'memory_size_mb': 128, + 'source': 'modules/tf_lambda', + 'source_bucket': 'unit.testing.streamalert.source', + 'source_object_key': 'lambda/alert/source.zip', + 'throttles_alarm_evaluation_periods': 4, + 'throttles_alarm_period_secs': 5, + 'throttles_alarm_threshold': 6, + 'timeout_sec': 60, + 'vpc_security_group_ids': ['sg-abc'], + 'vpc_subnet_ids': ['subnet-123'] + } + } + } + 
assert_equal(expected, result) + + def test_generate_minimal_options(self): + """CLI - Terraform Generate Alert Processor - Minimal Options""" + for key in ['log_level', 'log_retention_days', 'metric_alarms', 'outputs', 'vpc_config']: + del self.alert_proc_config[key] + + result = alert_processor.generate_alert_processor(config=self.config) + + expected = { + 'module': { + 'alert_processor_iam': { + 'account_id': '12345678910', + 'kms_key_arn': '${aws_kms_key.stream_alert_secrets.arn}', + 'output_lambda_functions': [], + 'output_s3_buckets': [], + 'prefix': 'unit-testing', + 'region': 'us-west-1', + 'role_id': '${module.alert_processor_lambda.role_id}', + 'source': 'modules/tf_alert_processor_iam' + }, + 'alert_processor_lambda': { + 'alarm_actions': ['arn:aws:sns:us-west-1:12345678910:stream_alert_monitoring'], + 'aliased_version': '$LATEST', + 'description': 'StreamAlert Alert Processor', + 'enable_metric_alarms': True, + 'environment_variables': { + 'LOGGER_LEVEL': 'info' + }, + 'function_name': 'unit-testing_streamalert_alert_processor', + 'handler': 'main.handler', + 'log_retention_days': 14, + 'memory_size_mb': 128, + 'source': 'modules/tf_lambda', + 'source_bucket': 'unit.testing.streamalert.source', + 'source_object_key': 'lambda/alert/source.zip', + 'timeout_sec': 60, + 'vpc_security_group_ids': [], + 'vpc_subnet_ids': [] + } + } + } + assert_equal(expected, result) + + def test_generate_no_metric_alarms(self): + """CLI - Terraform Generate Alert Processor - Metric Alarms Disabled""" + self.alert_proc_config['metric_alarms']['enabled'] = False + result = alert_processor.generate_alert_processor(config=self.config) + assert_equal(False, result['module']['alert_processor_lambda']['enable_metric_alarms']) diff --git a/tests/unit/stream_alert_cli/terraform/test_generate.py b/tests/unit/stream_alert_cli/terraform/test_generate.py index 940331840..506b8f2d6 100644 --- a/tests/unit/stream_alert_cli/terraform/test_generate.py +++ 
b/tests/unit/stream_alert_cli/terraform/test_generate.py @@ -254,7 +254,6 @@ def test_generate_stream_alert_test(self): 'region': 'us-west-1', 'prefix': 'unit-testing', 'cluster': 'test', - 'kms_key_arn': '${aws_kms_key.stream_alert_secrets.arn}', 'dynamodb_ioc_table': 'test_table_name', 'threat_intel_enabled': False, 'rule_processor_enable_metrics': True, @@ -263,12 +262,6 @@ def test_generate_stream_alert_test(self): 'rule_processor_timeout': 25, 'rule_processor_version': '$LATEST', 'rule_processor_config': '${var.rule_processor_config}', - 'alert_processor_enable_metrics': True, - 'alert_processor_log_level': 'info', - 'alert_processor_memory': 128, - 'alert_processor_timeout': 25, - 'alert_processor_version': '$LATEST', - 'alert_processor_config': '${var.alert_processor_config}', } } } @@ -292,7 +285,6 @@ def test_generate_stream_alert_advanced(self): 'region': 'us-west-1', 'prefix': 'unit-testing', 'cluster': 'advanced', - 'kms_key_arn': '${aws_kms_key.stream_alert_secrets.arn}', 'dynamodb_ioc_table': 'test_table_name', 'threat_intel_enabled': False, 'rule_processor_enable_metrics': True, @@ -301,18 +293,7 @@ def test_generate_stream_alert_advanced(self): 'rule_processor_timeout': 25, 'rule_processor_version': '$LATEST', 'rule_processor_config': '${var.rule_processor_config}', - 'alert_processor_enable_metrics': True, - 'alert_processor_log_level': 'info', - 'alert_processor_memory': 128, - 'alert_processor_timeout': 25, - 'alert_processor_version': '$LATEST', - 'alert_processor_config': '${var.alert_processor_config}', - 'output_lambda_functions': ['my-lambda-function:production'], - 'output_s3_buckets': ['my-s3-bucket.with.data'], 'input_sns_topics': ['my-sns-topic-name'], - 'alert_processor_vpc_enabled': True, - 'alert_processor_vpc_subnet_ids': ['subnet-id-1'], - 'alert_processor_vpc_security_group_ids': ['sg-id-1'] } } } diff --git a/tests/unit/stream_alert_cli/terraform/test_monitoring.py b/tests/unit/stream_alert_cli/terraform/test_monitoring.py index 
8278c3990..fdc2dadc7 100644 --- a/tests/unit/stream_alert_cli/terraform/test_monitoring.py +++ b/tests/unit/stream_alert_cli/terraform/test_monitoring.py @@ -21,6 +21,7 @@ CONFIG = CLIConfig(config_path='tests/unit/conf') + def test_generate_cloudwatch_monitoring(): """CLI - Terraform Generate Cloudwatch Monitoring""" cluster_dict = _common.infinitedict() @@ -30,10 +31,7 @@ def test_generate_cloudwatch_monitoring(): expected_cloudwatch_tf = { 'source': 'modules/tf_stream_alert_monitoring', 'sns_topic_arn': 'arn:aws:sns:us-west-1:12345678910:stream_alert_monitoring', - 'lambda_functions': [ - 'unit-testing_test_streamalert_rule_processor', - 'unit-testing_test_streamalert_alert_processor' - ], + 'lambda_functions': ['unit-testing_test_streamalert_rule_processor'], 'kinesis_stream': 'unit-testing_test_stream_alert_kinesis', 'lambda_alarms_enabled': True, 'kinesis_alarms_enabled': True @@ -44,6 +42,7 @@ def test_generate_cloudwatch_monitoring(): cluster_dict['module']['cloudwatch_monitoring_test'], expected_cloudwatch_tf) + def test_generate_cloudwatch_monitoring_with_settings(): """CLI - Terraform Generate Cloudwatch Monitoring with Custom Settings""" cluster_dict = _common.infinitedict() @@ -53,10 +52,7 @@ def test_generate_cloudwatch_monitoring_with_settings(): expected_cloudwatch_tf = { 'source': 'modules/tf_stream_alert_monitoring', 'sns_topic_arn': 'arn:aws:sns:us-west-1:12345678910:stream_alert_monitoring', - 'lambda_functions': [ - 'unit-testing_advanced_streamalert_rule_processor', - 'unit-testing_advanced_streamalert_alert_processor' - ], + 'lambda_functions': ['unit-testing_advanced_streamalert_rule_processor'], 'kinesis_stream': 'unit-testing_advanced_stream_alert_kinesis', 'lambda_alarms_enabled': True, 'kinesis_alarms_enabled': True, @@ -68,6 +64,7 @@ def test_generate_cloudwatch_monitoring_with_settings(): cluster_dict['module']['cloudwatch_monitoring_advanced'], expected_cloudwatch_tf) + def test_generate_cloudwatch_monitoring_disabled(): """CLI - 
Terraform Generate Cloudwatch Monitoring Disabled""" cluster_dict = _common.infinitedict() @@ -77,6 +74,7 @@ def test_generate_cloudwatch_monitoring_disabled(): assert_true(result) assert_true('cloudwatch_monitoring_{}'.format(cluster) not in cluster_dict['module']) + def test_generate_cloudwatch_monitoring_no_kinesis(): """CLI - Terraform Generate Cloudwatch Monitoring - Kinesis Disabled""" cluster_dict = _common.infinitedict() @@ -88,10 +86,7 @@ def test_generate_cloudwatch_monitoring_no_kinesis(): expected_cloudwatch_tf = { 'source': 'modules/tf_stream_alert_monitoring', 'sns_topic_arn': 'arn:aws:sns:us-west-1:12345678910:stream_alert_monitoring', - 'lambda_functions': [ - 'unit-testing_test_streamalert_rule_processor', - 'unit-testing_test_streamalert_alert_processor' - ], + 'lambda_functions': ['unit-testing_test_streamalert_rule_processor'], 'lambda_alarms_enabled': True, 'kinesis_alarms_enabled': False } @@ -101,6 +96,7 @@ def test_generate_cloudwatch_monitoring_no_kinesis(): cluster_dict['module']['cloudwatch_monitoring_test'], expected_cloudwatch_tf) + def test_generate_cloudwatch_monitoring_no_lambda(): """CLI - Terraform Generate Cloudwatch Monitoring - Lambda Disabled""" cluster_dict = _common.infinitedict() @@ -122,14 +118,14 @@ def test_generate_cloudwatch_monitoring_no_lambda(): cluster_dict['module']['cloudwatch_monitoring_test'], expected_cloudwatch_tf) + def test_generate_cloudwatch_monitoring_custom_sns(): """CLI - Terraform Generate Cloudwatch Monitoring with Existing SNS Topic""" # Test a custom SNS topic name CONFIG['clusters']['test']['modules']['cloudwatch_monitoring'] = {'enabled': True} CONFIG['global']['infrastructure']['monitoring']['create_sns_topic'] = False - CONFIG['global']['infrastructure']['monitoring']\ - ['sns_topic_name'] = 'unit_test_monitoring' + CONFIG['global']['infrastructure']['monitoring']['sns_topic_name'] = 'unit_test_monitoring' cluster_dict = _common.infinitedict() result = monitoring.generate_monitoring('test', 
cluster_dict, CONFIG) @@ -137,10 +133,7 @@ def test_generate_cloudwatch_monitoring_custom_sns(): expected_cloudwatch_tf_custom = { 'source': 'modules/tf_stream_alert_monitoring', 'sns_topic_arn': 'arn:aws:sns:us-west-1:12345678910:unit_test_monitoring', - 'lambda_functions': [ - 'unit-testing_test_streamalert_rule_processor', - 'unit-testing_test_streamalert_alert_processor' - ], + 'lambda_functions': ['unit-testing_test_streamalert_rule_processor'], 'kinesis_stream': 'unit-testing_test_stream_alert_kinesis', 'lambda_alarms_enabled': True, 'kinesis_alarms_enabled': True @@ -151,6 +144,7 @@ def test_generate_cloudwatch_monitoring_custom_sns(): cluster_dict['module']['cloudwatch_monitoring_test'], expected_cloudwatch_tf_custom) + @patch('stream_alert_cli.terraform.monitoring.LOGGER_CLI') def test_generate_cloudwatch_monitoring_invalid_config(mock_logging): """CLI - Terraform Generate Cloudwatch Monitoring with Invalid Config""" diff --git a/tests/unit/stream_alert_cli/test_config.py b/tests/unit/stream_alert_cli/test_config.py index cfaa35de8..563d8d98e 100644 --- a/tests/unit/stream_alert_cli/test_config.py +++ b/tests/unit/stream_alert_cli/test_config.py @@ -174,6 +174,7 @@ def test_add_threat_intel_without_table_name(self, write_mock, log_mock): @patch('stream_alert_cli.config.CLIConfig.write') def test_add_threat_intel_downloader(self, write_mock, log_mock): """CLI - Add Threat Intel Downloader config""" + del self.config['lambda']['threat_intel_downloader_config'] ti_downloader_info = { 'autoscale': True, 'command': 'threat_intel_downloader', diff --git a/tests/unit/stream_alert_rule_processor/test_handler.py b/tests/unit/stream_alert_rule_processor/test_handler.py index 979a7c76f..aa91f56b7 100644 --- a/tests/unit/stream_alert_rule_processor/test_handler.py +++ b/tests/unit/stream_alert_rule_processor/test_handler.py @@ -17,6 +17,7 @@ import base64 import json import logging +import os from mock import call, patch from moto import mock_kinesis @@ -47,6 +48,7 @@ 
class TestStreamAlert(object): @patch('stream_alert.rule_processor.handler.load_config', lambda: load_config('tests/unit/conf/')) + @patch.dict(os.environ, {'ALERT_PROCESSOR': 'unit-testing_streamalert_alert_processor'}) def setup(self): """Setup before each method""" self.__sa_handler = StreamAlert(get_mock_context(), False) @@ -265,6 +267,7 @@ def test_firehose_record_delivery_disabled_logs(self): @patch('stream_alert.rule_processor.threat_intel.StreamThreatIntel._query') @patch('stream_alert.rule_processor.threat_intel.StreamThreatIntel.load_from_config') + @patch.dict(os.environ, {'ALERT_PROCESSOR': 'unit-testing_streamalert_alert_processor'}) def test_run_threat_intel_enabled(self, mock_threat_intel, mock_query): # pylint: disable=no-self-use """StreamAlert Class - Run SA when threat intel enabled""" @rule(datatypes=['sourceAddress'], outputs=['s3:sample_bucket']) diff --git a/tests/unit/stream_alert_rule_processor/test_sink.py b/tests/unit/stream_alert_rule_processor/test_sink.py index 0f875d0e4..ba3d91686 100644 --- a/tests/unit/stream_alert_rule_processor/test_sink.py +++ b/tests/unit/stream_alert_rule_processor/test_sink.py @@ -14,6 +14,7 @@ limitations under the License. 
""" from datetime import datetime +import os from botocore.exceptions import ClientError from mock import patch @@ -33,7 +34,8 @@ def setup_class(cls): cls.boto_mock = patcher.start() context = get_mock_context() env = load_env(context) - cls.sinker = StreamSink(env) + with patch.dict(os.environ, {'ALERT_PROCESSOR': 'corp-prefix_streamalert_alert_processor'}): + cls.sinker = StreamSink(env) @classmethod def teardown_class(cls): @@ -47,7 +49,7 @@ def teardown(self): def test_streamsink_init(self): """StreamSink - Init""" - assert_equal(self.sinker.function, 'corp-prefix_prod_streamalert_alert_processor') + assert_equal(self.sinker.function, 'corp-prefix_streamalert_alert_processor') @patch('stream_alert.rule_processor.sink.LOGGER.exception') def test_streamsink_sink_boto_error(self, log_mock): @@ -63,7 +65,7 @@ def test_streamsink_sink_boto_error(self, log_mock): log_mock.assert_called_with('An error occurred while sending alert to ' '\'%s:production\'. Error is: %s. Alert: %s', - 'corp-prefix_prod_streamalert_alert_processor', + 'corp-prefix_streamalert_alert_processor', err_response, '"alert!!!"') @@ -76,7 +78,7 @@ def test_streamsink_sink_resp_error(self, log_mock): self.sinker.sink(['alert!!!']) log_mock.assert_called_with('Failed to send alert to \'%s\': %s', - 'corp-prefix_prod_streamalert_alert_processor', + 'corp-prefix_streamalert_alert_processor', '"alert!!!"') @patch('stream_alert.rule_processor.sink.LOGGER.info') @@ -95,7 +97,7 @@ def test_streamsink_sink_success(self, log_mock): self.sinker.sink(['alert!!!']) log_mock.assert_called_with('Sent alert to \'%s\' with Lambda request ID \'%s\'', - 'corp-prefix_prod_streamalert_alert_processor', + 'corp-prefix_streamalert_alert_processor', 'reqID') @patch('stream_alert.rule_processor.sink.LOGGER.error')