From 9b521a268150567babc6badf0aee7c7f54f3f9fc Mon Sep 17 00:00:00 2001
From: Jack Naglieri
Date: Mon, 26 Jun 2017 19:30:03 -0700
Subject: [PATCH 01/10] [conf] reset the alert/rule processor versions to LATEST

---
 conf/clusters/prod.json | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/conf/clusters/prod.json b/conf/clusters/prod.json
index 03336a78f..cb53109e2 100644
--- a/conf/clusters/prod.json
+++ b/conf/clusters/prod.json
@@ -19,12 +19,12 @@
   },
   "stream_alert": {
     "alert_processor": {
-      "current_version": 7,
+      "current_version": "$LATEST",
       "memory": 128,
       "timeout": 25
     },
     "rule_processor": {
-      "current_version": 8,
+      "current_version": "$LATEST",
       "memory": 256,
       "timeout": 10
     }

From 2ecf848201c21679fadafd0514245b88b4a88ff2 Mon Sep 17 00:00:00 2001
From: Jack Naglieri
Date: Mon, 26 Jun 2017 19:34:28 -0700
Subject: [PATCH 02/10] [conf] use minimums for cluster lambda/kinesis settings

---
 conf/clusters/prod.json | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/conf/clusters/prod.json b/conf/clusters/prod.json
index cb53109e2..a640b8ce2 100644
--- a/conf/clusters/prod.json
+++ b/conf/clusters/prod.json
@@ -10,8 +10,8 @@
       "s3_bucket_suffix": "streamalert.results"
     },
     "streams": {
-      "retention": 36,
-      "shards": 5
+      "retention": 24,
+      "shards": 1
     }
   },
   "kinesis_events": {
@@ -21,11 +21,11 @@
     "alert_processor": {
       "current_version": "$LATEST",
       "memory": 128,
-      "timeout": 25
+      "timeout": 10
     },
     "rule_processor": {
       "current_version": "$LATEST",
-      "memory": 256,
+      "memory": 128,
       "timeout": 10
     }
   }
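For reference, the settings changed by the two patches above can be sanity-checked with a few lines of Python. This is a minimal sketch assuming only the key layout visible in the hunks of conf/clusters/prod.json:

    import json

    # Load the cluster config touched by patches 01 and 02
    with open('conf/clusters/prod.json') as conf_file:
        prod = json.load(conf_file)

    # '$LATEST' keeps each Lambda alias pointed at the newest published
    # code rather than a pinned, numbered version
    for processor in ('alert_processor', 'rule_processor'):
        assert prod['stream_alert'][processor]['current_version'] == '$LATEST'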
From 7865f2ee31817415440e68d22333f94f8d2145a3 Mon Sep 17 00:00:00 2001
From: Jack Naglieri
Date: Mon, 26 Jun 2017 19:39:18 -0700
Subject: [PATCH 03/10] [tf] support all cloudtrail tf module options + unit tests

---
 stream_alert_cli/terraform_generate.py      | 32 +++++++++-
 .../tf_stream_alert_cloudtrail/main.tf      | 15 ++---
 .../tf_stream_alert_cloudtrail/variables.tf |  4 ++
 .../test_terraform_generate.py              | 58 +++++++++++++++++++
 4 files changed, 97 insertions(+), 12 deletions(-)

diff --git a/stream_alert_cli/terraform_generate.py b/stream_alert_cli/terraform_generate.py
index 3581cc394..8046b6f46 100644
--- a/stream_alert_cli/terraform_generate.py
+++ b/stream_alert_cli/terraform_generate.py
@@ -352,6 +352,32 @@ def generate_cloudtrail(cluster_name, cluster_dict, config):
     """
     modules = config['clusters'][cluster_name]['modules']
     cloudtrail_enabled = bool(modules['cloudtrail']['enabled'])
+    existing_trail_default = False
+    existing_trail = modules['cloudtrail'].get('existing_trail', existing_trail_default)
+    is_global_trail_default = True
+    is_global_trail = modules['cloudtrail'].get('is_global_trail', is_global_trail_default)
+    event_pattern_default = {
+        'account': [config['global']['account']['aws_account_id']]
+    }
+    event_pattern = modules['cloudtrail'].get('event_pattern', event_pattern_default)
+
+    # From here:
+    # http://docs.aws.amazon.com/AmazonCloudWatch/latest/events/CloudWatchEventsandEventPatterns.html
+    valid_event_pattern_keys = {
+        'version',
+        'id',
+        'detail-type',
+        'source',
+        'account',
+        'time',
+        'region',
+        'resources',
+        'detail'
+    }
+    if not set(event_pattern.keys()).issubset(valid_event_pattern_keys):
+        LOGGER_CLI.error('Invalid CloudWatch Event Pattern!')
+        sys.exit(1)
+
     cluster_dict['module']['cloudtrail_{}'.format(cluster_name)] = {
         'account_id': config['global']['account']['aws_account_id'],
         'cluster': cluster_name,
@@ -360,7 +386,11 @@ def generate_cloudtrail(cluster_name, cluster_dict, config):
         'enable_logging': cloudtrail_enabled,
         'source': 'modules/tf_stream_alert_cloudtrail',
         's3_logging_bucket': '{}.streamalert.s3-logging'.format(
-            config['global']['account']['prefix'])}
+            config['global']['account']['prefix']),
+        'existing_trail': existing_trail,
+        'is_global_trail': is_global_trail,
+        'event_pattern': json.dumps(event_pattern)
+    }
 
 
 def generate_flow_logs(cluster_name, cluster_dict, config):

diff --git a/terraform/modules/tf_stream_alert_cloudtrail/main.tf b/terraform/modules/tf_stream_alert_cloudtrail/main.tf
index 0b9c05885..e61de6da0 100644
--- a/terraform/modules/tf_stream_alert_cloudtrail/main.tf
+++ b/terraform/modules/tf_stream_alert_cloudtrail/main.tf
@@ -79,17 +79,10 @@ data "aws_iam_policy_document" "cloudtrail_bucket" {
 
 // Cloudwatch event to capture Cloudtrail API calls
 resource "aws_cloudwatch_event_rule" "all_events" {
-  name = "${var.prefix}_${var.cluster}_streamalert_all_events"
-  description = "Capture all CloudWatch events"
-  role_arn = "${aws_iam_role.streamalert_cloudwatch_role.arn}"
-
-  event_pattern = <<PATTERN
-{
-  "account": [
-    "${var.account_id}"
-  ]
-}
-PATTERN
+  name          = "${var.prefix}_${var.cluster}_streamalert_all_events"
+  description   = "Capture all CloudWatch events"
+  role_arn      = "${aws_iam_role.streamalert_cloudwatch_role.arn}"
+  event_pattern = "${var.event_pattern}"
 }
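The guard added to generate_cloudtrail() above is a plain set-subset check against the documented CloudWatch Events pattern keys. A self-contained sketch of the same logic (the key set mirrors the patch; the sample patterns are hypothetical):

    VALID_EVENT_PATTERN_KEYS = {
        'version', 'id', 'detail-type', 'source', 'account',
        'time', 'region', 'resources', 'detail'
    }

    def is_valid_event_pattern(event_pattern):
        # Mirrors the issubset() check in generate_cloudtrail()
        return set(event_pattern.keys()).issubset(VALID_EVENT_PATTERN_KEYS)

    # An account-scoped pattern passes the check ...
    assert is_valid_event_pattern({'account': ['123456789012']})
    # ... while an unrecognized key fails and aborts generation
    assert not is_valid_event_pattern({'bad-key': ['foo']})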
From: Jack Naglieri
Date: Mon, 26 Jun 2017 20:02:29 -0700
Subject: [PATCH 04/10] [conf] force_destroy enabled for prerequisite buckets

---
 stream_alert_cli/terraform_generate.py                | 2 +-
 test/unit/stream_alert_cli/test_terraform_generate.py | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/stream_alert_cli/terraform_generate.py b/stream_alert_cli/terraform_generate.py
index 8046b6f46..334a8140b 100644
--- a/stream_alert_cli/terraform_generate.py
+++ b/stream_alert_cli/terraform_generate.py
@@ -53,7 +53,7 @@ def generate_s3_bucket(**kwargs):
         'target_bucket': logging_bucket,
         'target_prefix': '{}/'.format(bucket_name)
     }
-    force_destroy = kwargs.get('force_destroy', False)
+    force_destroy = kwargs.get('force_destroy', True)
     versioning = kwargs.get('versioning', True)
     lifecycle_rule = kwargs.get('lifecycle_rule')

diff --git a/test/unit/stream_alert_cli/test_terraform_generate.py b/test/unit/stream_alert_cli/test_terraform_generate.py
index c9ddd6617..f5f1f4206 100644
--- a/test/unit/stream_alert_cli/test_terraform_generate.py
+++ b/test/unit/stream_alert_cli/test_terraform_generate.py
@@ -238,7 +238,7 @@ def test_generate_main(self):
             'lambda_source': {
                 'bucket': 'unit.testing.source.bucket',
                 'acl': 'private',
-                'force_destroy': False,
+                'force_destroy': True,
                 'versioning': {
                     'enabled': True
                 },
@@ -250,7 +250,7 @@ def test_generate_main(self):
             'stream_alert_secrets': {
                 'bucket': 'unit-testing.streamalert.secrets',
                 'acl': 'private',
-                'force_destroy': False,
+                'force_destroy': True,
                 'versioning': {
                     'enabled': True
                 },
@@ -262,7 +262,7 @@ def test_generate_main(self):
             'terraform_remote_state': {
                 'bucket': 'unit-testing.terraform.tfstate',
                 'acl': 'private',
-                'force_destroy': False,
+                'force_destroy': True,
                 'versioning': {
                     'enabled': True
                 },
@@ -274,7 +274,7 @@ def test_generate_main(self):
             'logging_bucket': {
                 'bucket': 'unit-testing.streamalert.s3-logging',
                 'acl': 'log-delivery-write',
-                'force_destroy': False,
+                'force_destroy': True,
                 'versioning': {
                     'enabled': True
                 },
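The flipped default only matters when a caller omits the keyword entirely: buckets now opt out of force_destroy rather than opting in. A small sketch of the kwargs.get() behavior the patch relies on (the function is reduced to the one relevant line):

    def generate_s3_bucket(**kwargs):
        # After this patch the default is destroyable; callers must
        # explicitly pass force_destroy=False to protect a bucket
        return kwargs.get('force_destroy', True)

    assert generate_s3_bucket(bucket='unit.testing.source.bucket') is True
    assert generate_s3_bucket(force_destroy=False) is False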
From dc17bbe792580883ed14a37863c1bf365c48ad50 Mon Sep 17 00:00:00 2001
From: Jack Naglieri
Date: Mon, 26 Jun 2017 20:35:57 -0700
Subject: [PATCH 05/10] [conf] add a terraform clean command

---
 stream_alert_cli.py        |  8 +++++++-
 stream_alert_cli/runner.py | 39 ++++++++++++++++++++++++--------------
 2 files changed, 32 insertions(+), 15 deletions(-)

diff --git a/stream_alert_cli.py b/stream_alert_cli.py
index 1aea1ded0..4cc168274 100755
--- a/stream_alert_cli.py
+++ b/stream_alert_cli.py
@@ -178,7 +178,13 @@ def build_parser():
     # add subcommand options for the terraform sub-parser
     tf_parser.add_argument(
         'subcommand',
-        choices=['build', 'destroy', 'init', 'init-backend', 'generate', 'status']
+        choices=['build',
+                 'clean',
+                 'destroy',
+                 'init',
+                 'init-backend',
+                 'generate',
+                 'status']
     )
 
     tf_parser.add_argument(

diff --git a/stream_alert_cli/runner.py b/stream_alert_cli/runner.py
index 97c982c04..1f5065c24 100644
--- a/stream_alert_cli/runner.py
+++ b/stream_alert_cli/runner.py
@@ -162,6 +162,9 @@ def terraform_handler(options):
         LOGGER_CLI.info('Building Remainder Infrastructure')
         tf_runner()
 
+    elif options.subcommand == 'clean':
+        terraform_clean()
+
     elif options.subcommand == 'destroy':
         if options.target:
             target = options.target
@@ -181,26 +184,34 @@ def terraform_handler(options):
             sys.exit(1)
 
         # Remove old Terraform files
-        LOGGER_CLI.info('Removing old Terraform files')
-        cleanup_files = ['{}.tf'.format(cluster) for cluster in CONFIG.clusters()]
-        cleanup_files.extend([
-            'main.tf',
-            'terraform.tfstate',
-            'terraform.tfstate.backup'
-        ])
-        for tf_file in cleanup_files:
-            file_to_remove = 'terraform/{}'.format(tf_file)
-            if not os.path.isfile(file_to_remove):
-                continue
-            os.remove(file_to_remove)
-        # Finally, delete the Terraform directory
-        shutil.rmtree('terraform/.terraform/')
+        terraform_clean()
 
     # get a quick status on our declared infrastructure
     elif options.subcommand == 'status':
         status()
 
+
+def terraform_clean():
+    """Remove leftover Terraform statefiles and main/cluster files"""
+    LOGGER_CLI.info('Cleaning Terraform files')
+
+    cleanup_files = ['{}.tf'.format(cluster) for cluster in CONFIG.clusters()]
+    cleanup_files.extend([
+        'main.tf',
+        'terraform.tfstate',
+        'terraform.tfstate.backup'
+    ])
+    for tf_file in cleanup_files:
+        file_to_remove = 'terraform/{}'.format(tf_file)
+        if not os.path.isfile(file_to_remove):
+            continue
+        os.remove(file_to_remove)
+
+    # Finally, delete the Terraform directory
+    if os.path.isdir('terraform/.terraform/'):
+        shutil.rmtree('terraform/.terraform/')
+
+
 def run_command(args=None, **kwargs):
     """Alias to CLI Helpers.run_command"""
     return helpers.run_command(args, **kwargs)
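With the parser and handler changes above, the cleanup logic is reachable directly from the CLI; the invocation would presumably be::

    $ python stream_alert_cli.py terraform clean

Because terraform_clean() guards each removal with os.path.isfile/os.path.isdir, running it against an already-clean directory is a harmless no-op.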
From 291f62984bff10ddda3b6648d2998e6d2d433c6f Mon Sep 17 00:00:00 2001
From: Jack Naglieri
Date: Mon, 26 Jun 2017 20:57:07 -0700
Subject: [PATCH 06/10] [cli] fix bug in runner init targets

---
 stream_alert_cli/runner.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/stream_alert_cli/runner.py b/stream_alert_cli/runner.py
index 1f5065c24..93d46f11f 100644
--- a/stream_alert_cli/runner.py
+++ b/stream_alert_cli/runner.py
@@ -137,10 +137,9 @@ def terraform_handler(options):
         LOGGER_CLI.info('Building Initial Infrastructure')
         init_targets = [
             'aws_s3_bucket.lambda_source',
-            'aws_s3_bucket.integration_testing',
-            'aws_s3_bucket.terraform_state',
-            'aws_s3_bucket.stream_alert_secrets',
             'aws_s3_bucket.logging_bucket',
+            'aws_s3_bucket.stream_alert_secrets',
+            'aws_s3_bucket.terraform_remote_state',
             'aws_kms_key.stream_alert_secrets',
             'aws_kms_alias.stream_alert_secrets'
         ]
From 6dadb70a0e3a593b2ccd9ba2715602bf55342246 Mon Sep 17 00:00:00 2001
From: Jack Naglieri
Date: Mon, 26 Jun 2017 21:50:53 -0700
Subject: [PATCH 07/10] [cli] add defaults and tests to flow_logs module

---
 stream_alert_cli/terraform_generate.py          |  8 +++++++-
 .../stream_alert_cli/test_terraform_generate.py | 16 ++++++++++++++++
 2 files changed, 23 insertions(+), 1 deletion(-)

diff --git a/stream_alert_cli/terraform_generate.py b/stream_alert_cli/terraform_generate.py
index 334a8140b..62d9825e5 100644
--- a/stream_alert_cli/terraform_generate.py
+++ b/stream_alert_cli/terraform_generate.py
@@ -403,11 +403,17 @@ def generate_flow_logs(cluster_name, cluster_dict, config):
         config [dict]: The loaded config from the 'conf/' directory
     """
     modules = config['clusters'][cluster_name]['modules']
+    flow_log_group_name_default = '{}_{}_streamalert_flow_logs'.format(
+        config['global']['account']['prefix'],
+        cluster_name
+    )
+    flow_log_group_name = modules['flow_logs'].get('log_group_name', flow_log_group_name_default)
+
     if modules['flow_logs']['enabled']:
         cluster_dict['module']['flow_logs_{}'.format(cluster_name)] = {
             'source': 'modules/tf_stream_alert_flow_logs',
             'destination_stream_arn': '${{module.kinesis_{}.arn}}'.format(cluster_name),
-            'flow_log_group_name': modules['flow_logs']['log_group_name']}
+            'flow_log_group_name': flow_log_group_name}
         for flow_log_input in ('vpcs', 'subnets', 'enis'):
             input_data = modules['flow_logs'].get(flow_log_input)
             if input_data:

diff --git a/test/unit/stream_alert_cli/test_terraform_generate.py b/test/unit/stream_alert_cli/test_terraform_generate.py
index f5f1f4206..dd65930d4 100644
--- a/test/unit/stream_alert_cli/test_terraform_generate.py
+++ b/test/unit/stream_alert_cli/test_terraform_generate.py
@@ -299,11 +299,27 @@ def test_generate_main(self):
         assert_equal(tf_main['terraform'], tf_main_expected['terraform'])
         assert_equal(tf_main['resource'], tf_main_expected['resource'])
 
+
     def test_generate_stream_alert(self):
         """CLI - Terraform Generate stream_alert Module"""
         # TODO(jacknagz): Write this test
         pass
 
+
+    def test_generate_flow_logs(self):
+        """CLI - Terraform Generate flow_logs Module"""
+        cluster_name = 'advanced'
+        terraform_generate.generate_flow_logs(
+            cluster_name,
+            self.cluster_dict,
+            self.config
+        )
+
+        flow_log_config = self.cluster_dict['module']['flow_logs_advanced']
+        assert_equal(flow_log_config['flow_log_group_name'], 'unit-test-advanced')
+        assert_equal(flow_log_config['vpcs'], ['vpc-id-1', 'vpc-id-2'])
+
+
     def test_generate_cloudtrail_basic(self):
         """CLI - Terraform Generate cloudtrail Module"""
         cluster_name = 'advanced'
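The default log group name introduced above is derived from the account prefix and the cluster name. A sketch of the naming scheme (values are illustrative; the unit test above asserts 'unit-test-advanced' presumably because its fixture config sets an explicit log_group_name override):

    def default_flow_log_group_name(prefix, cluster_name):
        # Mirrors flow_log_group_name_default in generate_flow_logs()
        return '{}_{}_streamalert_flow_logs'.format(prefix, cluster_name)

    assert (default_flow_log_group_name('acme', 'prod')
            == 'acme_prod_streamalert_flow_logs')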
From 7098ac160188373f3bc6507623463bb794b979de Mon Sep 17 00:00:00 2001
From: Jack Naglieri
Date: Mon, 26 Jun 2017 21:51:16 -0700
Subject: [PATCH 08/10] [docs] update AWS permissions and cluster file options

---
 docs/source/account.rst  | 68 ++++++++++++++++++++++++++++------------
 docs/source/clusters.rst | 54 +++++++++++++++++++++++++------
 2 files changed, 92 insertions(+), 30 deletions(-)

diff --git a/docs/source/account.rst b/docs/source/account.rst
index c207114d2..5abf9d88b 100644
--- a/docs/source/account.rst
+++ b/docs/source/account.rst
@@ -26,33 +26,61 @@ prefix
 Open ``conf/global.json`` and ``conf/lambda.json`` and replace ``PREFIX_GOES_HERE`` with your company or organization name.
 
-Administrator
-~~~~~~~~~~~~~
-
-To successfully deploy StreamAlert, you need to create an administrative user in the AWS account.
-
-Steps:
+user account
+~~~~~~~~~~~~
+
+To deploy StreamAlert, you need to create an AWS user for administration.
+
+First, create the policy to attach to the user:
+
+* Go to: Services => IAM => Policies
+* Click: Create policy
+* Select: Create your Own Policy
+* Name the policy ``streamalert``, and paste the following as the ``Policy Document``:
+* Click: Create Policy
+
+.. code-block:: json
+
+  {
+    "Version": "2012-10-17",
+    "Statement": [
+      {
+        "Effect": "Allow",
+        "Action": [
+          "athena:*",
+          "cloudtrail:*",
+          "cloudwatch:*",
+          "ec2:*FlowLogs",
+          "events:*",
+          "firehose:*",
+          "iam:*",
+          "kinesis:*",
+          "kms:*",
+          "lambda:*",
+          "logs:*",
+          "s3:*",
+          "sns:*"
+        ],
+        "Resource": "*"
+      }
+    ]
+  }
+
+Next, create the user:
 
 * Go to: Services => IAM => Users
 * Click: Add user
-* Username: streamalert
+* Username: ``streamalert``
 * Access type: Programmatic access
-* Click: Next
+* Click: ``Next: Permissions``
 * Select: Attach existing policies directly
-* Attach the following policies::
-
-  * AmazonKinesisFirehoseFullAccess
-  * AmazonKinesisFullAccess
-  * AmazonS3FullAccess
-  * AmazonSNSFullAccess
-  * AWSLambdaFullAccess
-  * CloudWatchFullAccess
-  * CloudWatchLogsFullAccess
-  * IAMFullAccess
-* Click: Next (Review), and then Create User
-
-Take the Access Key and Secret Key and export them to your environment variables::
+* Attach the previously created ``streamalert`` policy
+* Click: ``Next: Review``, and then ``Create user``
+
+Copy the Access Key ID and Secret Access Key and export them to your environment variables::
 
   $ export AWS_ACCESS_KEY_ID="REPLACE_ME"
   $ export AWS_SECRET_ACCESS_KEY="REPLACE_ME"
   $ export AWS_DEFAULT_REGION="us-east-1"
+
+.. note:: Remember to save your credentials in a safe place!

diff --git a/docs/source/clusters.rst b/docs/source/clusters.rst
index 7b5cbbbc3..e0d072a04 100644
--- a/docs/source/clusters.rst
+++ b/docs/source/clusters.rst
@@ -79,11 +79,11 @@ An example ``production`` cluster::
 
 Customizing Clusters
 ~~~~~~~~~~~~~~~~~~~~
 
-Each cluster can be broken up into multiple modules to make up a StreamAlert cluster.
+Each StreamAlert cluster is made up of multiple modules.
 
 Each module corresponds to a Terraform module found in the ``terraform/modules`` directory, and serves a specific purpose in a StreamAlert cluster.
 
-After making modifications to a cluster's file, make sure you apply it with::
+After making modifications to a cluster file, make sure you apply the changes with::
 
   $ python stream_alert_cli.py terraform build
 
@@ -92,12 +92,18 @@ This will regenerate the necessary Terraform files and then apply the changes.
 
 Module: StreamAlert
 -------------------
 
-See `Lambda Settings <lambda.html>`_ for customization options.
+The main module for StreamAlert.
+
+It creates both AWS Lambda functions, aliases, an SNS topic, IAM permissions, and more.
+
+See `Lambda Settings <lambda.html>`_ for all customization options.
 
 Module: Kinesis
 ---------------
 
-See `Kinesis <kinesis.html>`_ for customization options.
+This module contains configuration for the Kinesis Streams and Kinesis Firehose infrastructure.
+
+See `Kinesis <kinesis.html>`_ for all customization options.
 
 Module: CloudWatch Monitoring
 -----------------------------
@@ -134,7 +140,7 @@ Template::
 
 Module: CloudTrail
 ------------------
 
-AWS CloudTrail is a service that enables compliance, operational auditing, and risk auditing of your AWS account.
+`AWS CloudTrail <https://aws.amazon.com/cloudtrail/>`_ is a service that enables compliance, operational auditing, and risk auditing of your AWS account.
 
 StreamAlert has native support for enabling and monitoring CloudTrail logs with the ``cloudtrail`` module.
 
 When writing rules for CloudTrail data, use the ``cloudwatch:event`` log source.
 
 By default, all API calls will be logged and accessible from rules.
 
-Template::
-
-  "cloudtrail": {
-    "enabled": true
-  }
+**template**
+
+.. code-block:: json
+
+  "cloudtrail": {
+    "enabled": true
+  }
+
+**options**
+
+=================== ======== =================================== ===========
+Key                 Required Default                             Description
+=================== ======== =================================== ===========
+``enabled``         Yes      -                                   To enable/disable the CloudTrail.
+``existing_trail``  No       ``false``                           Set to ``true`` if the account has an existing CloudTrail. This is to avoid duplication of data collected by multiple CloudTrails.
+``is_global_trail`` No       ``true``                            If the CloudTrail should collect events from all regions.
+``event_pattern``   No       ``{"account": ["<account_id>"]}``   The CloudWatch Events pattern to send to Kinesis. `More information <http://docs.aws.amazon.com/AmazonCloudWatch/latest/events/CloudWatchEventsandEventPatterns.html>`_.
+=================== ======== =================================== ===========
 
 Module: Flow Logs
 -----------------
@@ -157,7 +176,9 @@ In the settings below, an arbitrary amount of subnets, vpcs, and enis can be enabled.
 
 When writing rules for this data, use the ``cloudwatch:flow_logs`` log source.
 
-Template::
+**template**
+
+.. code-block:: json
 
   "flow_logs": {
     "enabled": true,
@@ -175,3 +196,16 @@ Template::
       "..."
     ]
   }
+
+**options**
+
+================== ======== ==================================== ===========
+Key                Required Default                              Description
+================== ======== ==================================== ===========
+``enabled``        Yes      -                                    To enable/disable the Flow log creation.
+``log_group_name`` No       prefix_cluster_streamalert_flow_logs The name of the CloudWatch Log group.
+``subnets``        No       None                                 The list of AWS VPC subnet IDs to collect flow logs from.
+``vpcs``           No       None                                 The list of AWS VPC IDs to collect flow logs from.
+``enis``           No       None                                 The list of AWS ENIs to collect flow logs from.
+================== ======== ==================================== ===========
\ No newline at end of file
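Putting the newly documented options together, a fully specified ``cloudtrail`` module entry in a cluster file might look like the following (all values are illustrative; the event_pattern keys must come from the valid set enforced in patch 03):

  "cloudtrail": {
    "enabled": true,
    "existing_trail": false,
    "is_global_trail": true,
    "event_pattern": {
      "account": ["123456789012"],
      "detail-type": ["AWS API Call via CloudTrail"]
    }
  }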
From 3c74456f24108a32c6bb36c3f40d018179c4a013 Mon Sep 17 00:00:00 2001
From: Jack Naglieri
Date: Tue, 27 Jun 2017 10:24:39 -0700
Subject: [PATCH 09/10] [cli] error handling up and down the CLI stack

---
 stream_alert_cli/runner.py             | 38 +++++++-----
 stream_alert_cli/terraform_generate.py | 84 ++++++++++++++++++++++----
 2 files changed, 96 insertions(+), 26 deletions(-)

diff --git a/stream_alert_cli/runner.py b/stream_alert_cli/runner.py
index 93d46f11f..a1c080b09 100644
--- a/stream_alert_cli/runner.py
+++ b/stream_alert_cli/runner.py
@@ -70,12 +70,14 @@ def lambda_handler(options):
     if options.subcommand == 'deploy':
         # Make sure the Terraform code is up to date
-        terraform_generate(config=CONFIG)
+        if not terraform_generate(config=CONFIG):
+            return
         deploy(options)
 
     elif options.subcommand == 'rollback':
         # Make sure the Terraform code is up to date
-        terraform_generate(config=CONFIG)
+        if not terraform_generate(config=CONFIG):
+            return
         rollback(options)
 
     elif options.subcommand == 'test':
@@ -87,7 +89,7 @@ def terraform_check():
     prereqs_message = ('Terraform not found! Please install and add to '
                        'your $PATH:\n'
                        '\t$ export PATH=$PATH:/usr/local/terraform/bin')
-    run_command(['terraform', 'version'],
+    return run_command(['terraform', 'version'],
                 error_message=prereqs_message,
                 quiet=True)
@@ -103,7 +105,8 @@ def terraform_handler(options):
     # plan/apply our streamalert infrastructure
     if options.subcommand == 'build':
         # Make sure the Terraform is completely up to date
-        terraform_generate(config=CONFIG)
+        if not terraform_generate(config=CONFIG):
+            return
         # --target is for terraforming a specific streamalert module
         if options.target:
             target = options.target
@@ -115,7 +118,8 @@ def terraform_handler(options):
 
     # generate terraform files
     elif options.subcommand == 'generate':
-        terraform_generate(config=CONFIG)
+        if not terraform_generate(config=CONFIG):
+            return
 
     elif options.subcommand == 'init-backend':
         run_command(['terraform', 'init'])
@@ -126,8 +130,7 @@ def terraform_handler(options):
 
         # generate init Terraform files
         if not terraform_generate(config=CONFIG, init=True):
-            LOGGER_CLI.error('An error occured while generating Terraform files')
-            sys.exit(1)
+            return
 
         LOGGER_CLI.info('Initializing Terraform')
         if not run_command(['terraform', 'init']):
@@ -149,9 +152,11 @@ def terraform_handler(options):
 
         # generate the main.tf with remote state enabled
         LOGGER_CLI.info('Configuring Terraform Remote State')
-        terraform_generate(config=CONFIG)
+        if not terraform_generate(config=CONFIG):
+            return
+
         if not run_command(['terraform', 'init']):
-            sys.exit(1)
+            return
 
         LOGGER_CLI.info('Deploying Lambda Functions')
         # deploy both lambda functions
@@ -174,13 +179,15 @@ def terraform_handler(options):
 
         # Migrate back to local state so Terraform can successfully
         # destroy the S3 bucket used by the backend.
-        terraform_generate(config=CONFIG, init=True)
+        if not terraform_generate(config=CONFIG, init=True):
+            return
+
         if not run_command(['terraform', 'init']):
-            sys.exit(1)
+            return
 
         # Destroy all of the infrastructure
         if not tf_runner(action='destroy'):
-            sys.exit(1)
+            return
 
         # Remove old Terraform files
         terraform_clean()
@@ -333,7 +340,9 @@ def rollback(options):
     targets = ['module.stream_alert_{}'.format(x) for x in CONFIG.clusters()]
 
-    terraform_generate(config=CONFIG)
+    if not terraform_generate(config=CONFIG):
+        return
+
     tf_runner(targets=targets)
@@ -400,7 +409,8 @@ def deploy_alert_processor():
     publish_version(packages)
     # after the version is published and the config is written, generate the files
     # to ensure the alias is properly updated
-    terraform_generate(config=CONFIG)
+    if not terraform_generate(config=CONFIG):
+        return
 
     # apply the changes from publishing
     tf_runner(targets=targets)

diff --git a/stream_alert_cli/terraform_generate.py b/stream_alert_cli/terraform_generate.py
index 62d9825e5..bc49a9e96 100644
--- a/stream_alert_cli/terraform_generate.py
+++ b/stream_alert_cli/terraform_generate.py
@@ -198,6 +198,9 @@ def generate_stream_alert(cluster_name, cluster_dict, config):
                 "timeout": 10
             }
         }
+
+    Returns:
+        [bool] Result of applying the stream_alert module
     """
     account = config['global']['account']
     modules = config['clusters'][cluster_name]['modules']
@@ -250,6 +253,8 @@ def generate_stream_alert(cluster_name, cluster_dict, config):
             'alert_processor_vpc_security_group_ids': vpc_config['security_group_ids']
         })
 
+    return True
+
 
 def generate_cloudwatch_monitoring(cluster_name, cluster_dict, config):
     """Add the CloudWatch Monitoring module to the Terraform cluster dict.
@@ -259,6 +264,9 @@ def generate_cloudwatch_monitoring(cluster_name, cluster_dict, config):
         cluster_dict [defaultdict]: The dict containing all Terraform config for
                                     a given cluster.
         config [dict]: The loaded config from the 'conf/' directory
+
+    Returns:
+        [bool] Result of applying the cloudwatch_monitoring module
     """
     prefix = config['global']['account']['prefix']
     cluster_dict['module']['cloudwatch_monitoring_{}'.format(cluster_name)] = {
@@ -271,6 +279,8 @@ def generate_cloudwatch_monitoring(cluster_name, cluster_dict, config):
         'kinesis_stream': '{}_{}_stream_alert_kinesis'.format(prefix, cluster_name)
     }
 
+    return True
+
 
 def generate_kinesis(cluster_name, cluster_dict, config):
     """Add the Kinesis module to the Terraform cluster dict.
@@ -280,6 +290,9 @@ def generate_kinesis(cluster_name, cluster_dict, config):
         cluster_dict [defaultdict]: The dict containing all Terraform config for
                                     a given cluster.
         config [dict]: The loaded config from the 'conf/' directory
+
+    Returns:
+        [bool] Result of applying the kinesis module
     """
     logging_bucket = '{}.streamalert.s3-logging'.format(
         config['global']['account']['prefix'])
@@ -303,6 +316,8 @@ def generate_kinesis(cluster_name, cluster_dict, config):
         's3_logging_bucket': logging_bucket
     }
 
+    return True
+
 
 def generate_outputs(cluster_name, cluster_dict, config):
     """Add the outputs to the Terraform cluster dict.
@@ -312,12 +327,17 @@ def generate_outputs(cluster_name, cluster_dict, config):
         cluster_dict [defaultdict]: The dict containing all Terraform config for
                                     a given cluster.
         config [dict]: The loaded config from the 'conf/' directory
+
+    Returns:
+        [bool] Result of applying all outputs
     """
     for module, output_vars in config['clusters'][cluster_name]['outputs'].iteritems():
         for output_var in output_vars:
             cluster_dict['output']['{}_{}_{}'.format(module, cluster_name, output_var)] = {
                 'value': '${{module.{}_{}.{}}}'.format(module, cluster_name, output_var)}
 
+    return True
+
 
 def generate_kinesis_events(cluster_name, cluster_dict, config):
     """Add the Kinesis Events module to the Terraform cluster dict.
@@ -327,6 +347,9 @@ def generate_kinesis_events(cluster_name, cluster_dict, config):
         cluster_dict [defaultdict]: The dict containing all Terraform config for
                                     a given cluster.
         config [dict]: The loaded config from the 'conf/' directory
+
+    Returns:
+        [bool] Result of applying the kinesis_events module
     """
     kinesis_events_enabled = bool(
         config['clusters'][cluster_name]['modules']['kinesis_events']['enabled'])
@@ -340,6 +363,8 @@ def generate_kinesis_events(cluster_name, cluster_dict, config):
         'role_policy_prefix': cluster_name
     }
 
+    return True
+
 
 def generate_cloudtrail(cluster_name, cluster_dict, config):
     """Add the CloudTrail module to the Terraform cluster dict.
@@ -349,6 +374,9 @@ def generate_cloudtrail(cluster_name, cluster_dict, config):
         cluster_dict [defaultdict]: The dict containing all Terraform config for
                                     a given cluster.
         config [dict]: The loaded config from the 'conf/' directory
+
+    Returns:
+        [bool] Result of applying the cloudtrail module
     """
     modules = config['clusters'][cluster_name]['modules']
     cloudtrail_enabled = bool(modules['cloudtrail']['enabled'])
@@ -376,7 +404,7 @@ def generate_cloudtrail(cluster_name, cluster_dict, config):
     }
     if not set(event_pattern.keys()).issubset(valid_event_pattern_keys):
         LOGGER_CLI.error('Invalid CloudWatch Event Pattern!')
-        sys.exit(1)
+        return False
 
     cluster_dict['module']['cloudtrail_{}'.format(cluster_name)] = {
         'account_id': config['global']['account']['aws_account_id'],
@@ -392,6 +420,8 @@ def generate_cloudtrail(cluster_name, cluster_dict, config):
         'event_pattern': json.dumps(event_pattern)
     }
 
+    return True
+
 
 def generate_flow_logs(cluster_name, cluster_dict, config):
     """Add the VPC Flow Logs module to the Terraform cluster dict.
@@ -401,6 +431,9 @@ def generate_flow_logs(cluster_name, cluster_dict, config):
         cluster_dict [defaultdict]: The dict containing all Terraform config for
                                     a given cluster.
         config [dict]: The loaded config from the 'conf/' directory
+
+    Returns:
+        [bool] Result of applying the flow_logs module
     """
     modules = config['clusters'][cluster_name]['modules']
     flow_log_group_name_default = '{}_{}_streamalert_flow_logs'.format(
@@ -419,6 +452,10 @@ def generate_flow_logs(cluster_name, cluster_dict, config):
             if input_data:
                 cluster_dict['module']['flow_logs_{}'.format(
                     cluster_name)][flow_log_input] = input_data
+        return True
+    else:
+        LOGGER_CLI.info('Flow logs disabled, nothing to do')
+        return False
 
 
 def generate_s3_events(cluster_name, cluster_dict, config):
     """Add the S3 Events module to the Terraform cluster dict.
@@ -429,9 +466,13 @@ def generate_s3_events(cluster_name, cluster_dict, config):
         cluster_dict [defaultdict]: The dict containing all Terraform config for
                                     a given cluster.
         config [dict]: The loaded config from the 'conf/' directory
+
+    Returns:
+        [bool] Result of applying the s3_events module
     """
     modules = config['clusters'][cluster_name]['modules']
     s3_bucket_id = modules['s3_events'].get('s3_bucket_id')
+
     if s3_bucket_id:
         cluster_dict['module']['s3_events_{}'.format(cluster_name)] = {
             'source': 'modules/tf_stream_alert_s3_events',
@@ -441,11 +482,12 @@ def generate_s3_events(cluster_name, cluster_dict, config):
                 cluster_name),
             's3_bucket_id': s3_bucket_id,
             's3_bucket_arn': 'arn:aws:s3:::{}'.format(s3_bucket_id)}
+        return True
     else:
         LOGGER_CLI.error(
             'Config Error: Missing S3 bucket in %s s3_events module',
             cluster_name)
-        sys.exit(1)
+        return False
 
 
 def generate_cluster(**kwargs):
     """Generate a StreamAlert cluster file.
@@ -454,39 +496,49 @@ def generate_cluster(**kwargs):
     Keyword Args:
         cluster_name [string]: The name of the currently generating cluster
         config [dict]: The loaded config from the 'conf/' directory
+
+    Returns:
+        [dict] generated Terraform cluster dictionary
     """
     config = kwargs.get('config')
     cluster_name = kwargs.get('cluster_name')
     account = config['global']['account']
-
     modules = config['clusters'][cluster_name]['modules']
     cluster_dict = infinitedict()
 
-    generate_stream_alert(cluster_name, cluster_dict, config)
+    if not generate_stream_alert(cluster_name, cluster_dict, config):
+        return
 
     if modules['cloudwatch_monitoring']['enabled']:
-        generate_cloudwatch_monitoring(cluster_name, cluster_dict, config)
+        if not generate_cloudwatch_monitoring(cluster_name, cluster_dict, config):
+            return
 
-    generate_kinesis(cluster_name, cluster_dict, config)
+    if not generate_kinesis(cluster_name, cluster_dict, config):
+        return
 
     outputs = config['clusters'][cluster_name].get('outputs')
     if outputs:
-        generate_outputs(cluster_name, cluster_dict, config)
+        if not generate_outputs(cluster_name, cluster_dict, config):
+            return
 
-    generate_kinesis_events(cluster_name, cluster_dict, config)
+    if not generate_kinesis_events(cluster_name, cluster_dict, config):
+        return
 
     cloudtrail_info = modules.get('cloudtrail')
     if cloudtrail_info:
-        generate_cloudtrail(cluster_name, cluster_dict, config)
+        if not generate_cloudtrail(cluster_name, cluster_dict, config):
+            return
 
     flow_log_info = modules.get('flow_logs')
     if flow_log_info:
-        generate_flow_logs(cluster_name, cluster_dict, config)
+        if not generate_flow_logs(cluster_name, cluster_dict, config):
+            return
 
     s3_events_info = modules.get('s3_events')
     if s3_events_info:
-        generate_s3_events(cluster_name, cluster_dict, config)
+        if not generate_s3_events(cluster_name, cluster_dict, config):
+            return
 
     return cluster_dict
@@ -497,6 +549,9 @@ def terraform_generate(**kwargs):
     Keyword Args:
         config [dict]: The loaded config from the 'conf/' directory
         init [bool]: Indicates if main.tf is generated for `terraform init`
+
+    Returns:
+        [bool]: Result of cluster generating
     """
     config = kwargs.get('config')
     init = kwargs.get('init', False)
@@ -521,8 +576,13 @@ def terraform_generate(**kwargs):
             raise InvalidClusterName('Rename cluster "main" to something else!')
 
         LOGGER_CLI.info('Generating cluster file: %s.tf', cluster)
+        cluster_dict = generate_cluster(cluster_name=cluster, config=config)
+        if not cluster_dict:
+            LOGGER_CLI.error('An error was generated while creating the %s cluster', cluster)
+            return False
+
         cluster_json = json.dumps(
-            generate_cluster(cluster_name=cluster, config=config),
+            cluster_dict,
            indent=2,
            sort_keys=True
        )
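The patch above establishes a uniform contract: each generate_* helper now returns a truthy value on success and False (or None) on failure, and callers short-circuit instead of calling sys.exit() deep in the stack. A minimal sketch of the pattern (hypothetical helper and key names):

    def generate_example(cluster_dict, config):
        # Report failure to the caller instead of exiting the process
        if 'required_key' not in config:
            return False
        cluster_dict['module']['example'] = {'setting': config['required_key']}
        return True

    def generate_cluster(cluster_dict, config):
        if not generate_example(cluster_dict, config):
            return  # caller sees a falsy result and stops
        return cluster_dict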
From 61d5a1395894715c1cbd5761b8febb760f13793e Mon Sep 17 00:00:00 2001
From: Jack Naglieri
Date: Tue, 27 Jun 2017 11:29:31 -0700
Subject: [PATCH 10/10] [cli] autopep8 on runner/tf generate

---
 stream_alert_cli/runner.py             | 4 ++--
 stream_alert_cli/terraform_generate.py | 9 ++++++---
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/stream_alert_cli/runner.py b/stream_alert_cli/runner.py
index a1c080b09..bd7d30de3 100644
--- a/stream_alert_cli/runner.py
+++ b/stream_alert_cli/runner.py
@@ -90,8 +90,8 @@ def terraform_check():
                        'your $PATH:\n'
                        '\t$ export PATH=$PATH:/usr/local/terraform/bin')
     return run_command(['terraform', 'version'],
-                error_message=prereqs_message,
-                quiet=True)
+                       error_message=prereqs_message,
+                       quiet=True)

diff --git a/stream_alert_cli/terraform_generate.py b/stream_alert_cli/terraform_generate.py
index bc49a9e96..2ade7ce4e 100644
--- a/stream_alert_cli/terraform_generate.py
+++ b/stream_alert_cli/terraform_generate.py
@@ -383,7 +383,8 @@ def generate_cloudtrail(cluster_name, cluster_dict, config):
     existing_trail_default = False
     existing_trail = modules['cloudtrail'].get('existing_trail', existing_trail_default)
     is_global_trail_default = True
-    is_global_trail = modules['cloudtrail'].get('is_global_trail', is_global_trail_default)
+    is_global_trail = modules['cloudtrail'].get(
+        'is_global_trail', is_global_trail_default)
     event_pattern_default = {
         'account': [config['global']['account']['aws_account_id']]
     }
@@ -440,7 +441,8 @@ def generate_flow_logs(cluster_name, cluster_dict, config):
         config['global']['account']['prefix'],
         cluster_name
     )
-    flow_log_group_name = modules['flow_logs'].get('log_group_name', flow_log_group_name_default)
+    flow_log_group_name = modules['flow_logs'].get(
+        'log_group_name', flow_log_group_name_default)
 
     if modules['flow_logs']['enabled']:
         cluster_dict['module']['flow_logs_{}'.format(cluster_name)] = {
@@ -578,7 +580,8 @@ def terraform_generate(**kwargs):
         LOGGER_CLI.info('Generating cluster file: %s.tf', cluster)
         cluster_dict = generate_cluster(cluster_name=cluster, config=config)
         if not cluster_dict:
-            LOGGER_CLI.error('An error was generated while creating the %s cluster', cluster)
+            LOGGER_CLI.error(
+                'An error was generated while creating the %s cluster', cluster)
             return False
 
         cluster_json = json.dumps(