Skip to content

Commit

Permalink
Merge pull request #4312 from gheine/batch_event_target
Browse files Browse the repository at this point in the history
Add batch job as a cloudwatch_event_target
  • Loading branch information
bflad authored Apr 23, 2018
2 parents a28d44b + ac458d8 commit c21a2eb
Show file tree
Hide file tree
Showing 3 changed files with 255 additions and 0 deletions.
72 changes: 72 additions & 0 deletions aws/resource_aws_cloudwatch_event_target.go
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,34 @@ func resourceAwsCloudWatchEventTarget() *schema.Resource {
},
},

"batch_target": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"job_definition": {
Type: schema.TypeString,
Required: true,
},
"job_name": {
Type: schema.TypeString,
Required: true,
},
"array_size": {
Type: schema.TypeInt,
Optional: true,
ValidateFunc: validation.IntBetween(2, 10000),
},
"job_attempts": {
Type: schema.TypeInt,
Optional: true,
ValidateFunc: validation.IntBetween(1, 10),
},
},
},
},

"input_transformer": {
Type: schema.TypeList,
Optional: true,
Expand Down Expand Up @@ -209,6 +237,12 @@ func resourceAwsCloudWatchEventTargetRead(d *schema.ResourceData, meta interface
}
}

if t.BatchParameters != nil {
if err := d.Set("batch_target", flattenAwsCloudWatchEventTargetBatchParameters(t.BatchParameters)); err != nil {
return fmt.Errorf("[DEBUG] Error setting batch_target error: %#v", err)
}
}

if t.InputTransformer != nil {
if err := d.Set("input_transformer", flattenAwsCloudWatchInputTransformer(t.InputTransformer)); err != nil {
return fmt.Errorf("[DEBUG] Error setting input_transformer error: %#v", err)
Expand Down Expand Up @@ -299,6 +333,9 @@ func buildPutTargetInputStruct(d *schema.ResourceData) *events.PutTargetsInput {
if v, ok := d.GetOk("ecs_target"); ok {
e.EcsParameters = expandAwsCloudWatchEventTargetEcsParameters(v.([]interface{}))
}
if v, ok := d.GetOk("batch_target"); ok {
e.BatchParameters = expandAwsCloudWatchEventTargetBatchParameters(v.([]interface{}))
}

if v, ok := d.GetOk("input_transformer"); ok {
e.InputTransformer = expandAwsCloudWatchEventTransformerParameters(v.([]interface{}))
Expand Down Expand Up @@ -344,6 +381,27 @@ func expandAwsCloudWatchEventTargetEcsParameters(config []interface{}) *events.E
return ecsParameters
}

// expandAwsCloudWatchEventTargetBatchParameters converts the Terraform
// "batch_target" configuration block (a list with at most one map) into an
// *events.BatchParameters for the PutTargets API call.
func expandAwsCloudWatchEventTargetBatchParameters(config []interface{}) *events.BatchParameters {
	out := &events.BatchParameters{}
	for _, raw := range config {
		attrs := raw.(map[string]interface{})
		out.JobDefinition = aws.String(attrs["job_definition"].(string))
		out.JobName = aws.String(attrs["job_name"].(string))
		// Only send ArrayProperties when a value within the schema's valid
		// range (2-10000) was configured; 0 means the attribute was unset.
		if size, ok := attrs["array_size"].(int); ok && size > 1 && size <= 10000 {
			out.ArrayProperties = &events.BatchArrayProperties{
				Size: aws.Int64(int64(size)),
			}
		}
		// Likewise, only send RetryStrategy for a configured value in 1-10.
		if attempts, ok := attrs["job_attempts"].(int); ok && attempts > 0 && attempts <= 10 {
			out.RetryStrategy = &events.BatchRetryStrategy{
				Attempts: aws.Int64(int64(attempts)),
			}
		}
	}

	return out
}

func expandAwsCloudWatchEventTransformerParameters(config []interface{}) *events.InputTransformer {
transformerParameters := &events.InputTransformer{}

Expand Down Expand Up @@ -385,6 +443,20 @@ func flattenAwsCloudWatchEventTargetEcsParameters(ecsParameters *events.EcsParam
return result
}

// flattenAwsCloudWatchEventTargetBatchParameters converts an
// *events.BatchParameters from the API into the single-element list-of-maps
// shape used by the Terraform "batch_target" attribute.
func flattenAwsCloudWatchEventTargetBatchParameters(batchParameters *events.BatchParameters) []map[string]interface{} {
	attrs := map[string]interface{}{
		"job_definition": aws.StringValue(batchParameters.JobDefinition),
		"job_name":       aws.StringValue(batchParameters.JobName),
	}
	// Optional nested structs are omitted from state when the API did not
	// return them.
	if props := batchParameters.ArrayProperties; props != nil {
		attrs["array_size"] = int(aws.Int64Value(props.Size))
	}
	if retry := batchParameters.RetryStrategy; retry != nil {
		attrs["job_attempts"] = int(aws.Int64Value(retry.Attempts))
	}

	return []map[string]interface{}{attrs}
}

func flattenAwsCloudWatchInputTransformer(inputTransformer *events.InputTransformer) []map[string]interface{} {
config := make(map[string]interface{})
inputPathsMap := make(map[string]string)
Expand Down
175 changes: 175 additions & 0 deletions aws/resource_aws_cloudwatch_event_target_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,26 @@ func TestAccAWSCloudWatchEventTarget_ecs(t *testing.T) {
},
})
}

// TestAccAWSCloudWatchEventTarget_batch is an acceptance test verifying that
// a CloudWatch event target pointing at an AWS Batch job queue can be created
// and is readable back from the API.
func TestAccAWSCloudWatchEventTarget_batch(t *testing.T) {
	var v events.Target

	rName := acctest.RandomWithPrefix("tf_batch_target")

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSCloudWatchEventTargetDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccAWSCloudWatchEventTargetConfigBatch(rName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckCloudWatchEventTargetExists("aws_cloudwatch_event_target.test", &v),
				),
			},
		},
	})
}

func TestAccAWSCloudWatchEventTarget_input_transformer(t *testing.T) {
var target events.Target
rName := acctest.RandomWithPrefix("tf_input_transformer")
Expand Down Expand Up @@ -473,6 +493,161 @@ EOF
}`, rName, rName, rName, rName, rName)
}

// testAccAWSCloudWatchEventTargetConfigBatch returns the Terraform
// configuration for the batch acceptance test. It provisions the full
// dependency chain a Batch-targeted event rule needs: IAM roles for
// CloudWatch Events, ECS instances, and the Batch service; a VPC, subnet,
// and security group; a managed Batch compute environment, job queue, and
// job definition; and finally the event rule/target under test. rName is
// interpolated (as %[1]s) into every resource name so runs don't collide.
func testAccAWSCloudWatchEventTargetConfigBatch(rName string) string {
return fmt.Sprintf(`
resource "aws_cloudwatch_event_rule" "cloudwatch_event_rule" {
name = "%[1]s"
description = "schedule_batch_test"
schedule_expression = "rate(5 minutes)"
}
resource "aws_cloudwatch_event_target" "test" {
arn = "${aws_batch_job_queue.batch_job_queue.arn}"
rule = "${aws_cloudwatch_event_rule.cloudwatch_event_rule.id}"
role_arn = "${aws_iam_role.event_iam_role.arn}"
batch_target {
job_definition = "${aws_batch_job_definition.batch_job_definition.arn}"
job_name = "%[1]s"
}
depends_on = [
"aws_batch_job_queue.batch_job_queue",
"aws_batch_job_definition.batch_job_definition",
"aws_iam_role.event_iam_role",
]
}
resource "aws_iam_role" "event_iam_role" {
name = "event_%[1]s"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "events.amazonaws.com"
}
}
]
}
EOF
}
resource "aws_iam_role" "ecs_iam_role" {
name = "ecs_%[1]s"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
}
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "ecs_policy_attachment" {
role = "${aws_iam_role.ecs_iam_role.name}"
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
resource "aws_iam_instance_profile" "iam_instance_profile" {
name = "ecs_%[1]s"
role = "${aws_iam_role.ecs_iam_role.name}"
}
resource "aws_iam_role" "batch_iam_role" {
name = "batch_%[1]s"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "batch.amazonaws.com"
}
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "batch_policy_attachment" {
role = "${aws_iam_role.batch_iam_role.name}"
policy_arn = "arn:aws:iam::aws:policy/service-role/AWSBatchServiceRole"
}
resource "aws_security_group" "security_group" {
name = "%[1]s"
}
resource "aws_vpc" "vpc" {
cidr_block = "10.1.0.0/16"
}
resource "aws_subnet" "subnet" {
vpc_id = "${aws_vpc.vpc.id}"
cidr_block = "10.1.1.0/24"
}
resource "aws_batch_compute_environment" "batch_compute_environment" {
compute_environment_name = "%[1]s"
compute_resources {
instance_role = "${aws_iam_instance_profile.iam_instance_profile.arn}"
instance_type = [
"c4.large",
]
max_vcpus = 16
min_vcpus = 0
security_group_ids = [
"${aws_security_group.security_group.id}"
]
subnets = [
"${aws_subnet.subnet.id}"
]
type = "EC2"
}
service_role = "${aws_iam_role.batch_iam_role.arn}"
type = "MANAGED"
depends_on = ["aws_iam_role_policy_attachment.batch_policy_attachment"]
}
resource "aws_batch_job_queue" "batch_job_queue" {
name = "%[1]s"
state = "ENABLED"
priority = 1
compute_environments = ["${aws_batch_compute_environment.batch_compute_environment.arn}"]
}
resource "aws_batch_job_definition" "batch_job_definition" {
name = "%[1]s"
type = "container"
container_properties = <<CONTAINER_PROPERTIES
{
"command": ["ls", "-la"],
"image": "busybox",
"memory": 512,
"vcpus": 1,
"volumes": [ ],
"environment": [ ],
"mountPoints": [ ],
"ulimits": [ ]
}
CONTAINER_PROPERTIES
}
`, rName)
}

func testAccAWSCloudWatchEventTargetConfigInputTransformer(rName string) string {
return fmt.Sprintf(`
Expand Down
8 changes: 8 additions & 0 deletions website/docs/r/cloudwatch_event_target.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -189,6 +189,7 @@ The following arguments are supported:
* `role_arn` - (Optional) The Amazon Resource Name (ARN) of the IAM role to be used for this target when the rule is triggered. Required if `ecs_target` is used.
* `run_command_targets` - (Optional) Parameters used when you are using the rule to invoke Amazon EC2 Run Command. Documented below. A maximum of 5 are allowed.
* `ecs_target` - (Optional) Parameters used when you are using the rule to invoke Amazon ECS Task. Documented below. A maximum of 1 are allowed.
* `batch_target` - (Optional) Parameters used when you are using the rule to invoke an Amazon Batch Job. Documented below. A maximum of 1 is allowed.
* `input_transformer` - (Optional) Parameters used when you are providing a custom input to a target based on certain event data.

`run_command_targets` support the following:
Expand All @@ -201,6 +202,13 @@ The following arguments are supported:
* `task_count` - (Optional) The number of tasks to create based on the TaskDefinition. The default is 1.
* `task_definition_arn` - (Required) The ARN of the task definition to use if the event target is an Amazon ECS cluster.

`batch_target` supports the following:

* `job_definition` - (Required) The ARN or name of the job definition to use if the event target is an AWS Batch job. This job definition must already exist.
* `job_name` - (Required) The name to use for this execution of the job, if the target is an AWS Batch job.
* `array_size` - (Optional) The size of the array, if this is an array batch job. Valid values are integers between 2 and 10,000.
* `job_attempts` - (Optional) The number of times to attempt to retry, if the job fails. Valid values are 1 to 10.

`input_transformer` support the following:

* `input_paths` - (Optional) Key value pairs specified in the form of JSONPath (for example, time = $.time)
Expand Down

0 comments on commit c21a2eb

Please sign in to comment.