Skip to content

Commit

Permalink
New Resource: aws_s3_bucket_replication
Browse files Browse the repository at this point in the history
  • Loading branch information
elbuo8 committed Mar 5, 2018
1 parent 17b7168 commit eea0984
Show file tree
Hide file tree
Showing 6 changed files with 666 additions and 70 deletions.
1 change: 1 addition & 0 deletions aws/provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -490,6 +490,7 @@ func Provider() terraform.ResourceProvider {
"aws_s3_bucket": resourceAwsS3Bucket(),
"aws_s3_bucket_policy": resourceAwsS3BucketPolicy(),
"aws_s3_bucket_object": resourceAwsS3BucketObject(),
"aws_s3_bucket_replication": resourceAwsS3BucketReplication(),
"aws_s3_bucket_notification": resourceAwsS3BucketNotification(),
"aws_s3_bucket_metric": resourceAwsS3BucketMetric(),
"aws_security_group": resourceAwsSecurityGroup(),
Expand Down
143 changes: 74 additions & 69 deletions aws/resource_aws_s3_bucket.go
Original file line number Diff line number Diff line change
Expand Up @@ -994,25 +994,25 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
}

// Read the bucket replication configuration

replicationResponse, err := retryOnAwsCode("NoSuchBucket", func() (interface{}, error) {
return s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{
Bucket: aws.String(d.Id()),
if _, ok := d.GetOk("replication_configuration"); ok {
replicationResponse, err := retryOnAwsCode("NoSuchBucket", func() (interface{}, error) {
return s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{
Bucket: aws.String(d.Id()),
})
})
})
if err != nil {
if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 {
return err
if err != nil {
if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() != 404 {
return err
}
}
}
replication := replicationResponse.(*s3.GetBucketReplicationOutput)
replication := replicationResponse.(*s3.GetBucketReplicationOutput)

log.Printf("[DEBUG] S3 Bucket: %s, read replication configuration: %v", d.Id(), replication)
if err := d.Set("replication_configuration", flattenAwsS3BucketReplicationConfiguration(replication.ReplicationConfiguration)); err != nil {
log.Printf("[DEBUG] Error setting replication configuration: %s", err)
return err
log.Printf("[DEBUG] S3 Bucket: %s, read replication configuration: %v", d.Id(), replication)
if err := d.Set("replication_configuration", flattenAwsS3BucketReplicationConfiguration(replication.ReplicationConfiguration)); err != nil {
log.Printf("[DEBUG] Error setting replication configuration: %s", err)
return err
}
}

// Read the bucket server side encryption configuration

encryptionResponse, err := retryOnAwsCode("NoSuchBucket", func() (interface{}, error) {
Expand All @@ -1037,7 +1037,6 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
}
}
}

// Add the region as an attribute

locationResponse, err := retryOnAwsCode("NoSuchBucket", func() (interface{}, error) {
Expand Down Expand Up @@ -1691,60 +1690,8 @@ func resourceAwsS3BucketReplicationConfigurationUpdate(s3conn *s3.S3, d *schema.

c := replicationConfiguration[0].(map[string]interface{})

rc := &s3.ReplicationConfiguration{}
if val, ok := c["role"]; ok {
rc.Role = aws.String(val.(string))
}

rcRules := c["rules"].(*schema.Set).List()
rules := []*s3.ReplicationRule{}
for _, v := range rcRules {
rr := v.(map[string]interface{})
rcRule := &s3.ReplicationRule{
Prefix: aws.String(rr["prefix"].(string)),
Status: aws.String(rr["status"].(string)),
}

if rrid, ok := rr["id"]; ok {
rcRule.ID = aws.String(rrid.(string))
}

ruleDestination := &s3.Destination{}
if dest, ok := rr["destination"].(*schema.Set); ok && dest.Len() > 0 {
bd := dest.List()[0].(map[string]interface{})
ruleDestination.Bucket = aws.String(bd["bucket"].(string))

if storageClass, ok := bd["storage_class"]; ok && storageClass != "" {
ruleDestination.StorageClass = aws.String(storageClass.(string))
}

if replicaKmsKeyId, ok := bd["replica_kms_key_id"]; ok && replicaKmsKeyId != "" {
ruleDestination.EncryptionConfiguration = &s3.EncryptionConfiguration{
ReplicaKmsKeyID: aws.String(replicaKmsKeyId.(string)),
}
}
}
rcRule.Destination = ruleDestination

if ssc, ok := rr["source_selection_criteria"].(*schema.Set); ok && ssc.Len() > 0 {
sscValues := ssc.List()[0].(map[string]interface{})
ruleSsc := &s3.SourceSelectionCriteria{}
if sseKms, ok := sscValues["sse_kms_encrypted_objects"].(*schema.Set); ok && sseKms.Len() > 0 {
sseKmsValues := sseKms.List()[0].(map[string]interface{})
sseKmsEncryptedObjects := &s3.SseKmsEncryptedObjects{}
if sseKmsValues["enabled"].(bool) {
sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusEnabled)
} else {
sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusDisabled)
}
ruleSsc.SseKmsEncryptedObjects = sseKmsEncryptedObjects
}
rcRule.SourceSelectionCriteria = ruleSsc
}
rules = append(rules, rcRule)
}
rc := buildAwsS3BucketReplicationConfiguration(c)

rc.Rules = rules
i := &s3.PutBucketReplicationInput{
Bucket: aws.String(bucket),
ReplicationConfiguration: rc,
Expand Down Expand Up @@ -2002,6 +1949,64 @@ func flattenAwsS3BucketReplicationConfiguration(r *s3.ReplicationConfiguration)
return replication_configuration
}

// buildAwsS3BucketReplicationConfiguration converts the flattened Terraform
// "replication_configuration" block (an IAM role plus a set of rules) into
// the SDK's *s3.ReplicationConfiguration, ready for PutBucketReplication.
func buildAwsS3BucketReplicationConfiguration(c map[string]interface{}) *s3.ReplicationConfiguration {
	config := &s3.ReplicationConfiguration{}

	// IAM role ARN that S3 assumes when replicating objects.
	if role, ok := c["role"]; ok {
		config.Role = aws.String(role.(string))
	}

	ruleList := c["rules"].(*schema.Set).List()
	replicationRules := make([]*s3.ReplicationRule, 0, len(ruleList))
	for _, raw := range ruleList {
		data := raw.(map[string]interface{})

		rule := &s3.ReplicationRule{
			Prefix: aws.String(data["prefix"].(string)),
			Status: aws.String(data["status"].(string)),
		}
		if id, ok := data["id"]; ok {
			rule.ID = aws.String(id.(string))
		}

		// Destination bucket plus optional storage class / replica KMS key.
		destination := &s3.Destination{}
		if destSet, ok := data["destination"].(*schema.Set); ok && destSet.Len() > 0 {
			destData := destSet.List()[0].(map[string]interface{})
			destination.Bucket = aws.String(destData["bucket"].(string))
			if sc, ok := destData["storage_class"]; ok && sc != "" {
				destination.StorageClass = aws.String(sc.(string))
			}
			if kmsKey, ok := destData["replica_kms_key_id"]; ok && kmsKey != "" {
				destination.EncryptionConfiguration = &s3.EncryptionConfiguration{
					ReplicaKmsKeyID: aws.String(kmsKey.(string)),
				}
			}
		}
		rule.Destination = destination

		// Optional source-selection criteria: SSE-KMS encrypted objects toggle.
		if criteriaSet, ok := data["source_selection_criteria"].(*schema.Set); ok && criteriaSet.Len() > 0 {
			criteriaData := criteriaSet.List()[0].(map[string]interface{})
			criteria := &s3.SourceSelectionCriteria{}
			if kmsSet, ok := criteriaData["sse_kms_encrypted_objects"].(*schema.Set); ok && kmsSet.Len() > 0 {
				kmsData := kmsSet.List()[0].(map[string]interface{})
				status := s3.SseKmsEncryptedObjectsStatusDisabled
				if kmsData["enabled"].(bool) {
					status = s3.SseKmsEncryptedObjectsStatusEnabled
				}
				criteria.SseKmsEncryptedObjects = &s3.SseKmsEncryptedObjects{
					Status: aws.String(status),
				}
			}
			rule.SourceSelectionCriteria = criteria
		}

		replicationRules = append(replicationRules, rule)
	}

	config.Rules = replicationRules
	return config
}

func normalizeRoutingRules(w []*s3.RoutingRule) (string, error) {
withNulls, err := json.Marshal(w)
if err != nil {
Expand Down
192 changes: 192 additions & 0 deletions aws/resource_aws_s3_bucket_replication.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,192 @@
package aws

import (
"fmt"
"log"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/hashicorp/terraform/helper/schema"
)

// resourceAwsS3BucketReplication returns the schema and CRUD handlers for the
// aws_s3_bucket_replication resource, which manages a bucket's replication
// configuration independently of the aws_s3_bucket resource itself.
// Create and Update share one handler: PutBucketReplication replaces the
// whole configuration either way.
func resourceAwsS3BucketReplication() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsS3BucketReplicationPut,
		Read:   resourceAwsS3BucketReplicationRead,
		Update: resourceAwsS3BucketReplicationPut,
		Delete: resourceAwsS3BucketReplicationDelete,

		Schema: map[string]*schema.Schema{
			// Name of the source bucket; changing it forces a new resource.
			"bucket": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// Single replication_configuration block mirroring the structure
			// used inline on aws_s3_bucket (role + set of rules).
			"replication_configuration": {
				Type:     schema.TypeList,
				Required: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						// IAM role ARN S3 assumes to replicate objects.
						"role": {
							Type:     schema.TypeString,
							Required: true,
						},
						"rules": {
							Type:     schema.TypeSet,
							Required: true,
							Set:      rulesHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									// Optional rule identifier (validated length).
									"id": {
										Type:         schema.TypeString,
										Optional:     true,
										ValidateFunc: validateS3BucketReplicationRuleId,
									},
									// Exactly one destination per rule.
									"destination": {
										Type:     schema.TypeSet,
										MaxItems: 1,
										MinItems: 1,
										Required: true,
										Set:      destinationHash,
										Elem: &schema.Resource{
											Schema: map[string]*schema.Schema{
												// Destination bucket ARN (not just the name).
												"bucket": {
													Type:         schema.TypeString,
													Required:     true,
													ValidateFunc: validateArn,
												},
												"storage_class": {
													Type:         schema.TypeString,
													Optional:     true,
													ValidateFunc: validateS3BucketReplicationDestinationStorageClass,
												},
												// KMS key for re-encrypting replicas.
												"replica_kms_key_id": {
													Type:     schema.TypeString,
													Optional: true,
												},
											},
										},
									},
									// Optional filter: replicate only SSE-KMS encrypted objects.
									"source_selection_criteria": {
										Type:     schema.TypeSet,
										Optional: true,
										MinItems: 1,
										MaxItems: 1,
										Set:      sourceSelectionCriteriaHash,
										Elem: &schema.Resource{
											Schema: map[string]*schema.Schema{
												"sse_kms_encrypted_objects": {
													Type:     schema.TypeSet,
													Optional: true,
													MinItems: 1,
													MaxItems: 1,
													Set:      sourceSseKmsObjectsHash,
													Elem: &schema.Resource{
														Schema: map[string]*schema.Schema{
															"enabled": {
																Type:     schema.TypeBool,
																Required: true,
															},
														},
													},
												},
											},
										},
									},
									// Key prefix selecting which objects the rule applies to.
									"prefix": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketReplicationRulePrefix,
									},
									// "Enabled" or "Disabled".
									"status": {
										Type:         schema.TypeString,
										Required:     true,
										ValidateFunc: validateS3BucketReplicationRuleStatus,
									},
								},
							},
						},
					},
				},
			},
		},
	}
}

// resourceAwsS3BucketReplicationPut creates or updates the bucket's
// replication configuration (PutBucketReplication replaces the whole
// configuration), then refreshes state via the read handler.
func resourceAwsS3BucketReplicationPut(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	bucket := d.Get("bucket").(string)
	cfgBlock := d.Get("replication_configuration").([]interface{})[0].(map[string]interface{})

	input := &s3.PutBucketReplicationInput{
		Bucket:                   aws.String(bucket),
		ReplicationConfiguration: buildAwsS3BucketReplicationConfiguration(cfgBlock),
	}
	log.Printf("[DEBUG] S3 put bucket replication configuration: %#v", input)

	if _, err := s3conn.PutBucketReplication(input); err != nil {
		return fmt.Errorf("Error putting S3 replication configuration: %s", err)
	}

	// The bucket name doubles as the resource ID.
	d.SetId(bucket)
	return resourceAwsS3BucketReplicationRead(d, meta)
}

// resourceAwsS3BucketReplicationRead refreshes the replication configuration
// for the bucket stored in d.Id().
//
// Fixes over the original:
//   - Errors that are not awserr.RequestFailure (network failures, credential
//     problems) were silently swallowed because the error was only returned
//     when the type assertion succeeded AND the status was not 404; now every
//     non-404 error is surfaced.
//   - A 404 (bucket or replication configuration removed out of band) now
//     removes the resource from state so Terraform plans a re-create,
//     instead of leaving stale attributes behind.
func resourceAwsS3BucketReplicationRead(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	log.Printf("[DEBUG] S3 bucket replication, reading for bucket: %s", d.Id())

	replication, err := s3conn.GetBucketReplication(&s3.GetBucketReplicationInput{
		Bucket: aws.String(d.Id()),
	})
	if err != nil {
		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() == 404 {
			// Gone on the AWS side: drop from state rather than erroring.
			log.Printf("[WARN] S3 bucket replication not found for %s, removing from state", d.Id())
			d.SetId("")
			return nil
		}
		return err
	}

	log.Printf("[DEBUG] S3 bucket: %s, read replication configuration: %v", d.Id(), replication)

	if r := replication.ReplicationConfiguration; r != nil {
		if err := d.Set("replication_configuration", flattenAwsS3BucketReplicationConfiguration(r)); err != nil {
			log.Printf("[DEBUG] Error setting replication configuration: %s", err)
			return err
		}
	}

	return nil
}

// resourceAwsS3BucketReplicationDelete removes the replication configuration
// from the bucket. A bucket that no longer exists is treated as success,
// since there is nothing left to delete.
func resourceAwsS3BucketReplicationDelete(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn
	bucket := d.Get("bucket").(string)

	log.Printf("[DEBUG] S3 bucket: %s, delete replication configuration", bucket)

	_, err := s3conn.DeleteBucketReplication(&s3.DeleteBucketReplicationInput{
		Bucket: aws.String(bucket),
	})
	if err == nil {
		d.SetId("")
		return nil
	}

	// The bucket itself is gone: nothing to do.
	if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoSuchBucket" {
		return nil
	}
	return fmt.Errorf("Error deleting S3 replication configuration: %s", err)
}
Loading

0 comments on commit eea0984

Please sign in to comment.