Add support for S3 Object Lock legal holds, retention modes and retention periods #9942

Merged
10 commits merged on Sep 18, 2019
15 changes: 15 additions & 0 deletions aws/data_source_aws_s3_bucket_object.go
@@ -74,6 +74,18 @@ func dataSourceAwsS3BucketObject() *schema.Resource {
Type: schema.TypeMap,
Computed: true,
},
"object_lock_legal_hold_status": {
Type: schema.TypeString,
Computed: true,
},
"object_lock_mode": {
Type: schema.TypeString,
Computed: true,
},
"object_lock_retain_until_date": {
Type: schema.TypeString,
Computed: true,
},
"range": {
Type: schema.TypeString,
Optional: true,
@@ -155,6 +167,9 @@ func dataSourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) e
d.Set("expires", out.Expires)
d.Set("last_modified", out.LastModified.Format(time.RFC1123))
d.Set("metadata", pointersMapToStringList(out.Metadata))
d.Set("object_lock_legal_hold_status", out.ObjectLockLegalHoldStatus)
d.Set("object_lock_mode", out.ObjectLockMode)
d.Set("object_lock_retain_until_date", flattenS3ObjectLockRetainUntilDate(out.ObjectLockRetainUntilDate))
d.Set("server_side_encryption", out.ServerSideEncryption)
d.Set("sse_kms_key_id", out.SSEKMSKeyId)
d.Set("version_id", out.VersionId)
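
The data source read above relies on a flattenS3ObjectLockRetainUntilDate helper that is defined elsewhere in the PR and not shown in this hunk. A minimal sketch of what it plausibly does, assuming it simply renders the SDK's *time.Time as the RFC 3339 string the acceptance tests below compare against (the name comes from the call site; the body is an assumption):

package aws

import "time"

// Sketch only: the real helper lives in another file of this PR. It turns the
// *time.Time returned by the S3 API into the RFC 3339 string stored in state,
// or "" when no retention period is set on the object.
func flattenS3ObjectLockRetainUntilDate(date *time.Time) string {
	if date == nil {
		return ""
	}
	return date.Format(time.RFC3339)
}
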
153 changes: 153 additions & 0 deletions aws/data_source_aws_s3_bucket_object_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"regexp"
"testing"
"time"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
@@ -39,6 +40,9 @@ func TestAccDataSourceAWSS3BucketObject_basic(t *testing.T) {
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "etag", "b10a8db164e0754105b7a99be72e3fe5"),
resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified",
regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", ""),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_mode", ""),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_retain_until_date", ""),
resource.TestCheckNoResourceAttr("data.aws_s3_bucket_object.obj", "body"),
),
},
@@ -73,6 +77,9 @@ func TestAccDataSourceAWSS3BucketObject_readableBody(t *testing.T) {
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "etag", "a6105c0a611b41b08f1209506350279e"),
resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified",
regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", ""),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_mode", ""),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_retain_until_date", ""),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "body", "yes"),
),
},
@@ -110,6 +117,9 @@ func TestAccDataSourceAWSS3BucketObject_kmsEncrypted(t *testing.T) {
regexp.MustCompile(`^arn:aws:kms:[a-z]{2}-[a-z]+-\d{1}:[0-9]{12}:key/[a-z0-9-]{36}$`)),
resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified",
regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", ""),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_mode", ""),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_retain_until_date", ""),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "body", "Keep Calm and Carry On"),
),
},
@@ -161,6 +171,84 @@ func TestAccDataSourceAWSS3BucketObject_allParams(t *testing.T) {
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "website_redirect_location", ""),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "metadata.%", "0"),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "tags.%", "1"),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", ""),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_mode", ""),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_retain_until_date", ""),
),
},
},
})
}

func TestAccDataSourceAWSS3BucketObject_ObjectLockLegalHoldOff(t *testing.T) {
rInt := acctest.RandInt()
resourceOnlyConf, conf := testAccAWSDataSourceS3ObjectConfig_objectLockLegalHoldOff(rInt)

var rObj s3.GetObjectOutput
var dsObj s3.GetObjectOutput

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
PreventPostDestroyRefresh: true,
Steps: []resource.TestStep{
{
Config: resourceOnlyConf,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &rObj),
),
},
{
Config: conf,
Check: resource.ComposeTestCheckFunc(
testAccCheckAwsS3ObjectDataSourceExists("data.aws_s3_bucket_object.obj", &dsObj),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_length", "11"),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_type", "binary/octet-stream"),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "etag", "b10a8db164e0754105b7a99be72e3fe5"),
resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified",
regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", "OFF"),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_mode", ""),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_retain_until_date", ""),
resource.TestCheckNoResourceAttr("data.aws_s3_bucket_object.obj", "body"),
),
},
},
})
}

func TestAccDataSourceAWSS3BucketObject_ObjectLockLegalHoldOn(t *testing.T) {
rInt := acctest.RandInt()
retainUntilDate := time.Now().UTC().AddDate(0, 0, 10).Format(time.RFC3339)
resourceOnlyConf, conf := testAccAWSDataSourceS3ObjectConfig_objectLockLegalHoldOn(rInt, retainUntilDate)
Contributor:

For the future, it really is hard to gain context around what these variables represent given the function signature. We should try to use more descriptive names here. We should also look at ways of structuring the resource and data source configurations to make tests like this easier. Then again, the resource test should cover the testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &rObj) case.

No action items here. Just mentioning for future reference.


var rObj s3.GetObjectOutput
var dsObj s3.GetObjectOutput

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
PreventPostDestroyRefresh: true,
Steps: []resource.TestStep{
{
Config: resourceOnlyConf,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &rObj),
),
},
{
Config: conf,
Check: resource.ComposeTestCheckFunc(
testAccCheckAwsS3ObjectDataSourceExists("data.aws_s3_bucket_object.obj", &dsObj),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_length", "11"),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_type", "binary/octet-stream"),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "etag", "b10a8db164e0754105b7a99be72e3fe5"),
resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified",
regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", "ON"),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_mode", "GOVERNANCE"),
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_retain_until_date", retainUntilDate),
resource.TestCheckNoResourceAttr("data.aws_s3_bucket_object.obj", "body"),
),
},
},
Expand Down Expand Up @@ -303,3 +391,68 @@ data "aws_s3_bucket_object" "obj" {

return resources, both
}

func testAccAWSDataSourceS3ObjectConfig_objectLockLegalHoldOff(randInt int) (string, string) {
resources := fmt.Sprintf(`
resource "aws_s3_bucket" "object_bucket" {
bucket = "tf-object-test-bucket-%d"

versioning {
enabled = true
}

object_lock_configuration {
object_lock_enabled = "Enabled"
}
}
resource "aws_s3_bucket_object" "object" {
bucket = "${aws_s3_bucket.object_bucket.bucket}"
key = "tf-testing-obj-%d"
content = "Hello World"
object_lock_legal_hold_status = "OFF"
}
`, randInt, randInt)

both := fmt.Sprintf(`%s
data "aws_s3_bucket_object" "obj" {
bucket = "tf-object-test-bucket-%d"
key = "tf-testing-obj-%d"
}
`, resources, randInt, randInt)

return resources, both
}

func testAccAWSDataSourceS3ObjectConfig_objectLockLegalHoldOn(randInt int, retainUntilDate string) (string, string) {
resources := fmt.Sprintf(`
resource "aws_s3_bucket" "object_bucket" {
bucket = "tf-object-test-bucket-%d"

versioning {
enabled = true
}

object_lock_configuration {
object_lock_enabled = "Enabled"
}
}
resource "aws_s3_bucket_object" "object" {
bucket = "${aws_s3_bucket.object_bucket.bucket}"
key = "tf-testing-obj-%d"
content = "Hello World"
force_destroy = true
object_lock_legal_hold_status = "ON"
object_lock_mode = "GOVERNANCE"
object_lock_retain_until_date = "%s"
}
`, randInt, randInt, retainUntilDate)

both := fmt.Sprintf(`%s
data "aws_s3_bucket_object" "obj" {
bucket = "tf-object-test-bucket-%d"
key = "tf-testing-obj-%d"
}
`, resources, randInt, randInt)

return resources, both
}
54 changes: 11 additions & 43 deletions aws/resource_aws_s3_bucket.go
@@ -567,8 +567,8 @@ func resourceAwsS3Bucket() *schema.Resource {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{
s3.ObjectLockModeGovernance,
@ewbankkit (Contributor, author), Sep 2, 2019:

Used the correct enumeration here (though the values are identical).

s3.ObjectLockModeCompliance,
s3.ObjectLockRetentionModeGovernance,
s3.ObjectLockRetentionModeCompliance,
}, false),
},
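
The author's comment above notes that the old and new SDK enumerations share the same string values, so this change is a readability fix rather than a behavioral one. A small standalone check, assuming the constants as published in aws-sdk-go's s3 package:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Both pairs print the same value, so swapping the enumeration above
	// does not change the accepted configuration values.
	fmt.Println(s3.ObjectLockModeGovernance, s3.ObjectLockRetentionModeGovernance) // GOVERNANCE GOVERNANCE
	fmt.Println(s3.ObjectLockModeCompliance, s3.ObjectLockRetentionModeCompliance) // COMPLIANCE COMPLIANCE
}
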

@@ -1259,49 +1259,17 @@ func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error {
// bucket may have things delete them
log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %+v", err)

bucket := d.Get("bucket").(string)
resp, err := s3conn.ListObjectVersions(
&s3.ListObjectVersionsInput{
Bucket: aws.String(bucket),
},
)

if err != nil {
return fmt.Errorf("Error S3 Bucket list Object Versions err: %s", err)
}

objectsToDelete := make([]*s3.ObjectIdentifier, 0)

if len(resp.DeleteMarkers) != 0 {

for _, v := range resp.DeleteMarkers {
objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{
Key: v.Key,
VersionId: v.VersionId,
})
}
// Delete everything including locked objects.
// Don't ignore any object errors or we could recurse infinitely.
var objectLockEnabled bool
objectLockConfiguration := expandS3ObjectLockConfiguration(d.Get("object_lock_configuration").([]interface{}))
if objectLockConfiguration != nil {
objectLockEnabled = aws.StringValue(objectLockConfiguration.ObjectLockEnabled) == s3.ObjectLockEnabledEnabled
}

if len(resp.Versions) != 0 {
for _, v := range resp.Versions {
objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{
Key: v.Key,
VersionId: v.VersionId,
})
}
}

params := &s3.DeleteObjectsInput{
Bucket: aws.String(bucket),
Delete: &s3.Delete{
Objects: objectsToDelete,
},
}

_, err = s3conn.DeleteObjects(params)
err = deleteAllS3ObjectVersions(s3conn, d.Id(), "", objectLockEnabled, false)

if err != nil {
return fmt.Errorf("Error S3 Bucket force_destroy error deleting: %s", err)
return fmt.Errorf("error S3 Bucket force_destroy: %s", err)
}

// this line recurses until all objects are deleted or an error is returned
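
The hunk above replaces the inline ListObjectVersions/DeleteObjects loop with a single call to deleteAllS3ObjectVersions, which is introduced elsewhere in this PR and not shown here. A rough sketch of what such a helper could look like, with the signature inferred from the call site and the bypass behavior assumed (the real implementation may handle pagination and per-object errors differently):

package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// Sketch of the helper the new force_destroy path calls. It walks every
// object version and delete marker in the bucket and deletes them
// individually, bypassing GOVERNANCE-mode retention when force is true.
func deleteAllS3ObjectVersions(conn *s3.S3, bucketName, key string, force, ignoreObjectErrors bool) error {
	input := &s3.ListObjectVersionsInput{Bucket: aws.String(bucketName)}
	if key != "" {
		input.Prefix = aws.String(key)
	}

	var lastErr error
	deleteVersion := func(objKey, versionID *string) {
		deleteInput := &s3.DeleteObjectInput{
			Bucket:    aws.String(bucketName),
			Key:       objKey,
			VersionId: versionID,
		}
		if force {
			// Needed to remove objects still under GOVERNANCE retention.
			deleteInput.BypassGovernanceRetention = aws.Bool(true)
		}
		if _, err := conn.DeleteObject(deleteInput); err != nil && !ignoreObjectErrors {
			lastErr = err
		}
	}

	pageErr := conn.ListObjectVersionsPages(input, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
		for _, v := range page.Versions {
			deleteVersion(v.Key, v.VersionId)
		}
		for _, m := range page.DeleteMarkers {
			deleteVersion(m.Key, m.VersionId)
		}
		return !lastPage
	})
	if pageErr != nil {
		return pageErr
	}
	return lastErr
}
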
@@ -2472,7 +2440,7 @@ type S3Website struct {
// S3 Object Lock functions.
//

func readS3ObjectLockConfiguration(conn *s3.S3, bucket string) (interface{}, error) {
func readS3ObjectLockConfiguration(conn *s3.S3, bucket string) ([]interface{}, error) {
resp, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) {
return conn.GetObjectLockConfiguration(&s3.GetObjectLockConfigurationInput{
Bucket: aws.String(bucket),