From 766e77f69dc9b4201574258e5f18fdbb0aac7d71 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 30 Aug 2019 15:25:54 -0400 Subject: [PATCH 01/10] d/aws_s3_bucket_object: Add 'object_lock_legal_hold_status' attribute. --- aws/data_source_aws_s3_bucket_object.go | 5 ++ aws/data_source_aws_s3_bucket_object_test.go | 70 +++++++++++++++++++ website/docs/d/s3_bucket_object.html.markdown | 1 + 3 files changed, 76 insertions(+) diff --git a/aws/data_source_aws_s3_bucket_object.go b/aws/data_source_aws_s3_bucket_object.go index 5be6586d0b8..776c808adf8 100644 --- a/aws/data_source_aws_s3_bucket_object.go +++ b/aws/data_source_aws_s3_bucket_object.go @@ -74,6 +74,10 @@ func dataSourceAwsS3BucketObject() *schema.Resource { Type: schema.TypeMap, Computed: true, }, + "object_lock_legal_hold_status": { + Type: schema.TypeString, + Computed: true, + }, "range": { Type: schema.TypeString, Optional: true, @@ -155,6 +159,7 @@ func dataSourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) e d.Set("expires", out.Expires) d.Set("last_modified", out.LastModified.Format(time.RFC1123)) d.Set("metadata", pointersMapToStringList(out.Metadata)) + d.Set("object_lock_legal_hold_status", out.ObjectLockLegalHoldStatus) d.Set("server_side_encryption", out.ServerSideEncryption) d.Set("sse_kms_key_id", out.SSEKMSKeyId) d.Set("version_id", out.VersionId) diff --git a/aws/data_source_aws_s3_bucket_object_test.go b/aws/data_source_aws_s3_bucket_object_test.go index a547fbf7303..d401729c51f 100644 --- a/aws/data_source_aws_s3_bucket_object_test.go +++ b/aws/data_source_aws_s3_bucket_object_test.go @@ -39,6 +39,7 @@ func TestAccDataSourceAWSS3BucketObject_basic(t *testing.T) { resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "etag", "b10a8db164e0754105b7a99be72e3fe5"), resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", ""), resource.TestCheckNoResourceAttr("data.aws_s3_bucket_object.obj", "body"), ), }, @@ -73,6 +74,7 @@ func TestAccDataSourceAWSS3BucketObject_readableBody(t *testing.T) { resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "etag", "a6105c0a611b41b08f1209506350279e"), resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", ""), resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "body", "yes"), ), }, @@ -110,6 +112,7 @@ func TestAccDataSourceAWSS3BucketObject_kmsEncrypted(t *testing.T) { regexp.MustCompile(`^arn:aws:kms:[a-z]{2}-[a-z]+-\d{1}:[0-9]{12}:key/[a-z0-9-]{36}$`)), resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", ""), resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "body", "Keep Calm and Carry On"), ), }, @@ -161,6 +164,42 @@ func TestAccDataSourceAWSS3BucketObject_allParams(t *testing.T) { resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "website_redirect_location", ""), resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "metadata.%", "0"), 
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "tags.%", "1"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", ""), + ), + }, + }, + }) +} + +func TestAccDataSourceAWSS3BucketObject_ObjectLockLegalHold(t *testing.T) { + rInt := acctest.RandInt() + resourceOnlyConf, conf := testAccAWSDataSourceS3ObjectConfig_objectLockLegalHold(rInt) + + var rObj s3.GetObjectOutput + var dsObj s3.GetObjectOutput + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + PreventPostDestroyRefresh: true, + Steps: []resource.TestStep{ + { + Config: resourceOnlyConf, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &rObj), + ), + }, + { + Config: conf, + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsS3ObjectDataSourceExists("data.aws_s3_bucket_object.obj", &dsObj), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_length", "11"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_type", "binary/octet-stream"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "etag", "b10a8db164e0754105b7a99be72e3fe5"), + resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", + regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", "OFF"), + resource.TestCheckNoResourceAttr("data.aws_s3_bucket_object.obj", "body"), ), }, }, @@ -303,3 +342,34 @@ data "aws_s3_bucket_object" "obj" { return resources, both } + +func testAccAWSDataSourceS3ObjectConfig_objectLockLegalHold(randInt int) (string, string) { + resources := fmt.Sprintf(` +resource "aws_s3_bucket" "object_bucket" { + bucket = "tf-object-test-bucket-%d" + + versioning { + enabled = true + } + + object_lock_configuration { + object_lock_enabled = "Enabled" + } +} +resource "aws_s3_bucket_object" "object" { + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "tf-testing-obj-%d" + content = "Hello World" + object_lock_legal_hold_status = "OFF" +} +`, randInt, randInt) + + both := fmt.Sprintf(`%s +data "aws_s3_bucket_object" "obj" { + bucket = "tf-object-test-bucket-%d" + key = "tf-testing-obj-%d" +} +`, resources, randInt, randInt) + + return resources, both +} diff --git a/website/docs/d/s3_bucket_object.html.markdown b/website/docs/d/s3_bucket_object.html.markdown index d4f789e7733..86603db384b 100644 --- a/website/docs/d/s3_bucket_object.html.markdown +++ b/website/docs/d/s3_bucket_object.html.markdown @@ -77,6 +77,7 @@ In addition to all arguments above, the following attributes are exported: * `expires` - The date and time at which the object is no longer cacheable. * `last_modified` - Last modified date of the object in RFC1123 format (e.g. `Mon, 02 Jan 2006 15:04:05 MST`) * `metadata` - A map of metadata stored with the object in S3 +* `object_lock_legal_hold_status` - Indicates whether this object has an active [legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds). This field is only returned if you have permission to view an object's legal hold status. * `server_side_encryption` - If the object is stored using server-side encryption (KMS or Amazon S3-managed encryption key), this field includes the chosen encryption and algorithm used. 
* `sse_kms_key_id` - If present, specifies the ID of the Key Management Service (KMS) master encryption key that was used for the object. * `storage_class` - [Storage class](http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) information of the object. Available for all objects except for `Standard` storage class objects. From 0abfb3ecdb23c65ef434a2e028afdb030bc04bd5 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 30 Aug 2019 17:29:07 -0400 Subject: [PATCH 02/10] r/aws_s3_bucket_object: Add 'object_lock_legal_hold_status' attribute. --- aws/resource_aws_s3_bucket_object.go | 27 ++++ aws/resource_aws_s3_bucket_object_test.go | 126 ++++++++++++++++++ website/docs/r/s3_bucket_object.html.markdown | 1 + 3 files changed, 154 insertions(+) diff --git a/aws/resource_aws_s3_bucket_object.go b/aws/resource_aws_s3_bucket_object.go index 5a2d3e37951..ca8c6daaa44 100644 --- a/aws/resource_aws_s3_bucket_object.go +++ b/aws/resource_aws_s3_bucket_object.go @@ -162,6 +162,15 @@ func resourceAwsS3BucketObject() *schema.Resource { Type: schema.TypeString, Optional: true, }, + + "object_lock_legal_hold_status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + s3.ObjectLockLegalHoldStatusOn, + s3.ObjectLockLegalHoldStatusOff, + }, false), + }, }, } } @@ -263,6 +272,10 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) erro putInput.WebsiteRedirectLocation = aws.String(v.(string)) } + if v, ok := d.GetOk("object_lock_legal_hold_status"); ok { + putInput.ObjectLockLegalHoldStatus = aws.String(v.(string)) + } + if _, err := s3conn.PutObject(putInput); err != nil { return fmt.Errorf("Error putting object in S3 bucket (%s): %s", bucket, err) } @@ -317,6 +330,7 @@ func resourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) err d.Set("version_id", resp.VersionId) d.Set("server_side_encryption", resp.ServerSideEncryption) d.Set("website_redirect", resp.WebsiteRedirectLocation) + d.Set("object_lock_legal_hold_status", resp.ObjectLockLegalHoldStatus) // Only set non-default KMS key ID (one that doesn't match default) if resp.SSEKMSKeyId != nil { @@ -387,6 +401,19 @@ func resourceAwsS3BucketObjectUpdate(d *schema.ResourceData, meta interface{}) e } } + if d.HasChange("object_lock_legal_hold_status") { + _, err := conn.PutObjectLegalHold(&s3.PutObjectLegalHoldInput{ + Bucket: aws.String(d.Get("bucket").(string)), + Key: aws.String(d.Get("key").(string)), + LegalHold: &s3.ObjectLockLegalHold{ + Status: aws.String(d.Get("object_lock_legal_hold_status").(string)), + }, + }) + if err != nil { + return fmt.Errorf("error putting S3 object lock legal hold: %s", err) + } + } + if err := setTagsS3Object(conn, d); err != nil { return fmt.Errorf("error setting S3 object tags: %s", err) } diff --git a/aws/resource_aws_s3_bucket_object_test.go b/aws/resource_aws_s3_bucket_object_test.go index c733339a3e9..d0ee4b976b4 100644 --- a/aws/resource_aws_s3_bucket_object_test.go +++ b/aws/resource_aws_s3_bucket_object_test.go @@ -346,6 +346,7 @@ func TestAccAWSS3BucketObject_updates(t *testing.T) { testAccCheckAWSS3BucketObjectExists(resourceName, &originalObj), testAccCheckAWSS3BucketObjectBody(&originalObj, "initial object state"), resource.TestCheckResourceAttr(resourceName, "etag", "647d1d58e1011c743ec67d5e8af87b53"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), ), }, { @@ -354,6 +355,7 @@ func TestAccAWSS3BucketObject_updates(t *testing.T) { 
testAccCheckAWSS3BucketObjectExists(resourceName, &modifiedObj), testAccCheckAWSS3BucketObjectBody(&modifiedObj, "modified object"), resource.TestCheckResourceAttr(resourceName, "etag", "1c7fd13df1515c2a13ad9eb068931f09"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), ), }, }, @@ -759,6 +761,87 @@ func TestAccAWSS3BucketObject_tagsLeadingSlash(t *testing.T) { }) } +func TestAccAWSS3BucketObject_ObjectLockLegalHoldStartWithNone(t *testing.T) { + var obj1, obj2, obj3 s3.GetObjectOutput + resourceName := "aws_s3_bucket_object.object" + rInt := acctest.RandInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketObjectDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketObjectConfig_noObjectLockLegalHold(rInt, "stuff"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj1, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + ), + }, + { + Config: testAccAWSS3BucketObjectConfig_withObjectLockLegalHold(rInt, "stuff", "ON"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj2), + testAccCheckAWSS3BucketObjectVersionIdEquals(&obj2, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj2, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", "ON"), + ), + }, + { + Config: testAccAWSS3BucketObjectConfig_withObjectLockLegalHold(rInt, "stuff", "OFF"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj3), + testAccCheckAWSS3BucketObjectVersionIdEquals(&obj3, &obj2), + testAccCheckAWSS3BucketObjectBody(&obj3, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", "OFF"), + ), + }, + // Remove legal hold but create a new object version to test force_destroy + // { + // Config: testAccAWSS3BucketObjectConfig_withObjectLockLegalHold(rInt, "changed stuff", "OFF"), + // Check: resource.ComposeTestCheckFunc( + // testAccCheckAWSS3BucketObjectExists(resourceName, &obj3), + // testAccCheckAWSS3BucketObjectVersionIdDiffers(&obj3, &obj2), + // testAccCheckAWSS3BucketObjectBody(&obj3, "changed stuff"), + // resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", "OFF"), + // ), + // }, + }, + }) +} + +func TestAccAWSS3BucketObject_ObjectLockLegalHoldStartWithOn(t *testing.T) { + var obj1, obj2 s3.GetObjectOutput + resourceName := "aws_s3_bucket_object.object" + rInt := acctest.RandInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketObjectDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketObjectConfig_withObjectLockLegalHold(rInt, "stuff", "ON"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj1, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", "ON"), + ), + }, + { + Config: testAccAWSS3BucketObjectConfig_withObjectLockLegalHold(rInt, "stuff", "OFF"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj2), + testAccCheckAWSS3BucketObjectVersionIdEquals(&obj2, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj2, "stuff"), + 
resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", "OFF"), + ), + }, + }, + }) +} + func testAccCheckAWSS3BucketObjectVersionIdDiffers(first, second *s3.GetObjectOutput) resource.TestCheckFunc { return func(s *terraform.State) error { if first.VersionId == nil { @@ -1227,3 +1310,46 @@ resource "aws_s3_bucket_object" "object" { } `, randInt, metadataKey1, metadataValue1, metadataKey2, metadataValue2) } + +func testAccAWSS3BucketObjectConfig_noObjectLockLegalHold(randInt int, content string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "object_bucket" { + bucket = "tf-object-test-bucket-%d" + versioning { + enabled = true + } + object_lock_configuration { + object_lock_enabled = "Enabled" + } +} + +resource "aws_s3_bucket_object" "object" { + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "test-key" + content = "%s" + //force_destroy = true +} +`, randInt, content) +} + +func testAccAWSS3BucketObjectConfig_withObjectLockLegalHold(randInt int, content, legalHoldStatus string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "object_bucket" { + bucket = "tf-object-test-bucket-%d" + versioning { + enabled = true + } + object_lock_configuration { + object_lock_enabled = "Enabled" + } +} + +resource "aws_s3_bucket_object" "object" { + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "test-key" + content = "%s" + object_lock_legal_hold_status = "%s" + //force_destroy = true +} +`, randInt, content, legalHoldStatus) +} diff --git a/website/docs/r/s3_bucket_object.html.markdown b/website/docs/r/s3_bucket_object.html.markdown index 9944a564485..a153657b7e7 100644 --- a/website/docs/r/s3_bucket_object.html.markdown +++ b/website/docs/r/s3_bucket_object.html.markdown @@ -109,6 +109,7 @@ use the exported `arn` attribute: `kms_key_id = "${aws_kms_key.foo.arn}"` * `metadata` - (Optional) A mapping of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase label are currently supported by the AWS Go API). * `tags` - (Optional) A mapping of tags to assign to the object. +* `object_lock_legal_hold_status` - (Optional) Indicates whether this object has an active [legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds). Valid values are `ON` and `OFF`. If no content is provided through `source`, `content` or `content_base64`, then the object will be empty. From 147aa76ae0f5dafb27ec430b471a30f79d13f0be Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sat, 31 Aug 2019 18:39:04 -0400 Subject: [PATCH 03/10] r/aws_s3_bucket_object: Add 'force_destroy' attribute. 
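Object deletion now goes through a shared deleteAllS3ObjectVersions helper that, when force_destroy is set, lifts an active legal hold and retries the delete. A minimal configuration exercising the new attribute, adapted from the acceptance-test configs in this patch (bucket name is illustrative):

    resource "aws_s3_bucket" "object_bucket" {
      bucket = "tf-object-test-bucket-12345"

      versioning {
        enabled = true
      }

      object_lock_configuration {
        object_lock_enabled = "Enabled"
      }
    }

    resource "aws_s3_bucket_object" "object" {
      bucket                        = "${aws_s3_bucket.object_bucket.bucket}"
      key                           = "test-key"
      content                       = "stuff"
      object_lock_legal_hold_status = "ON"

      # Without force_destroy, destroying this object fails while the legal
      # hold is in place; with it, the provider removes the hold first.
      force_destroy = true
    }
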
--- aws/resource_aws_s3_bucket_object.go | 198 +++++++++++++++--- aws/resource_aws_s3_bucket_object_test.go | 96 +-------- website/docs/r/s3_bucket_object.html.markdown | 2 + 3 files changed, 175 insertions(+), 121 deletions(-) diff --git a/aws/resource_aws_s3_bucket_object.go b/aws/resource_aws_s3_bucket_object.go index ca8c6daaa44..b4f9c3c79e8 100644 --- a/aws/resource_aws_s3_bucket_object.go +++ b/aws/resource_aws_s3_bucket_object.go @@ -163,6 +163,12 @@ func resourceAwsS3BucketObject() *schema.Resource { Optional: true, }, + "force_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "object_lock_legal_hold_status": { Type: schema.TypeString, Optional: true, @@ -429,39 +435,8 @@ func resourceAwsS3BucketObjectDelete(d *schema.ResourceData, meta interface{}) e // We are effectively ignoring any leading '/' in the key name as aws.Config.DisableRestProtocolURICleaning is false key = strings.TrimPrefix(key, "/") - if _, ok := d.GetOk("version_id"); ok { - // Bucket is versioned, we need to delete all versions - vInput := s3.ListObjectVersionsInput{ - Bucket: aws.String(bucket), - Prefix: aws.String(key), - } - out, err := s3conn.ListObjectVersions(&vInput) - if err != nil { - return fmt.Errorf("Failed listing S3 object versions: %s", err) - } - - for _, v := range out.Versions { - input := s3.DeleteObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - VersionId: v.VersionId, - } - _, err := s3conn.DeleteObject(&input) - if err != nil { - return fmt.Errorf("Error deleting S3 object version of %s:\n %s:\n %s", - key, v, err) - } - } - } else { - // Just delete the object - input := s3.DeleteObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - } - _, err := s3conn.DeleteObject(&input) - if err != nil { - return fmt.Errorf("Error deleting S3 bucket object: %s Bucket: %q Object: %q", err, bucket, key) - } + if err := deleteAllS3ObjectVersions(s3conn, bucket, key, d.Get("force_destroy").(bool), false); err != nil { + return fmt.Errorf("error deleting S3 Bucket (%s) Object (%s): %s", bucket, key, err) } return nil @@ -486,3 +461,160 @@ func resourceAwsS3BucketObjectCustomizeDiff(d *schema.ResourceDiff, meta interfa return nil } + +// deleteAllS3ObjectVersions deletes all versions of a specified key from an S3 bucket. +// If key is empty then all versions of all objects are deleted. +// Set force to true to override any S3 object lock protections. +func deleteAllS3ObjectVersions(conn *s3.S3, bucketName, key string, force, ignoreObjectErrors bool) error { + input := &s3.ListObjectVersionsInput{ + Bucket: aws.String(bucketName), + } + if key != "" { + input.Prefix = aws.String(key) + } + + var lastErr error + err := conn.ListObjectVersionsPages(input, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, objectVersion := range page.Versions { + objectKey := aws.StringValue(objectVersion.Key) + objectVersionID := aws.StringValue(objectVersion.VersionId) + + if key != "" && key != objectKey { + continue + } + + err := deleteS3ObjectVersion(conn, bucketName, objectKey, objectVersionID) + if isAWSErr(err, "AccessDenied", "") && force { + // Remove any legal hold. 
+ resp, err := conn.HeadObject(&s3.HeadObjectInput{ + Bucket: aws.String(bucketName), + Key: objectVersion.Key, + VersionId: objectVersion.VersionId, + }) + + if err != nil { + log.Printf("[ERROR] Error getting S3 Bucket (%s) Object (%s) Version (%s) metadata: %s", bucketName, objectKey, objectVersionID, err) + lastErr = err + continue + } + + if aws.StringValue(resp.ObjectLockLegalHoldStatus) == s3.ObjectLockLegalHoldStatusOn { + _, err := conn.PutObjectLegalHold(&s3.PutObjectLegalHoldInput{ + Bucket: aws.String(bucketName), + Key: objectVersion.Key, + VersionId: objectVersion.VersionId, + LegalHold: &s3.ObjectLockLegalHold{ + Status: aws.String(s3.ObjectLockLegalHoldStatusOff), + }, + }) + + if err != nil { + log.Printf("[ERROR] Error putting S3 Bucket (%s) Object (%s) Version(%s) legal hold: %s", bucketName, objectKey, objectVersionID, err) + lastErr = err + continue + } + + // Attempt to delete again. + err = deleteS3ObjectVersion(conn, bucketName, objectKey, objectVersionID) + + if err != nil { + lastErr = err + } + + continue + } + + // AccessDenied for another reason. + lastErr = fmt.Errorf("AccessDenied deleting S3 Bucket (%s) Object (%s) Version: %s", bucketName, objectKey, objectVersionID) + continue + } + + if err != nil { + lastErr = err + } + } + + return !lastPage + }) + + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { + err = nil + } + + if err != nil { + return err + } + + if lastErr != nil { + if !ignoreObjectErrors { + return fmt.Errorf("error deleting at least one object version, last error: %s", lastErr) + } + + lastErr = nil + } + + err = conn.ListObjectVersionsPages(input, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, deleteMarker := range page.DeleteMarkers { + deleteMarkerKey := aws.StringValue(deleteMarker.Key) + deleteMarkerVersionID := aws.StringValue(deleteMarker.VersionId) + + if key != "" && key != deleteMarkerKey { + continue + } + + err := deleteS3ObjectVersion(conn, bucketName, deleteMarkerKey, deleteMarkerVersionID) + + if err != nil { + lastErr = err + } + } + + return !lastPage + }) + + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { + err = nil + } + + if err != nil { + return err + } + + if lastErr != nil { + if !ignoreObjectErrors { + return fmt.Errorf("error deleting at least one object delete marker, last error: %s", lastErr) + } + + lastErr = nil + } + + return nil +} + +// deleteS3ObjectVersion deletes a specific bucket object version. 
+func deleteS3ObjectVersion(conn *s3.S3, b, k, v string) error { + log.Printf("[INFO] Deleting S3 Bucket (%s) Object (%s) Version: %s", b, k, v) + _, err := conn.DeleteObject(&s3.DeleteObjectInput{ + Bucket: aws.String(b), + Key: aws.String(k), + VersionId: aws.String(v), + }) + + if err != nil { + log.Printf("[WARN] Error deleting S3 Bucket (%s) Object (%s) Version (%s): %s", b, k, v, err) + } + + if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") || isAWSErr(err, s3.ErrCodeNoSuchKey, "") { + return nil + } + + return err +} diff --git a/aws/resource_aws_s3_bucket_object_test.go b/aws/resource_aws_s3_bucket_object_test.go index d0ee4b976b4..92bfab5a40e 100644 --- a/aws/resource_aws_s3_bucket_object_test.go +++ b/aws/resource_aws_s3_bucket_object_test.go @@ -81,79 +81,8 @@ func testSweepS3BucketObjects(region string) error { continue } - input := &s3.ListObjectVersionsInput{ - Bucket: bucket.Name, - } - - err = conn.ListObjectVersionsPages(input, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, objectVersion := range page.Versions { - input := &s3.DeleteObjectInput{ - Bucket: bucket.Name, - Key: objectVersion.Key, - VersionId: objectVersion.VersionId, - } - objectKey := aws.StringValue(objectVersion.Key) - objectVersionID := aws.StringValue(objectVersion.VersionId) - - log.Printf("[INFO] Deleting S3 Bucket (%s) Object (%s) Version: %s", bucketName, objectKey, objectVersionID) - _, err := conn.DeleteObject(input) - - if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") || isAWSErr(err, s3.ErrCodeNoSuchKey, "") { - continue - } - - if err != nil { - log.Printf("[ERROR] Error deleting S3 Bucket (%s) Object (%s) Version (%s): %s", bucketName, objectKey, objectVersionID, err) - } - } - - return !lastPage - }) - - if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { - continue - } - - if err != nil { - return fmt.Errorf("error listing S3 Bucket (%s) Objects: %s", bucketName, err) - } - - err = conn.ListObjectVersionsPages(input, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, deleteMarker := range page.DeleteMarkers { - input := &s3.DeleteObjectInput{ - Bucket: bucket.Name, - Key: deleteMarker.Key, - VersionId: deleteMarker.VersionId, - } - deleteMarkerKey := aws.StringValue(deleteMarker.Key) - deleteMarkerVersionID := aws.StringValue(deleteMarker.VersionId) - - log.Printf("[INFO] Deleting S3 Bucket (%s) Object (%s) Delete Marker: %s", bucketName, deleteMarkerKey, deleteMarkerVersionID) - _, err := conn.DeleteObject(input) - - if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") || isAWSErr(err, s3.ErrCodeNoSuchKey, "") { - continue - } - - if err != nil { - log.Printf("[ERROR] Error deleting S3 Bucket (%s) Object (%s) Version (%s): %s", bucketName, deleteMarkerKey, deleteMarkerVersionID, err) - } - } - - return !lastPage - }) - - if isAWSErr(err, s3.ErrCodeNoSuchBucket, "") { - continue - } + // Delete everything including locked objects. Ignore any object errors. 
+ err = deleteAllS3ObjectVersions(conn, bucketName, "", true, true) if err != nil { return fmt.Errorf("error listing S3 Bucket (%s) Objects: %s", bucketName, err) @@ -788,25 +717,16 @@ func TestAccAWSS3BucketObject_ObjectLockLegalHoldStartWithNone(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", "ON"), ), }, + // Remove legal hold but create a new object version to test force_destroy { - Config: testAccAWSS3BucketObjectConfig_withObjectLockLegalHold(rInt, "stuff", "OFF"), + Config: testAccAWSS3BucketObjectConfig_withObjectLockLegalHold(rInt, "changed stuff", "OFF"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj3), - testAccCheckAWSS3BucketObjectVersionIdEquals(&obj3, &obj2), - testAccCheckAWSS3BucketObjectBody(&obj3, "stuff"), + testAccCheckAWSS3BucketObjectVersionIdDiffers(&obj3, &obj2), + testAccCheckAWSS3BucketObjectBody(&obj3, "changed stuff"), resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", "OFF"), ), }, - // Remove legal hold but create a new object version to test force_destroy - // { - // Config: testAccAWSS3BucketObjectConfig_withObjectLockLegalHold(rInt, "changed stuff", "OFF"), - // Check: resource.ComposeTestCheckFunc( - // testAccCheckAWSS3BucketObjectExists(resourceName, &obj3), - // testAccCheckAWSS3BucketObjectVersionIdDiffers(&obj3, &obj2), - // testAccCheckAWSS3BucketObjectBody(&obj3, "changed stuff"), - // resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", "OFF"), - // ), - // }, }, }) } @@ -1327,7 +1247,7 @@ resource "aws_s3_bucket_object" "object" { bucket = "${aws_s3_bucket.object_bucket.bucket}" key = "test-key" content = "%s" - //force_destroy = true + force_destroy = true } `, randInt, content) } @@ -1349,7 +1269,7 @@ resource "aws_s3_bucket_object" "object" { key = "test-key" content = "%s" object_lock_legal_hold_status = "%s" - //force_destroy = true + force_destroy = true } `, randInt, content, legalHoldStatus) } diff --git a/website/docs/r/s3_bucket_object.html.markdown b/website/docs/r/s3_bucket_object.html.markdown index a153657b7e7..c26c0cff0a0 100644 --- a/website/docs/r/s3_bucket_object.html.markdown +++ b/website/docs/r/s3_bucket_object.html.markdown @@ -109,6 +109,8 @@ use the exported `arn` attribute: `kms_key_id = "${aws_kms_key.foo.arn}"` * `metadata` - (Optional) A mapping of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase label are currently supported by the AWS Go API). * `tags` - (Optional) A mapping of tags to assign to the object. +`force_destroy` - (Optional) Allow the object to be deleted by removing any legal hold on any object version. +Default is `false`. * `object_lock_legal_hold_status` - (Optional) Indicates whether this object has an active [legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds). Valid values are `ON` and `OFF`. If no content is provided through `source`, `content` or `content_base64`, then the object will be empty. From 50603791823016e2431fccf86beb881b2287a660 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 2 Sep 2019 15:28:45 -0400 Subject: [PATCH 04/10] r/aws_s3_bucket: Handle locked object for 'force_destroy'. 
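force_destroy on the bucket now reuses the deleteAllS3ObjectVersions helper, removing every object version and delete marker and lifting legal holds where necessary. A sketch of the configuration covered by the new acceptance test (bucket name is illustrative):

    resource "aws_s3_bucket" "bucket" {
      bucket        = "tf-test-bucket-12345"
      acl           = "private"
      force_destroy = true

      versioning {
        enabled = true
      }

      object_lock_configuration {
        object_lock_enabled = "Enabled"
      }
    }

With this in place, `terraform destroy` succeeds even if objects in the bucket were uploaded with an active legal hold.
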
--- aws/resource_aws_s3_bucket.go | 51 ++---------- aws/resource_aws_s3_bucket_test.go | 111 +++++++++++++++++++++++++ website/docs/r/s3_bucket.html.markdown | 2 +- 3 files changed, 119 insertions(+), 45 deletions(-) diff --git a/aws/resource_aws_s3_bucket.go b/aws/resource_aws_s3_bucket.go index 49c73881d6c..745c679240e 100644 --- a/aws/resource_aws_s3_bucket.go +++ b/aws/resource_aws_s3_bucket.go @@ -567,8 +567,8 @@ func resourceAwsS3Bucket() *schema.Resource { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ - s3.ObjectLockModeGovernance, - s3.ObjectLockModeCompliance, + s3.ObjectLockRetentionModeGovernance, + s3.ObjectLockRetentionModeCompliance, }, false), }, @@ -1259,49 +1259,12 @@ func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error { // bucket may have things delete them log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %+v", err) - bucket := d.Get("bucket").(string) - resp, err := s3conn.ListObjectVersions( - &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucket), - }, - ) - - if err != nil { - return fmt.Errorf("Error S3 Bucket list Object Versions err: %s", err) - } - - objectsToDelete := make([]*s3.ObjectIdentifier, 0) - - if len(resp.DeleteMarkers) != 0 { - - for _, v := range resp.DeleteMarkers { - objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{ - Key: v.Key, - VersionId: v.VersionId, - }) - } - } - - if len(resp.Versions) != 0 { - for _, v := range resp.Versions { - objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{ - Key: v.Key, - VersionId: v.VersionId, - }) - } - } - - params := &s3.DeleteObjectsInput{ - Bucket: aws.String(bucket), - Delete: &s3.Delete{ - Objects: objectsToDelete, - }, - } - - _, err = s3conn.DeleteObjects(params) + // Delete everything including locked objects. + // Don't ignore any object errors or we could recurse infinitely. + err = deleteAllS3ObjectVersions(s3conn, d.Id(), "", true, false) if err != nil { - return fmt.Errorf("Error S3 Bucket force_destroy error deleting: %s", err) + return fmt.Errorf("error S3 Bucket force_destroy: %s", err) } // this line recurses until all objects are deleted or an error is returned @@ -2472,7 +2435,7 @@ type S3Website struct { // S3 Object Lock functions. 
// -func readS3ObjectLockConfiguration(conn *s3.S3, bucket string) (interface{}, error) { +func readS3ObjectLockConfiguration(conn *s3.S3, bucket string) ([]interface{}, error) { resp, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { return conn.GetObjectLockConfiguration(&s3.GetObjectLockConfigurationInput{ Bucket: aws.String(bucket), diff --git a/aws/resource_aws_s3_bucket_test.go b/aws/resource_aws_s3_bucket_test.go index 008ca78690f..c2bb995ce23 100644 --- a/aws/resource_aws_s3_bucket_test.go +++ b/aws/resource_aws_s3_bucket_test.go @@ -1884,6 +1884,48 @@ func TestAccAWSS3Bucket_objectLock(t *testing.T) { }) } +func TestAccAWSS3Bucket_forceDestroy(t *testing.T) { + resourceName := "aws_s3_bucket.bucket" + rInt := acctest.RandInt() + bucketName := fmt.Sprintf("tf-test-bucket-%d", rInt) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketConfig_forceDestroy(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExists(resourceName), + testAccCheckAWSS3BucketAddObjects(resourceName, "data.txt", "prefix/more_data.txt"), + ), + }, + }, + }) +} + +func TestAccAWSS3Bucket_forceDestroyWithObjectLockEnabled(t *testing.T) { + resourceName := "aws_s3_bucket.bucket" + rInt := acctest.RandInt() + bucketName := fmt.Sprintf("tf-test-bucket-%d", rInt) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketConfig_forceDestroyWithObjectLockEnabled(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExists(resourceName), + testAccCheckAWSS3BucketAddObjectsWithLegalHold(resourceName, "data.txt", "prefix/more_data.txt"), + ), + }, + }, + }) +} + func TestAWSS3BucketName(t *testing.T) { validDnsNames := []string{ "foobar", @@ -2098,6 +2140,47 @@ func testAccCheckAWSS3DestroyBucket(n string) resource.TestCheckFunc { } } +func testAccCheckAWSS3BucketAddObjects(n string, keys ...string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] + conn := testAccProvider.Meta().(*AWSClient).s3conn + + for _, key := range keys { + _, err := conn.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(rs.Primary.ID), + Key: aws.String(key), + }) + + if err != nil { + return fmt.Errorf("PutObject error: %s", err) + } + } + + return nil + } +} + +func testAccCheckAWSS3BucketAddObjectsWithLegalHold(n string, keys ...string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] + conn := testAccProvider.Meta().(*AWSClient).s3conn + + for _, key := range keys { + _, err := conn.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(rs.Primary.ID), + Key: aws.String(key), + ObjectLockLegalHoldStatus: aws.String(s3.ObjectLockLegalHoldStatusOn), + }) + + if err != nil { + return fmt.Errorf("PutObject error: %s", err) + } + } + + return nil + } +} + // Create an S3 bucket via a CF stack so that it has system tags. 
func testAccCheckAWSS3BucketCreateViaCloudFormation(n string, stackId *string) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -3697,6 +3780,34 @@ resource "aws_s3_bucket" "arbitrary" { `, randInt) } +func testAccAWSS3BucketConfig_forceDestroy(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "bucket" { + bucket = "%s" + acl = "private" + force_destroy = true +} +`, bucketName) +} + +func testAccAWSS3BucketConfig_forceDestroyWithObjectLockEnabled(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "bucket" { + bucket = "%s" + acl = "private" + force_destroy = true + + versioning { + enabled = true + } + + object_lock_configuration { + object_lock_enabled = "Enabled" + } +} +`, bucketName) +} + const testAccAWSS3BucketConfigBucketEmptyString = ` resource "aws_s3_bucket" "test" { bucket = "" diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 94b9bdde032..87d1844ba9c 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -320,7 +320,7 @@ The following arguments are supported: * `policy` - (Optional) A valid [bucket policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html) JSON document. Note that if the policy document is not specific enough (but still valid), Terraform may view the policy as constantly changing in a `terraform plan`. In this case, please make sure you use the verbose/specific version of the policy. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](/docs/providers/aws/guides/iam-policy-documents.html). * `tags` - (Optional) A mapping of tags to assign to the bucket. -* `force_destroy` - (Optional, Default:false ) A boolean that indicates all objects should be deleted from the bucket so that the bucket can be destroyed without error. These objects are *not* recoverable. +* `force_destroy` - (Optional, Default:`false`) A boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket so that the bucket can be destroyed without error. These objects are *not* recoverable. * `website` - (Optional) A website object (documented below). * `cors_rule` - (Optional) A rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) (documented below). * `versioning` - (Optional) A state of [versioning](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) (documented below) From f49f02928ca11a3ce110d2f0a0a98b0c9587e2ef Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 2 Sep 2019 18:22:36 -0400 Subject: [PATCH 05/10] d/aws_s3_bucket_object: Add 'object_lock_mode' and 'object_lock_retain_until_date' attributes. r/aws_s3_bucket_object: Add 'object_lock_mode' and 'object_lock_retain_until_date' attributes. 
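For GOVERNANCE-mode retention, the provider now sets BypassGovernanceRetention when a retain-until date is lowered or cleared and when force-destroying object versions. A minimal configuration for the new arguments, adapted from the acceptance-test configs (the bucket is the Object Lock-enabled bucket from the earlier example; the timestamp is illustrative):

    resource "aws_s3_bucket_object" "object" {
      bucket  = "${aws_s3_bucket.object_bucket.bucket}"
      key     = "test-key"
      content = "stuff"

      object_lock_mode              = "GOVERNANCE"
      object_lock_retain_until_date = "2019-10-12T00:00:00Z"

      # Lets the provider bypass GOVERNANCE retention if the object is
      # destroyed before the retain-until date passes.
      force_destroy = true
    }
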
--- aws/data_source_aws_s3_bucket_object.go | 10 ++ aws/data_source_aws_s3_bucket_object_test.go | 89 +++++++++- aws/resource_aws_s3_bucket_object.go | 90 +++++++++- aws/resource_aws_s3_bucket_object_test.go | 167 ++++++++++++++++++ website/docs/d/s3_bucket_object.html.markdown | 2 + website/docs/r/s3_bucket_object.html.markdown | 4 +- 6 files changed, 351 insertions(+), 11 deletions(-) diff --git a/aws/data_source_aws_s3_bucket_object.go b/aws/data_source_aws_s3_bucket_object.go index 776c808adf8..0f213cf86a2 100644 --- a/aws/data_source_aws_s3_bucket_object.go +++ b/aws/data_source_aws_s3_bucket_object.go @@ -78,6 +78,14 @@ func dataSourceAwsS3BucketObject() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "object_lock_mode": { + Type: schema.TypeString, + Computed: true, + }, + "object_lock_retain_until_date": { + Type: schema.TypeString, + Computed: true, + }, "range": { Type: schema.TypeString, Optional: true, @@ -160,6 +168,8 @@ func dataSourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) e d.Set("last_modified", out.LastModified.Format(time.RFC1123)) d.Set("metadata", pointersMapToStringList(out.Metadata)) d.Set("object_lock_legal_hold_status", out.ObjectLockLegalHoldStatus) + d.Set("object_lock_mode", out.ObjectLockMode) + d.Set("object_lock_retain_until_date", flattenS3ObjectLockRetainUntilDate(out.ObjectLockRetainUntilDate)) d.Set("server_side_encryption", out.ServerSideEncryption) d.Set("sse_kms_key_id", out.SSEKMSKeyId) d.Set("version_id", out.VersionId) diff --git a/aws/data_source_aws_s3_bucket_object_test.go b/aws/data_source_aws_s3_bucket_object_test.go index d401729c51f..15532d17c1b 100644 --- a/aws/data_source_aws_s3_bucket_object_test.go +++ b/aws/data_source_aws_s3_bucket_object_test.go @@ -4,6 +4,7 @@ import ( "fmt" "regexp" "testing" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" @@ -40,6 +41,8 @@ func TestAccDataSourceAWSS3BucketObject_basic(t *testing.T) { resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_mode", ""), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_retain_until_date", ""), resource.TestCheckNoResourceAttr("data.aws_s3_bucket_object.obj", "body"), ), }, @@ -75,6 +78,8 @@ func TestAccDataSourceAWSS3BucketObject_readableBody(t *testing.T) { resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_mode", ""), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_retain_until_date", ""), resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "body", "yes"), ), }, @@ -113,6 +118,8 @@ func TestAccDataSourceAWSS3BucketObject_kmsEncrypted(t *testing.T) { resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", ""), + 
resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_mode", ""), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_retain_until_date", ""), resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "body", "Keep Calm and Carry On"), ), }, @@ -165,15 +172,17 @@ func TestAccDataSourceAWSS3BucketObject_allParams(t *testing.T) { resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "metadata.%", "0"), resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "tags.%", "1"), resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_mode", ""), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_retain_until_date", ""), ), }, }, }) } -func TestAccDataSourceAWSS3BucketObject_ObjectLockLegalHold(t *testing.T) { +func TestAccDataSourceAWSS3BucketObject_ObjectLockLegalHoldOff(t *testing.T) { rInt := acctest.RandInt() - resourceOnlyConf, conf := testAccAWSDataSourceS3ObjectConfig_objectLockLegalHold(rInt) + resourceOnlyConf, conf := testAccAWSDataSourceS3ObjectConfig_objectLockLegalHoldOff(rInt) var rObj s3.GetObjectOutput var dsObj s3.GetObjectOutput @@ -199,6 +208,46 @@ func TestAccDataSourceAWSS3BucketObject_ObjectLockLegalHold(t *testing.T) { resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", "OFF"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_mode", ""), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_retain_until_date", ""), + resource.TestCheckNoResourceAttr("data.aws_s3_bucket_object.obj", "body"), + ), + }, + }, + }) +} + +func TestAccDataSourceAWSS3BucketObject_ObjectLockLegalHoldOn(t *testing.T) { + rInt := acctest.RandInt() + retainUntilDate := time.Now().UTC().AddDate(0, 0, 10).Format(time.RFC3339) + resourceOnlyConf, conf := testAccAWSDataSourceS3ObjectConfig_objectLockLegalHoldOn(rInt, retainUntilDate) + + var rObj s3.GetObjectOutput + var dsObj s3.GetObjectOutput + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + PreventPostDestroyRefresh: true, + Steps: []resource.TestStep{ + { + Config: resourceOnlyConf, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &rObj), + ), + }, + { + Config: conf, + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsS3ObjectDataSourceExists("data.aws_s3_bucket_object.obj", &dsObj), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_length", "11"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_type", "binary/octet-stream"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "etag", "b10a8db164e0754105b7a99be72e3fe5"), + resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", + regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_legal_hold_status", "ON"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "object_lock_mode", "GOVERNANCE"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", 
"object_lock_retain_until_date", retainUntilDate), resource.TestCheckNoResourceAttr("data.aws_s3_bucket_object.obj", "body"), ), }, @@ -343,7 +392,7 @@ data "aws_s3_bucket_object" "obj" { return resources, both } -func testAccAWSDataSourceS3ObjectConfig_objectLockLegalHold(randInt int) (string, string) { +func testAccAWSDataSourceS3ObjectConfig_objectLockLegalHoldOff(randInt int) (string, string) { resources := fmt.Sprintf(` resource "aws_s3_bucket" "object_bucket" { bucket = "tf-object-test-bucket-%d" @@ -373,3 +422,37 @@ data "aws_s3_bucket_object" "obj" { return resources, both } + +func testAccAWSDataSourceS3ObjectConfig_objectLockLegalHoldOn(randInt int, retainUntilDate string) (string, string) { + resources := fmt.Sprintf(` +resource "aws_s3_bucket" "object_bucket" { + bucket = "tf-object-test-bucket-%d" + + versioning { + enabled = true + } + + object_lock_configuration { + object_lock_enabled = "Enabled" + } +} +resource "aws_s3_bucket_object" "object" { + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "tf-testing-obj-%d" + content = "Hello World" + force_destroy = true + object_lock_legal_hold_status = "ON" + object_lock_mode = "GOVERNANCE" + object_lock_retain_until_date = "%s" +} +`, randInt, randInt, retainUntilDate) + + both := fmt.Sprintf(`%s +data "aws_s3_bucket_object" "obj" { + bucket = "tf-object-test-bucket-%d" + key = "tf-testing-obj-%d" +} +`, resources, randInt, randInt) + + return resources, both +} diff --git a/aws/resource_aws_s3_bucket_object.go b/aws/resource_aws_s3_bucket_object.go index b4f9c3c79e8..51f10a39f49 100644 --- a/aws/resource_aws_s3_bucket_object.go +++ b/aws/resource_aws_s3_bucket_object.go @@ -9,6 +9,7 @@ import ( "net/url" "os" "strings" + "time" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" @@ -177,6 +178,21 @@ func resourceAwsS3BucketObject() *schema.Resource { s3.ObjectLockLegalHoldStatusOff, }, false), }, + + "object_lock_mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + s3.ObjectLockModeGovernance, + s3.ObjectLockModeCompliance, + }, false), + }, + + "object_lock_retain_until_date": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.ValidateRFC3339TimeString, + }, }, } } @@ -282,6 +298,14 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) erro putInput.ObjectLockLegalHoldStatus = aws.String(v.(string)) } + if v, ok := d.GetOk("object_lock_mode"); ok { + putInput.ObjectLockMode = aws.String(v.(string)) + } + + if v, ok := d.GetOk("object_lock_retain_until_date"); ok { + putInput.ObjectLockRetainUntilDate = expandS3ObjectLockRetainUntilDate(v.(string)) + } + if _, err := s3conn.PutObject(putInput); err != nil { return fmt.Errorf("Error putting object in S3 bucket (%s): %s", bucket, err) } @@ -337,6 +361,8 @@ func resourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) err d.Set("server_side_encryption", resp.ServerSideEncryption) d.Set("website_redirect", resp.WebsiteRedirectLocation) d.Set("object_lock_legal_hold_status", resp.ObjectLockLegalHoldStatus) + d.Set("object_lock_mode", resp.ObjectLockMode) + d.Set("object_lock_retain_until_date", flattenS3ObjectLockRetainUntilDate(resp.ObjectLockRetainUntilDate)) // Only set non-default KMS key ID (one that doesn't match default) if resp.SSEKMSKeyId != nil { @@ -420,6 +446,32 @@ func resourceAwsS3BucketObjectUpdate(d *schema.ResourceData, meta interface{}) e } } + if d.HasChange("object_lock_mode") || 
d.HasChange("object_lock_retain_until_date") { + req := &s3.PutObjectRetentionInput{ + Bucket: aws.String(d.Get("bucket").(string)), + Key: aws.String(d.Get("key").(string)), + Retention: &s3.ObjectLockRetention{ + Mode: aws.String(d.Get("object_lock_mode").(string)), + RetainUntilDate: expandS3ObjectLockRetainUntilDate(d.Get("object_lock_retain_until_date").(string)), + }, + } + + // Bypass required to lower or clear retain-until date. + if d.HasChange("object_lock_retain_until_date") { + oraw, nraw := d.GetChange("object_lock_retain_until_date") + o := expandS3ObjectLockRetainUntilDate(oraw.(string)) + n := expandS3ObjectLockRetainUntilDate(nraw.(string)) + if n == nil || (o != nil && n.Before(*o)) { + req.BypassGovernanceRetention = aws.Bool(true) + } + } + + _, err := conn.PutObjectRetention(req) + if err != nil { + return fmt.Errorf("error putting S3 object lock retention: %s", err) + } + } + if err := setTagsS3Object(conn, d); err != nil { return fmt.Errorf("error setting S3 object tags: %s", err) } @@ -487,7 +539,7 @@ func deleteAllS3ObjectVersions(conn *s3.S3, bucketName, key string, force, ignor continue } - err := deleteS3ObjectVersion(conn, bucketName, objectKey, objectVersionID) + err := deleteS3ObjectVersion(conn, bucketName, objectKey, objectVersionID, force) if isAWSErr(err, "AccessDenied", "") && force { // Remove any legal hold. resp, err := conn.HeadObject(&s3.HeadObjectInput{ @@ -519,7 +571,7 @@ func deleteAllS3ObjectVersions(conn *s3.S3, bucketName, key string, force, ignor } // Attempt to delete again. - err = deleteS3ObjectVersion(conn, bucketName, objectKey, objectVersionID) + err = deleteS3ObjectVersion(conn, bucketName, objectKey, objectVersionID, force) if err != nil { lastErr = err @@ -570,7 +622,8 @@ func deleteAllS3ObjectVersions(conn *s3.S3, bucketName, key string, force, ignor continue } - err := deleteS3ObjectVersion(conn, bucketName, deleteMarkerKey, deleteMarkerVersionID) + // Delete markers have no object lock protections. + err := deleteS3ObjectVersion(conn, bucketName, deleteMarkerKey, deleteMarkerVersionID, false) if err != nil { lastErr = err @@ -600,13 +653,19 @@ func deleteAllS3ObjectVersions(conn *s3.S3, bucketName, key string, force, ignor } // deleteS3ObjectVersion deletes a specific bucket object version. -func deleteS3ObjectVersion(conn *s3.S3, b, k, v string) error { - log.Printf("[INFO] Deleting S3 Bucket (%s) Object (%s) Version: %s", b, k, v) - _, err := conn.DeleteObject(&s3.DeleteObjectInput{ +// Set force to true to override any S3 object lock protections. 
+func deleteS3ObjectVersion(conn *s3.S3, b, k, v string, force bool) error { + input := &s3.DeleteObjectInput{ Bucket: aws.String(b), Key: aws.String(k), VersionId: aws.String(v), - }) + } + if force { + input.BypassGovernanceRetention = aws.Bool(true) + } + + log.Printf("[INFO] Deleting S3 Bucket (%s) Object (%s) Version: %s", b, k, v) + _, err := conn.DeleteObject(input) if err != nil { log.Printf("[WARN] Error deleting S3 Bucket (%s) Object (%s) Version (%s): %s", b, k, v, err) @@ -618,3 +677,20 @@ func deleteS3ObjectVersion(conn *s3.S3, b, k, v string) error { return err } + +func expandS3ObjectLockRetainUntilDate(v string) *time.Time { + t, err := time.Parse(time.RFC3339, v) + if err != nil { + return nil + } + + return aws.Time(t) +} + +func flattenS3ObjectLockRetainUntilDate(t *time.Time) string { + if t == nil { + return "" + } + + return t.Format(time.RFC3339) +} diff --git a/aws/resource_aws_s3_bucket_object_test.go b/aws/resource_aws_s3_bucket_object_test.go index 92bfab5a40e..5837593e3bb 100644 --- a/aws/resource_aws_s3_bucket_object_test.go +++ b/aws/resource_aws_s3_bucket_object_test.go @@ -11,6 +11,7 @@ import ( "sort" "strings" "testing" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" @@ -276,6 +277,8 @@ func TestAccAWSS3BucketObject_updates(t *testing.T) { testAccCheckAWSS3BucketObjectBody(&originalObj, "initial object state"), resource.TestCheckResourceAttr(resourceName, "etag", "647d1d58e1011c743ec67d5e8af87b53"), resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), ), }, { @@ -285,6 +288,8 @@ func TestAccAWSS3BucketObject_updates(t *testing.T) { testAccCheckAWSS3BucketObjectBody(&modifiedObj, "modified object"), resource.TestCheckResourceAttr(resourceName, "etag", "1c7fd13df1515c2a13ad9eb068931f09"), resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), ), }, }, @@ -706,6 +711,8 @@ func TestAccAWSS3BucketObject_ObjectLockLegalHoldStartWithNone(t *testing.T) { testAccCheckAWSS3BucketObjectExists(resourceName, &obj1), testAccCheckAWSS3BucketObjectBody(&obj1, "stuff"), resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), ), }, { @@ -715,6 +722,8 @@ func TestAccAWSS3BucketObject_ObjectLockLegalHoldStartWithNone(t *testing.T) { testAccCheckAWSS3BucketObjectVersionIdEquals(&obj2, &obj1), testAccCheckAWSS3BucketObjectBody(&obj2, "stuff"), resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", "ON"), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), ), }, // Remove legal hold but create a new object version to test force_destroy @@ -725,6 +734,8 @@ func TestAccAWSS3BucketObject_ObjectLockLegalHoldStartWithNone(t *testing.T) { testAccCheckAWSS3BucketObjectVersionIdDiffers(&obj3, &obj2), testAccCheckAWSS3BucketObjectBody(&obj3, "changed stuff"), resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", "OFF"), + 
resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), ), }, }, @@ -747,6 +758,8 @@ func TestAccAWSS3BucketObject_ObjectLockLegalHoldStartWithOn(t *testing.T) { testAccCheckAWSS3BucketObjectExists(resourceName, &obj1), testAccCheckAWSS3BucketObjectBody(&obj1, "stuff"), resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", "ON"), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), ), }, { @@ -756,6 +769,116 @@ func TestAccAWSS3BucketObject_ObjectLockLegalHoldStartWithOn(t *testing.T) { testAccCheckAWSS3BucketObjectVersionIdEquals(&obj2, &obj1), testAccCheckAWSS3BucketObjectBody(&obj2, "stuff"), resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", "OFF"), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), + ), + }, + }, + }) +} + +func TestAccAWSS3BucketObject_ObjectLockRetentionStartWithNone(t *testing.T) { + var obj1, obj2, obj3 s3.GetObjectOutput + resourceName := "aws_s3_bucket_object.object" + rInt := acctest.RandInt() + retainUntilDate := time.Now().UTC().AddDate(0, 0, 10).Format(time.RFC3339) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketObjectDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketObjectConfig_noObjectLockRetention(rInt, "stuff"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj1, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), + ), + }, + { + Config: testAccAWSS3BucketObjectConfig_withObjectLockRetention(rInt, "stuff", retainUntilDate), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj2), + testAccCheckAWSS3BucketObjectVersionIdEquals(&obj2, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj2, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", "GOVERNANCE"), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", retainUntilDate), + ), + }, + // Remove retention period but create a new object version to test force_destroy + { + Config: testAccAWSS3BucketObjectConfig_noObjectLockRetention(rInt, "changed stuff"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj3), + testAccCheckAWSS3BucketObjectVersionIdDiffers(&obj3, &obj2), + testAccCheckAWSS3BucketObjectBody(&obj3, "changed stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), + ), + }, + }, + }) +} + +func TestAccAWSS3BucketObject_ObjectLockRetentionStartWithSet(t *testing.T) { + var obj1, obj2, obj3, obj4 s3.GetObjectOutput + resourceName := "aws_s3_bucket_object.object" + rInt := acctest.RandInt() + retainUntilDate1 := 
time.Now().UTC().AddDate(0, 0, 20).Format(time.RFC3339) + retainUntilDate2 := time.Now().UTC().AddDate(0, 0, 30).Format(time.RFC3339) + retainUntilDate3 := time.Now().UTC().AddDate(0, 0, 10).Format(time.RFC3339) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketObjectDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketObjectConfig_withObjectLockRetention(rInt, "stuff", retainUntilDate1), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj1, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", "GOVERNANCE"), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", retainUntilDate1), + ), + }, + { + Config: testAccAWSS3BucketObjectConfig_withObjectLockRetention(rInt, "stuff", retainUntilDate2), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj2), + testAccCheckAWSS3BucketObjectVersionIdEquals(&obj2, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj2, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", "GOVERNANCE"), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", retainUntilDate2), + ), + }, + { + Config: testAccAWSS3BucketObjectConfig_withObjectLockRetention(rInt, "stuff", retainUntilDate3), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj3), + testAccCheckAWSS3BucketObjectVersionIdEquals(&obj3, &obj2), + testAccCheckAWSS3BucketObjectBody(&obj3, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", "GOVERNANCE"), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", retainUntilDate3), + ), + }, + { + Config: testAccAWSS3BucketObjectConfig_noObjectLockRetention(rInt, "stuff"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj4), + testAccCheckAWSS3BucketObjectVersionIdEquals(&obj4, &obj3), + testAccCheckAWSS3BucketObjectBody(&obj4, "stuff"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), ), }, }, @@ -1273,3 +1396,47 @@ resource "aws_s3_bucket_object" "object" { } `, randInt, content, legalHoldStatus) } + +func testAccAWSS3BucketObjectConfig_noObjectLockRetention(randInt int, content string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "object_bucket" { + bucket = "tf-object-test-bucket-%d" + versioning { + enabled = true + } + object_lock_configuration { + object_lock_enabled = "Enabled" + } +} + +resource "aws_s3_bucket_object" "object" { + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "test-key" + content = "%s" + force_destroy = true +} +`, randInt, content) +} + +func testAccAWSS3BucketObjectConfig_withObjectLockRetention(randInt int, content, retainUntilDate string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "object_bucket" { + bucket = "tf-object-test-bucket-%d" + versioning { + enabled = true + } + 
object_lock_configuration { + object_lock_enabled = "Enabled" + } +} + +resource "aws_s3_bucket_object" "object" { + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "test-key" + content = "%s" + force_destroy = true + object_lock_mode = "GOVERNANCE" + object_lock_retain_until_date = "%s" +} +`, randInt, content, retainUntilDate) +} diff --git a/website/docs/d/s3_bucket_object.html.markdown b/website/docs/d/s3_bucket_object.html.markdown index 86603db384b..d22772d65d8 100644 --- a/website/docs/d/s3_bucket_object.html.markdown +++ b/website/docs/d/s3_bucket_object.html.markdown @@ -78,6 +78,8 @@ In addition to all arguments above, the following attributes are exported: * `last_modified` - Last modified date of the object in RFC1123 format (e.g. `Mon, 02 Jan 2006 15:04:05 MST`) * `metadata` - A map of metadata stored with the object in S3 * `object_lock_legal_hold_status` - Indicates whether this object has an active [legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds). This field is only returned if you have permission to view an object's legal hold status. +* `object_lock_mode` - The object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) currently in place for this object. +* `object_lock_retain_until_date` - The date and time when this object's object lock will expire. * `server_side_encryption` - If the object is stored using server-side encryption (KMS or Amazon S3-managed encryption key), this field includes the chosen encryption and algorithm used. * `sse_kms_key_id` - If present, specifies the ID of the Key Management Service (KMS) master encryption key that was used for the object. * `storage_class` - [Storage class](http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) information of the object. Available for all objects except for `Standard` storage class objects. diff --git a/website/docs/r/s3_bucket_object.html.markdown b/website/docs/r/s3_bucket_object.html.markdown index c26c0cff0a0..44538734704 100644 --- a/website/docs/r/s3_bucket_object.html.markdown +++ b/website/docs/r/s3_bucket_object.html.markdown @@ -111,7 +111,9 @@ use the exported `arn` attribute: * `tags` - (Optional) A mapping of tags to assign to the object. `force_destroy` - (Optional) Allow the object to be deleted by removing any legal hold on any object version. Default is `false`. -* `object_lock_legal_hold_status` - (Optional) Indicates whether this object has an active [legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds). Valid values are `ON` and `OFF`. +* `object_lock_legal_hold_status` - (Optional) The [legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`. +* `object_lock_mode` - (Optional) The object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`. +* `object_lock_retain_until_date` - (Optional) The date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods). 
If no content is provided through `source`, `content` or `content_base64`, then the object will be empty. From 21a9fa7f07bd70f9adbfe94dc04889d753d7fa09 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 2 Sep 2019 18:34:12 -0400 Subject: [PATCH 06/10] Fix documentation typo. --- website/docs/r/s3_bucket_object.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/s3_bucket_object.html.markdown b/website/docs/r/s3_bucket_object.html.markdown index 44538734704..a119dcebb70 100644 --- a/website/docs/r/s3_bucket_object.html.markdown +++ b/website/docs/r/s3_bucket_object.html.markdown @@ -109,7 +109,7 @@ use the exported `arn` attribute: `kms_key_id = "${aws_kms_key.foo.arn}"` * `metadata` - (Optional) A mapping of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase label are currently supported by the AWS Go API). * `tags` - (Optional) A mapping of tags to assign to the object. -`force_destroy` - (Optional) Allow the object to be deleted by removing any legal hold on any object version. +* `force_destroy` - (Optional) Allow the object to be deleted by removing any legal hold on any object version. Default is `false`. * `object_lock_legal_hold_status` - (Optional) The [legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`. * `object_lock_mode` - (Optional) The object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`. From 7754826218d34d7c331833ab138a28e27b09f28e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 6 Sep 2019 15:14:48 -0400 Subject: [PATCH 07/10] r/aws_s3_bucket_object: Add S3 Object Lock example usage. --- website/docs/r/s3_bucket_object.html.markdown | 21 ++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/website/docs/r/s3_bucket_object.html.markdown b/website/docs/r/s3_bucket_object.html.markdown index a119dcebb70..31fd8fa6f42 100644 --- a/website/docs/r/s3_bucket_object.html.markdown +++ b/website/docs/r/s3_bucket_object.html.markdown @@ -80,6 +80,25 @@ resource "aws_s3_bucket_object" "examplebucket_object" { } ``` +### S3 Object Lock + +```hcl +resource "aws_s3_bucket" "examplebucket" { + bucket = "examplebuckettftest" + acl = "private" +} + +resource "aws_s3_bucket_object" "examplebucket_object" { + key = "someobject" + bucket = "${aws_s3_bucket.examplebucket.id}" + source = "important.txt" + + object_lock_legal_hold_status = "ON" + object_lock_mode = "GOVERNANCE" + object_lock_retain_until_date = "2021-12-31T23:59:60Z" +} +``` + ## Argument Reference -> **Note:** If you specify `content_encoding` you are responsible for encoding the body appropriately. `source`, `content`, and `content_base64` all expect already encoded/compressed bytes. @@ -110,7 +129,7 @@ use the exported `arn` attribute: * `metadata` - (Optional) A mapping of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase label are currently supported by the AWS Go API). * `tags` - (Optional) A mapping of tags to assign to the object. * `force_destroy` - (Optional) Allow the object to be deleted by removing any legal hold on any object version. -Default is `false`. +Default is `false`. 
This value should be set to `true` only if the bucket has S3 object lock enabled. * `object_lock_legal_hold_status` - (Optional) The [legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`. * `object_lock_mode` - (Optional) The object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`. * `object_lock_retain_until_date` - (Optional) The date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods). From ced3fa036359ec79f2ec5ef16ef60504e8ce8287 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 6 Sep 2019 16:14:24 -0400 Subject: [PATCH 08/10] Only attempt to force destroy S3 bucket objects if the bucket has Object Lock enabled. --- aws/resource_aws_s3_bucket.go | 7 ++++++- aws/resource_aws_s3_bucket_object.go | 2 +- aws/resource_aws_s3_bucket_object_test.go | 9 ++++++++- aws/resource_aws_s3_bucket_test.go | 18 ++++++++++++++++++ 4 files changed, 33 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_s3_bucket.go b/aws/resource_aws_s3_bucket.go index 745c679240e..c485577f0a1 100644 --- a/aws/resource_aws_s3_bucket.go +++ b/aws/resource_aws_s3_bucket.go @@ -1261,7 +1261,12 @@ func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error { // Delete everything including locked objects. // Don't ignore any object errors or we could recurse infinitely. - err = deleteAllS3ObjectVersions(s3conn, d.Id(), "", true, false) + objectLockEnabled := false + objectLockConfiguration := expandS3ObjectLockConfiguration(d.Get("object_lock_configuration").([]interface{})) + if objectLockConfiguration != nil && aws.StringValue(objectLockConfiguration.ObjectLockEnabled) == s3.ObjectLockEnabledEnabled { + objectLockEnabled = true + } + err = deleteAllS3ObjectVersions(s3conn, d.Id(), "", objectLockEnabled, false) if err != nil { return fmt.Errorf("error S3 Bucket force_destroy: %s", err) diff --git a/aws/resource_aws_s3_bucket_object.go b/aws/resource_aws_s3_bucket_object.go index 51f10a39f49..81c3d64841a 100644 --- a/aws/resource_aws_s3_bucket_object.go +++ b/aws/resource_aws_s3_bucket_object.go @@ -516,7 +516,7 @@ func resourceAwsS3BucketObjectCustomizeDiff(d *schema.ResourceDiff, meta interfa // deleteAllS3ObjectVersions deletes all versions of a specified key from an S3 bucket. // If key is empty then all versions of all objects are deleted. -// Set force to true to override any S3 object lock protections. +// Set force to true to override any S3 object lock protections on object lock enabled buckets. 
func deleteAllS3ObjectVersions(conn *s3.S3, bucketName, key string, force, ignoreObjectErrors bool) error { input := &s3.ListObjectVersionsInput{ Bucket: aws.String(bucketName), diff --git a/aws/resource_aws_s3_bucket_object_test.go b/aws/resource_aws_s3_bucket_object_test.go index 5837593e3bb..8b8bb0175a9 100644 --- a/aws/resource_aws_s3_bucket_object_test.go +++ b/aws/resource_aws_s3_bucket_object_test.go @@ -82,8 +82,15 @@ func testSweepS3BucketObjects(region string) error { continue } + objectLockEnabled, err := testS3BucketObjectLockEnabled(conn, bucketName) + + if err != nil { + log.Printf("[ERROR] Error getting S3 Bucket (%s) Object Lock: %s", bucketName, err) + continue + } + // Delete everything including locked objects. Ignore any object errors. - err = deleteAllS3ObjectVersions(conn, bucketName, "", true, true) + err = deleteAllS3ObjectVersions(conn, bucketName, "", objectLockEnabled, true) if err != nil { return fmt.Errorf("error listing S3 Bucket (%s) Objects: %s", bucketName, err) diff --git a/aws/resource_aws_s3_bucket_test.go b/aws/resource_aws_s3_bucket_test.go index c2bb995ce23..2437165bff9 100644 --- a/aws/resource_aws_s3_bucket_test.go +++ b/aws/resource_aws_s3_bucket_test.go @@ -136,6 +136,24 @@ func testS3BucketRegion(conn *s3.S3, bucket string) (string, error) { return aws.StringValue(output.LocationConstraint), nil } +func testS3BucketObjectLockEnabled(conn *s3.S3, bucket string) (bool, error) { + input := &s3.GetObjectLockConfigurationInput{ + Bucket: aws.String(bucket), + } + + output, err := conn.GetObjectLockConfiguration(input) + + if isAWSErr(err, "ObjectLockConfigurationNotFoundError", "") { + return false, nil + } + + if err != nil { + return false, err + } + + return aws.StringValue(output.ObjectLockConfiguration.ObjectLockEnabled) == s3.ObjectLockEnabledEnabled, nil +} + func TestAccAWSS3Bucket_basic(t *testing.T) { rInt := acctest.RandInt() arnRegexp := regexp.MustCompile(`^arn:aws[\w-]*:s3:::`) From c1d396715f1b923e23b4dfb2909752e2a699db71 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 6 Sep 2019 16:39:46 -0400 Subject: [PATCH 09/10] r/aws_s3_bucket_object: Update S3 Object Lock example usage with Object Lock enabled bucket. --- website/docs/r/s3_bucket_object.html.markdown | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/website/docs/r/s3_bucket_object.html.markdown b/website/docs/r/s3_bucket_object.html.markdown index 31fd8fa6f42..322f8a8b990 100644 --- a/website/docs/r/s3_bucket_object.html.markdown +++ b/website/docs/r/s3_bucket_object.html.markdown @@ -86,6 +86,14 @@ resource "aws_s3_bucket_object" "examplebucket_object" { resource "aws_s3_bucket" "examplebucket" { bucket = "examplebuckettftest" acl = "private" + + versioning { + enabled = true + } + + object_lock_configuration { + object_lock_enabled = "Enabled" + } } resource "aws_s3_bucket_object" "examplebucket_object" { @@ -96,6 +104,8 @@ resource "aws_s3_bucket_object" "examplebucket_object" { object_lock_legal_hold_status = "ON" object_lock_mode = "GOVERNANCE" object_lock_retain_until_date = "2021-12-31T23:59:60Z" + + force_destroy = true } ``` From 69d9efabb279642550bbd73aeca3fd1c5934bca8 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 18 Sep 2019 08:27:21 -0400 Subject: [PATCH 10/10] Fix minor review comments. 
--- aws/resource_aws_s3_bucket.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_s3_bucket.go b/aws/resource_aws_s3_bucket.go index c485577f0a1..cdf90113c5a 100644 --- a/aws/resource_aws_s3_bucket.go +++ b/aws/resource_aws_s3_bucket.go @@ -1261,10 +1261,10 @@ func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error { // Delete everything including locked objects. // Don't ignore any object errors or we could recurse infinitely. - objectLockEnabled := false + var objectLockEnabled bool objectLockConfiguration := expandS3ObjectLockConfiguration(d.Get("object_lock_configuration").([]interface{})) - if objectLockConfiguration != nil && aws.StringValue(objectLockConfiguration.ObjectLockEnabled) == s3.ObjectLockEnabledEnabled { - objectLockEnabled = true + if objectLockConfiguration != nil { + objectLockEnabled = aws.StringValue(objectLockConfiguration.ObjectLockEnabled) == s3.ObjectLockEnabledEnabled } err = deleteAllS3ObjectVersions(s3conn, d.Id(), "", objectLockEnabled, false)
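
Supplementary sketch 1: the retain-until-date handling added in this series stores the timestamp in state as an RFC3339 string and converts it to and from `*time.Time` via the new `expandS3ObjectLockRetainUntilDate` / `flattenS3ObjectLockRetainUntilDate` helpers. The standalone round-trip below is illustrative only (package layout and function names are not part of the provider); note that an unparseable value flattens back to the empty string, which matches the acceptance tests expecting `object_lock_retain_until_date` to read as `""` when no retention is set.

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

// expandRetainUntilDate converts the schema's RFC3339 string to *time.Time,
// returning nil when the value cannot be parsed (mirrors
// expandS3ObjectLockRetainUntilDate).
func expandRetainUntilDate(v string) *time.Time {
	t, err := time.Parse(time.RFC3339, v)
	if err != nil {
		return nil
	}
	return aws.Time(t)
}

// flattenRetainUntilDate converts *time.Time back to the schema string,
// using "" for a nil time (mirrors flattenS3ObjectLockRetainUntilDate).
func flattenRetainUntilDate(t *time.Time) string {
	if t == nil {
		return ""
	}
	return t.Format(time.RFC3339)
}

func main() {
	// Same style of value the acceptance tests build: now + 10 days, RFC3339.
	in := time.Now().UTC().AddDate(0, 0, 10).Format(time.RFC3339)
	fmt.Println(flattenRetainUntilDate(expandRetainUntilDate(in)) == in) // true
	fmt.Println(flattenRetainUntilDate(expandRetainUntilDate("not-a-date")) == "") // true: unset reads as ""
}
```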
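Supplementary sketch 2: patches 08 and 10 change the force-destroy path so that `BypassGovernanceRetention` is only requested when the bucket actually has Object Lock enabled, detected with `GetObjectLockConfiguration` and treating `ObjectLockConfigurationNotFoundError` as "not enabled". The sketch below condenses that decision together with the `force` handling added to `deleteS3ObjectVersion` into one standalone program; the session setup, helper names, and the bucket/key/version values are placeholders, and plain `awserr` stands in for the provider's internal `isAWSErr` helper.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// bucketHasObjectLock reports whether Object Lock is enabled on the bucket.
// A missing configuration surfaces as ObjectLockConfigurationNotFoundError,
// which simply means "not enabled".
func bucketHasObjectLock(conn *s3.S3, bucket string) (bool, error) {
	out, err := conn.GetObjectLockConfiguration(&s3.GetObjectLockConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "ObjectLockConfigurationNotFoundError" {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return aws.StringValue(out.ObjectLockConfiguration.ObjectLockEnabled) == s3.ObjectLockEnabledEnabled, nil
}

// deleteVersion removes a single object version; BypassGovernanceRetention is
// only set when the caller has established that the bucket uses Object Lock,
// which is how the patch drives deleteS3ObjectVersion's force parameter.
func deleteVersion(conn *s3.S3, bucket, key, versionID string, force bool) error {
	input := &s3.DeleteObjectInput{
		Bucket:    aws.String(bucket),
		Key:       aws.String(key),
		VersionId: aws.String(versionID),
	}
	if force {
		input.BypassGovernanceRetention = aws.Bool(true)
	}
	_, err := conn.DeleteObject(input)
	return err
}

func main() {
	conn := s3.New(session.Must(session.NewSession()))

	// Placeholder identifiers for illustration only.
	bucket, key, version := "tf-object-test-bucket-12345", "test-key", "example-version-id"

	force, err := bucketHasObjectLock(conn, bucket)
	if err != nil {
		log.Fatal(err)
	}
	if err := deleteVersion(conn, bucket, key, version, force); err != nil {
		log.Fatal(err)
	}
	fmt.Println("deleted")
}
```

Keeping the bypass flag conditional avoids sending Object Lock-specific parameters to buckets that were never lock-enabled, which is the behaviour the PATCH 08 commit message describes for both the bucket delete path and the test sweeper.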