
Commit

Merge pull request #33778 from nikoshet/f-aws_dms_s3_endpoint_add_glue_catalog_generation

DMS: Add GlueCatalogGeneration option to aws_dms_s3_endpoint
ewbankkit authored Oct 9, 2023
2 parents ac11701 + 146ab14 commit d822748
Showing 8 changed files with 54 additions and 2 deletions.
11 changes: 11 additions & 0 deletions .changelog/33778.txt
@@ -0,0 +1,11 @@
```release-note:enhancement
resource/aws_dms_s3_endpoint: Add `glue_catalog_generation` attribute
```

```release-note:enhancement
resource/aws_dms_endpoint: Add `s3_settings.glue_catalog_generation` attribute
```

```release-note:enhancement
data-source/aws_dms_endpoint: Add `s3_settings.glue_catalog_generation` attribute
```
13 changes: 12 additions & 1 deletion internal/service/dms/endpoint.go
@@ -571,6 +571,11 @@ func ResourceEndpoint() *schema.Resource {
Optional: true,
Default: "",
},
"glue_catalog_generation": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"ignore_header_rows": {
Type: schema.TypeInt,
Optional: true,
@@ -1426,7 +1431,7 @@ func resourceEndpointSetState(d *schema.ResourceData, endpoint *dms.Endpoint) error {
d.Set("endpoint_arn", endpoint.EndpointArn)
d.Set("endpoint_id", endpoint.EndpointIdentifier)
// For some reason the AWS API only accepts lowercase type but returns it as uppercase
d.Set("endpoint_type", strings.ToLower(*endpoint.EndpointType))
d.Set("endpoint_type", strings.ToLower(aws.StringValue(endpoint.EndpointType)))
d.Set("engine_name", endpoint.EngineName)
d.Set("extra_connection_attributes", endpoint.ExtraConnectionAttributes)

@@ -2023,6 +2028,9 @@ func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings {
if v, ok := tfMap["external_table_definition"].(string); ok {
apiObject.ExternalTableDefinition = aws.String(v)
}
if v, ok := tfMap["glue_catalog_generation"].(bool); ok {
apiObject.GlueCatalogGeneration = aws.Bool(v)
}
if v, ok := tfMap["ignore_header_rows"].(int); ok {
apiObject.IgnoreHeaderRows = aws.Int64(int64(v))
}
@@ -2145,6 +2153,9 @@ func flattenS3Settings(apiObject *dms.S3Settings) []map[string]interface{} {
if v := apiObject.ExternalTableDefinition; v != nil {
tfMap["external_table_definition"] = aws.StringValue(v)
}
if v := apiObject.GlueCatalogGeneration; v != nil {
tfMap["glue_catalog_generation"] = aws.BoolValue(v)
}
if v := apiObject.IgnoreHeaderRows; v != nil {
tfMap["ignore_header_rows"] = aws.Int64Value(v)
}
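The two hunks above form the provider's usual expand/flatten round trip for a scalar setting: the Terraform config map feeds a pointer-typed API field, and the API object is folded back into a state map. A self-contained sketch of the same pattern, with a hypothetical `s3Settings` struct standing in for `dms.S3Settings`:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

// s3Settings is a hypothetical stand-in for dms.S3Settings.
type s3Settings struct {
	GlueCatalogGeneration *bool
}

// expand mirrors expandS3Settings above: config map -> API object.
func expand(tfMap map[string]interface{}) *s3Settings {
	apiObject := &s3Settings{}
	if v, ok := tfMap["glue_catalog_generation"].(bool); ok {
		apiObject.GlueCatalogGeneration = aws.Bool(v)
	}
	return apiObject
}

// flatten mirrors flattenS3Settings above: API object -> state map.
func flatten(apiObject *s3Settings) map[string]interface{} {
	tfMap := map[string]interface{}{}
	if v := apiObject.GlueCatalogGeneration; v != nil {
		tfMap["glue_catalog_generation"] = aws.BoolValue(v)
	}
	return tfMap
}

func main() {
	in := map[string]interface{}{"glue_catalog_generation": true}
	fmt.Println(flatten(expand(in))) // map[glue_catalog_generation:true]
}
```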
4 changes: 4 additions & 0 deletions internal/service/dms/endpoint_data_source.go
@@ -405,6 +405,10 @@ func DataSourceEndpoint() *schema.Resource {
Type: schema.TypeString,
Computed: true,
},
"glue_catalog_generation": {
Type: schema.TypeBool,
Computed: true,
},
"ignore_headers_row": {
Type: schema.TypeInt,
Computed: true,
7 changes: 7 additions & 0 deletions internal/service/dms/endpoint_test.go
@@ -281,6 +281,7 @@ func TestAccDMSEndpoint_S3_basic(t *testing.T) {
resource.TestCheckResourceAttr(resourceName, "s3_settings.0.data_format", "csv"),
resource.TestCheckResourceAttr(resourceName, "s3_settings.0.date_partition_enabled", "true"),
resource.TestCheckResourceAttr(resourceName, "s3_settings.0.date_partition_sequence", "yyyymmddhh"),
resource.TestCheckResourceAttr(resourceName, "s3_settings.0.glue_catalog_generation", "false"),
resource.TestCheckResourceAttr(resourceName, "s3_settings.0.ignore_header_rows", "0"),
resource.TestCheckResourceAttr(resourceName, "s3_settings.0.parquet_version", "parquet-1-0"),
resource.TestCheckResourceAttr(resourceName, "s3_settings.0.parquet_timestamp_in_millisecond", "false"),
@@ -311,6 +312,7 @@ func TestAccDMSEndpoint_S3_basic(t *testing.T) {
resource.TestCheckResourceAttr(resourceName, "s3_settings.0.bucket_folder", "new-bucket_folder"),
resource.TestCheckResourceAttr(resourceName, "s3_settings.0.bucket_name", "new-bucket_name"),
resource.TestCheckResourceAttr(resourceName, "s3_settings.0.compression_type", "GZIP"),
resource.TestCheckResourceAttr(resourceName, "s3_settings.0.glue_catalog_generation", "false"),
),
},
{
@@ -344,6 +346,8 @@ func TestAccDMSEndpoint_S3_detachTargetOnLobLookupFailureParquet(t *testing.T) {
resource.TestCheckResourceAttr(resourceName, "s3_settings.0.bucket_folder", "new-bucket_folder"),
resource.TestCheckResourceAttr(resourceName, "s3_settings.0.bucket_name", "new-bucket_name"),
resource.TestCheckResourceAttr(resourceName, "s3_settings.0.compression_type", "GZIP"),
resource.TestCheckResourceAttr(resourceName, "s3_settings.0.glue_catalog_generation", "true"),
resource.TestCheckResourceAttr(resourceName, "s3_settings.0.include_op_for_full_load", "true"),
),
},
{
@@ -2987,6 +2991,7 @@ resource "aws_dms_endpoint" "test" {
bucket_folder = "new-bucket_folder"
bucket_name = "new-bucket_name"
compression_type = "GZIP"
glue_catalog_generation = false
}
}
@@ -3066,6 +3071,8 @@ resource "aws_dms_endpoint" "test" {
bucket_folder = "new-bucket_folder"
bucket_name = "new-bucket_name"
compression_type = "GZIP"
glue_catalog_generation = true
include_op_for_full_load = true
}
}
10 changes: 10 additions & 0 deletions internal/service/dms/s3_endpoint.go
@@ -245,6 +245,11 @@ func ResourceS3Endpoint() *schema.Resource {
return json
},
},
"glue_catalog_generation": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"ignore_header_rows": {
Type: schema.TypeInt,
Optional: true,
@@ -460,6 +465,7 @@ func resourceS3EndpointRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
d.Set("date_partition_sequence", s3settings.DatePartitionSequence)
d.Set("date_partition_timezone", s3settings.DatePartitionTimezone)
d.Set("encryption_mode", s3settings.EncryptionMode)
d.Set("glue_catalog_generation", s3settings.GlueCatalogGeneration)
d.Set("parquet_timestamp_in_millisecond", s3settings.ParquetTimestampInMillisecond)
d.Set("parquet_version", s3settings.ParquetVersion)
d.Set("preserve_transactions", s3settings.PreserveTransactions)
@@ -674,6 +680,10 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings {
s3s.ExternalTableDefinition = aws.String(v.(string))
}

if v, ok := d.Get("glue_catalog_generation").(bool); ok { // target
s3s.GlueCatalogGeneration = aws.Bool(v)
}

if v, ok := d.GetOk("ignore_header_rows"); ok {
s3s.IgnoreHeaderRows = aws.Int64(int64(v.(int)))
}
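One detail worth flagging in the hunk above: the new bool is read with `d.Get(...)` plus a type assertion rather than `d.GetOk`, because `GetOk` reports ok=false for zero values and would silently drop an explicit `false` before it ever reached the API. A rough illustration of that trap, with a plain map standing in for `*schema.ResourceData` (the `getOk` helper is a hypothetical mimic, illustrative only):

```go
package main

import "fmt"

// getOk mimics schema.ResourceData.GetOk: ok is false when the
// value is missing or is its type's zero value (false, "", 0).
func getOk(state map[string]interface{}, key string) (interface{}, bool) {
	v, exists := state[key]
	if !exists || v == false || v == "" || v == 0 {
		return v, false
	}
	return v, true
}

func main() {
	state := map[string]interface{}{"glue_catalog_generation": false}

	// A GetOk-style lookup skips the explicit false entirely...
	if _, ok := getOk(state, "glue_catalog_generation"); !ok {
		fmt.Println("GetOk: skipped, false would never be sent")
	}

	// ...while Get plus a type assertion always forwards it.
	if v, ok := state["glue_catalog_generation"].(bool); ok {
		fmt.Println("Get + assertion: sending", v)
	}
}
```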
8 changes: 7 additions & 1 deletion internal/service/dms/s3_endpoint_test.go
@@ -54,6 +54,7 @@ func TestAccDMSS3Endpoint_basic(t *testing.T) {
resource.TestCheckResourceAttr(resourceName, "encoding_type", "plain"),
resource.TestCheckResourceAttr(resourceName, "encryption_mode", "SSE_S3"),
resource.TestCheckResourceAttrPair(resourceName, "expected_bucket_owner", "data.aws_caller_identity.current", "account_id"),
resource.TestCheckResourceAttr(resourceName, "glue_catalog_generation", "true"),
resource.TestCheckResourceAttr(resourceName, "ignore_header_rows", "1"),
resource.TestCheckResourceAttr(resourceName, "include_op_for_full_load", "true"),
resource.TestCheckResourceAttr(resourceName, "max_file_size", "1000000"),
@@ -118,6 +119,7 @@ func TestAccDMSS3Endpoint_update(t *testing.T) {
resource.TestCheckResourceAttr(resourceName, "encoding_type", "plain"),
resource.TestCheckResourceAttr(resourceName, "encryption_mode", "SSE_S3"),
resource.TestCheckResourceAttrPair(resourceName, "expected_bucket_owner", "data.aws_caller_identity.current", "account_id"),
resource.TestCheckResourceAttr(resourceName, "glue_catalog_generation", "true"),
resource.TestCheckResourceAttr(resourceName, "ignore_header_rows", "1"),
resource.TestCheckResourceAttr(resourceName, "include_op_for_full_load", "true"),
resource.TestCheckResourceAttr(resourceName, "max_file_size", "1000000"),
@@ -163,6 +165,7 @@ func TestAccDMSS3Endpoint_update(t *testing.T) {
resource.TestCheckResourceAttr(resourceName, "encoding_type", "plain"),
resource.TestCheckResourceAttr(resourceName, "encryption_mode", "SSE_S3"),
resource.TestCheckResourceAttrPair(resourceName, "expected_bucket_owner", "data.aws_caller_identity.current", "account_id"),
resource.TestCheckResourceAttr(resourceName, "glue_catalog_generation", "false"),
resource.TestCheckResourceAttr(resourceName, "ignore_header_rows", "1"),
resource.TestCheckResourceAttr(resourceName, "include_op_for_full_load", "false"),
resource.TestCheckResourceAttr(resourceName, "max_file_size", "900000"),
@@ -223,6 +226,7 @@ func TestAccDMSS3Endpoint_simple(t *testing.T) {
resource.TestCheckResourceAttr(resourceName, "encoding_type", ""),
resource.TestCheckResourceAttr(resourceName, "encryption_mode", ""),
resource.TestCheckResourceAttr(resourceName, "expected_bucket_owner", ""),
resource.TestCheckResourceAttr(resourceName, "glue_catalog_generation", "false"),
resource.TestCheckResourceAttr(resourceName, "ignore_header_rows", "0"),
resource.TestCheckResourceAttr(resourceName, "include_op_for_full_load", "false"),
resource.TestCheckResourceAttr(resourceName, "max_file_size", "0"),
@@ -302,7 +306,7 @@ func TestAccDMSS3Endpoint_sourceSimple(t *testing.T) {
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"compression_type", "date_partition_enabled", "parquet_timestamp_in_millisecond", "preserve_transactions", "use_csv_no_sup_value"},
ImportStateVerifyIgnore: []string{"compression_type", "date_partition_enabled", "parquet_timestamp_in_millisecond", "preserve_transactions", "use_csv_no_sup_value", "glue_catalog_generation"},
},
},
})
@@ -562,6 +566,7 @@ resource "aws_dms_s3_endpoint" "test" {
timestamp_column_name = "tx_commit_time"
use_csv_no_sup_value = false
use_task_start_time_for_full_load_timestamp = true
glue_catalog_generation = true
depends_on = [aws_iam_role_policy.test]
}
@@ -640,6 +645,7 @@ resource "aws_dms_s3_endpoint" "test" {
timestamp_column_name = "tx_commit_time2"
use_csv_no_sup_value = true
use_task_start_time_for_full_load_timestamp = false
glue_catalog_generation = false
depends_on = [aws_iam_role_policy.test]
}
1 change: 1 addition & 0 deletions website/docs/r/dms_endpoint.html.markdown
@@ -178,6 +178,7 @@ The following arguments are optional:
* `encoding_type` - (Optional) Type of encoding to use. Valid values are `rle_dictionary`, `plain`, and `plain_dictionary`. Default is `rle_dictionary`.
* `encryption_mode` - (Optional) Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are `SSE_S3` and `SSE_KMS`. Default is `SSE_S3`.
* `external_table_definition` - (Optional) JSON document that describes how AWS DMS should interpret the data.
* `glue_catalog_generation` - (Optional) Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See [Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.GlueCatalog) for more information. Default is `false`.
* `ignore_header_rows` - (Optional) When this value is set to `1`, DMS ignores the first row header in a .csv file. Default is `0`.
* `include_op_for_full_load` - (Optional) Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is `false`.
* `max_file_size` - (Optional) Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from `1` to `1048576`. Default is `1048576` (1 GB).
2 changes: 2 additions & 0 deletions website/docs/r/dms_s3_endpoint.html.markdown
@@ -85,6 +85,7 @@ resource "aws_dms_s3_endpoint" "example" {
timestamp_column_name = "tx_commit_time"
use_csv_no_sup_value = false
use_task_start_time_for_full_load_timestamp = true
glue_catalog_generation = true
depends_on = [aws_iam_role_policy.example]
}
@@ -129,6 +130,7 @@ The following arguments are optional:
* `encoding_type` - (Optional) Type of encoding to use. Valid values are `rle_dictionary`, `plain`, and `plain_dictionary`. (AWS default is `rle_dictionary`.)
* `encryption_mode` - (Optional) Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are `SSE_S3` and `SSE_KMS`. (AWS default is `SSE_S3`.) (Ignored for source endpoints -- only `SSE_S3` is valid.)
* `expected_bucket_owner` - (Optional) Bucket owner to prevent sniping. Value is an AWS account ID.
* `glue_catalog_generation` - (Optional) Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See [Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.GlueCatalog) for more information. Default is `false`.
* `ignore_header_rows` - (Optional, Force New) When this value is set to `1`, DMS ignores the first row header in a .csv file. (AWS default is `0`.)
* `include_op_for_full_load` - (Optional) Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is `false`.
* `kms_key_arn` - (Optional) ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kms_key_arn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
