From 6ccac596d2115cb0ecf9475a3c10c2e0794259b0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Cie=C5=9Blak?=
Date: Tue, 25 Jun 2024 20:09:32 +0200
Subject: [PATCH] chore: apply minor database changes (#2872)

Apply the rest of the changes requested in #2842
- Moved out a schema for the show database output
- Throw an error in the state upgrader for certain cases that cannot be upgraded automatically
- Update the state upgrader to map account locators to the recommended account identifier format
- Use stricter mapping functions in the update functions for all databases (+ test them)

To add (in this PR)
- Better documentation for the old database `from_share` field

TODO
- Find out which test is setting data retention time in days on the account and doesn't call unset (causing `TestAcc_Database_IntParameter` to fail).
---
 MIGRATION_GUIDE.md                           |   7 +-
 docs/data-sources/databases.md               | 245 ++++++++++++++++--
 .../snowflake_databases/data-source.tf       |   2 +-
 pkg/datasources/databases.go                 | 134 ++--------
 pkg/datasources/databases_acceptance_test.go |  89 ++++---
 pkg/resources/database_acceptance_test.go    | 104 +++++---
 pkg/resources/database_commons.go            |  26 +-
 pkg/resources/database_state_upgraders.go    |  48 +++-
 pkg/resources/helpers.go                     |   4 +
 pkg/schemas/database.go                      | 106 ++++++++
 pkg/schemas/database_parameters.go           |  47 ++++
 pkg/schemas/parameter.go                     |  46 ++--
 pkg/schemas/warehouse_parameters.go          |  36 +--
 pkg/sdk/common_types.go                      |  36 +++
 pkg/sdk/common_types_test.go                 | 103 ++++++++
 pkg/sdk/databases.go                         |  12 +
 16 files changed, 762 insertions(+), 283 deletions(-)
 create mode 100644 pkg/schemas/database.go
 create mode 100644 pkg/schemas/database_parameters.go

diff --git a/MIGRATION_GUIDE.md b/MIGRATION_GUIDE.md
index 37a152c3d9..d524a69be7 100644
--- a/MIGRATION_GUIDE.md
+++ b/MIGRATION_GUIDE.md
@@ -77,7 +77,7 @@ All the field changes in comparison to the previous database resource are:
 - `from_share` - the parameter was moved to the dedicated resource for databases created from shares, `snowflake_shared_database`. Right now, it's a text field instead of a map. Additionally, instead of the legacy account identifier format we're expecting the new one, which with a share looks like this: `<organization_name>.<account_name>.<share_name>`. For more information on account identifiers, visit the [official documentation](https://docs.snowflake.com/en/user-guide/admin-account-identifier).
 - `from_replica` - the parameter was moved to the dedicated resource for databases created from primary databases, `snowflake_secondary_database`
-- `replication_configuration` - renamed: was renamed to `replication` and is only available in the `snowflake_database`. Its internal schema changed: instead of a list of accounts, it expects a list of nested objects with accounts for which replication (and optionally failover) should be enabled. More information about converting between both versions [here](#resource-renamed-snowflake_database---snowflake_database_old). Additionally, instead of the legacy account identifier format we're expecting the new one that looks like this: `<organization_name>.<account_name>`. For more information on account identifiers, visit the [official documentation](https://docs.snowflake.com/en/user-guide/admin-account-identifier).
+- `replication_configuration` - renamed: was renamed to `replication` and is only available in the `snowflake_database`. Its internal schema changed: instead of a list of accounts, it expects a list of nested objects with accounts for which replication (and optionally failover) should be enabled (see the conversion example below). More information about converting between both versions can be found [here](#resource-renamed-snowflake_database---snowflake_database_old). Additionally, instead of the legacy account identifier format we're expecting the new one that looks like this: `<organization_name>.<account_name>` (it will be automatically migrated to the recommended format by the state upgrader). For more information on account identifiers, visit the [official documentation](https://docs.snowflake.com/en/user-guide/admin-account-identifier).
 - `data_retention_time_in_days` - removed from `snowflake_shared_database`, as it doesn't have any effect on shared databases.
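+
+For example, here is a minimal before/after sketch of the `replication_configuration` to `replication` conversion (the resource name, account locator, and account identifier are illustrative):
+
+```terraform
+# Before (old snowflake_database schema)
+resource "snowflake_database" "test" {
+  name = "test"
+  replication_configuration {
+    accounts             = ["ABC12345"] # account locator
+    ignore_edition_check = true
+  }
+}
+
+# After (new snowflake_database schema)
+resource "snowflake_database" "test" {
+  name = "test"
+  replication {
+    enable_to_account {
+      account_identifier = "MYORG.MYACCOUNT" # <organization_name>.<account_name>
+      with_failover      = false
+    }
+    ignore_edition_check = true
+  }
+}
+```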
@@ -158,9 +158,10 @@ The only difference would be that instead of writing/generating new configuratio
 - `replication_configuration` field was removed from `databases`.
 - `pattern` was replaced by the `like` field.
 - Additional filtering options added (`limit`).
-- Added missing fields returned by SHOW DATABASES.
+- Added missing fields returned by SHOW DATABASES and enclosed its output in the `show_output` field.
 - Added outputs from **DESC DATABASE** and **SHOW PARAMETERS IN DATABASE** (they can be turned off by declaring `with_describe = false` and `with_parameters = false`; **they're turned on by default**).
-The additional parameters call **DESC DATABASE** (with `with_describe` turned on) and **SHOW PARAMETERS IN DATABASE** (with `with_parameters` turned on) **per database** returned by **SHOW DATABASES**.
+The additional parameters call **DESC DATABASE** (with `with_describe` turned on) and **SHOW PARAMETERS IN DATABASE** (with `with_parameters` turned on) **per database** returned by **SHOW DATABASES**.
+The outputs of both commands are held in the `databases` entry, where the **DESC DATABASE** output is saved in the `describe_output` field, and the **SHOW PARAMETERS IN DATABASE** output in the `parameters` field.
 It's important to limit the records and calls to Snowflake to a minimum. That's why we recommend assessing which information you need from the data source and then providing strong filters and turning off additional fields for better plan performance.
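+
+For illustration, a minimal sketch of querying the new output structure (the `like` pattern is illustrative):
+
+```terraform
+data "snowflake_databases" "filtered" {
+  like = "my-database-%"
+
+  # Turn off the additional per-database calls when their outputs are not needed.
+  with_describe   = false
+  with_parameters = false
+}
+
+output "database_names" {
+  value = [for db in data.snowflake_databases.filtered.databases : db.show_output[0].name]
+}
+```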
## v0.89.0 ➞ v0.90.0 diff --git a/docs/data-sources/databases.md b/docs/data-sources/databases.md index 47097a56a0..b7deae35bf 100644 --- a/docs/data-sources/databases.md +++ b/docs/data-sources/databases.md @@ -52,7 +52,7 @@ output "limit_output" { # Without additional data (to limit the number of calls make for every found database) data "snowflake_databases" "only_show" { - # with_describe is turned on by default and it calls DESCRIBE DATABASE for every database found and attaches its output to databases.*.description field + # with_describe is turned on by default and it calls DESCRIBE DATABASE for every database found and attaches its output to databases.*.describe_output field with_describe = false # with_parameters is turned on by default and it calls SHOW PARAMETERS FOR DATABASE for every database found and attaches its output to databases.*.parameters field @@ -120,24 +120,12 @@ Optional: Read-Only: -- `comment` (String) -- `created_on` (String) -- `description` (List of Object) (see [below for nested schema](#nestedobjatt--databases--description)) -- `is_current` (Boolean) -- `is_default` (Boolean) -- `is_transient` (Boolean) -- `kind` (String) -- `name` (String) -- `options` (String) -- `origin` (String) -- `owner` (String) -- `owner_role_type` (String) +- `describe_output` (List of Object) (see [below for nested schema](#nestedobjatt--databases--describe_output)) - `parameters` (List of Object) (see [below for nested schema](#nestedobjatt--databases--parameters)) -- `resource_group` (String) -- `retention_time` (Number) +- `show_output` (List of Object) (see [below for nested schema](#nestedobjatt--databases--show_output)) - -### Nested Schema for `databases.description` + +### Nested Schema for `databases.describe_output` Read-Only: @@ -151,8 +139,231 @@ Read-Only: Read-Only: +- `catalog` (List of Object) (see [below for nested schema](#nestedobjatt--databases--parameters--catalog)) +- `data_retention_time_in_days` (List of Object) (see [below for nested schema](#nestedobjatt--databases--parameters--data_retention_time_in_days)) +- `default_ddl_collation` (List of Object) (see [below for nested schema](#nestedobjatt--databases--parameters--default_ddl_collation)) +- `enable_console_output` (List of Object) (see [below for nested schema](#nestedobjatt--databases--parameters--enable_console_output)) +- `external_volume` (List of Object) (see [below for nested schema](#nestedobjatt--databases--parameters--external_volume)) +- `log_level` (List of Object) (see [below for nested schema](#nestedobjatt--databases--parameters--log_level)) +- `max_data_extension_time_in_days` (List of Object) (see [below for nested schema](#nestedobjatt--databases--parameters--max_data_extension_time_in_days)) +- `quoted_identifiers_ignore_case` (List of Object) (see [below for nested schema](#nestedobjatt--databases--parameters--quoted_identifiers_ignore_case)) +- `replace_invalid_characters` (List of Object) (see [below for nested schema](#nestedobjatt--databases--parameters--replace_invalid_characters)) +- `storage_serialization_policy` (List of Object) (see [below for nested schema](#nestedobjatt--databases--parameters--storage_serialization_policy)) +- `suspend_task_after_num_failures` (List of Object) (see [below for nested schema](#nestedobjatt--databases--parameters--suspend_task_after_num_failures)) +- `task_auto_retry_attempts` (List of Object) (see [below for nested schema](#nestedobjatt--databases--parameters--task_auto_retry_attempts)) +- `trace_level` (List of Object) (see [below for 
nested schema](#nestedobjatt--databases--parameters--trace_level)) +- `user_task_managed_initial_warehouse_size` (List of Object) (see [below for nested schema](#nestedobjatt--databases--parameters--user_task_managed_initial_warehouse_size)) +- `user_task_minimum_trigger_interval_in_seconds` (List of Object) (see [below for nested schema](#nestedobjatt--databases--parameters--user_task_minimum_trigger_interval_in_seconds)) +- `user_task_timeout_ms` (List of Object) (see [below for nested schema](#nestedobjatt--databases--parameters--user_task_timeout_ms)) + + +### Nested Schema for `databases.parameters.catalog` + +Read-Only: + - `default` (String) - `description` (String) - `key` (String) - `level` (String) - `value` (String) + + + +### Nested Schema for `databases.parameters.data_retention_time_in_days` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `databases.parameters.default_ddl_collation` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `databases.parameters.enable_console_output` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `databases.parameters.external_volume` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `databases.parameters.log_level` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `databases.parameters.max_data_extension_time_in_days` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `databases.parameters.quoted_identifiers_ignore_case` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `databases.parameters.replace_invalid_characters` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `databases.parameters.storage_serialization_policy` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `databases.parameters.suspend_task_after_num_failures` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `databases.parameters.task_auto_retry_attempts` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `databases.parameters.trace_level` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `databases.parameters.user_task_managed_initial_warehouse_size` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `databases.parameters.user_task_minimum_trigger_interval_in_seconds` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + 
+### Nested Schema for `databases.parameters.user_task_timeout_ms` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + + +### Nested Schema for `databases.show_output` + +Read-Only: + +- `comment` (String) +- `created_on` (String) +- `is_current` (Boolean) +- `is_default` (Boolean) +- `is_transient` (Boolean) +- `kind` (String) +- `name` (String) +- `options` (String) +- `origin` (String) +- `owner` (String) +- `owner_role_type` (String) +- `resource_group` (String) +- `retention_time` (Number) diff --git a/examples/data-sources/snowflake_databases/data-source.tf b/examples/data-sources/snowflake_databases/data-source.tf index f6f21658df..1c235421f3 100644 --- a/examples/data-sources/snowflake_databases/data-source.tf +++ b/examples/data-sources/snowflake_databases/data-source.tf @@ -38,7 +38,7 @@ output "limit_output" { # Without additional data (to limit the number of calls make for every found database) data "snowflake_databases" "only_show" { - # with_describe is turned on by default and it calls DESCRIBE DATABASE for every database found and attaches its output to databases.*.description field + # with_describe is turned on by default and it calls DESCRIBE DATABASE for every database found and attaches its output to databases.*.describe_output field with_describe = false # with_parameters is turned on by default and it calls SHOW PARAMETERS FOR DATABASE for every database found and attaches its output to databases.*.parameters field diff --git a/pkg/datasources/databases.go b/pkg/datasources/databases.go index c8ddae417a..4315f401f4 100644 --- a/pkg/datasources/databases.go +++ b/pkg/datasources/databases.go @@ -3,6 +3,8 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -59,77 +61,20 @@ var databasesSchema = map[string]*schema.Schema{ Description: "Holds the output of SHOW DATABASES.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "created_on": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "kind": { - Type: schema.TypeString, - Computed: true, - }, - "is_transient": { - Type: schema.TypeBool, - Computed: true, - }, - "is_default": { - Type: schema.TypeBool, - Computed: true, - }, - "is_current": { - Type: schema.TypeBool, - Computed: true, - }, - "origin": { - Type: schema.TypeString, - Computed: true, - }, - "owner": { - Type: schema.TypeString, - Computed: true, - }, - "comment": { - Type: schema.TypeString, - Computed: true, - }, - "options": { - Type: schema.TypeString, - Computed: true, - }, - "retention_time": { - Type: schema.TypeInt, - Computed: true, - }, - "resource_group": { - Type: schema.TypeString, - Computed: true, - }, - "owner_role_type": { - Type: schema.TypeString, - Computed: true, + "show_output": { + Type: schema.TypeList, + Computed: true, + Description: "Holds the output of SHOW DATABASES.", + Elem: &schema.Resource{ + Schema: schemas.ShowDatabaseSchema, + }, }, - "description": { + "describe_output": { Type: schema.TypeList, Computed: true, Description: "Holds the output of DESCRIBE DATABASE.", Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "created_on": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "kind": { - Type: 
schema.TypeString, - Computed: true, - }, - }, + Schema: schemas.DatabaseDescribeSchema, }, }, "parameters": { @@ -137,28 +82,7 @@ var databasesSchema = map[string]*schema.Schema{ Computed: true, Description: "Holds the output of SHOW PARAMETERS FOR DATABASE.", Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Computed: true, - }, - "value": { - Type: schema.TypeString, - Computed: true, - }, - "level": { - Type: schema.TypeString, - Computed: true, - }, - "default": { - Type: schema.TypeString, - Computed: true, - }, - "description": { - Type: schema.TypeString, - Computed: true, - }, - }, + Schema: schemas.ShowDatabaseParametersSchema, }, }, }, @@ -217,13 +141,7 @@ func ReadDatabases(ctx context.Context, d *schema.ResourceData, meta any) diag.D if err != nil { return diag.FromErr(err) } - for _, description := range describeResult.Rows { - databaseDescription = append(databaseDescription, map[string]any{ - "created_on": description.CreatedOn.String(), - "name": description.Name, - "kind": description.Kind, - }) - } + databaseDescription = schemas.DatabaseDescriptionToSchema(*describeResult) } var databaseParameters []map[string]any @@ -236,32 +154,12 @@ func ReadDatabases(ctx context.Context, d *schema.ResourceData, meta any) diag.D if err != nil { return diag.FromErr(err) } - for _, parameter := range parameters { - databaseParameters = append(databaseParameters, map[string]any{ - "key": parameter.Key, - "value": parameter.Value, - "default": parameter.Default, - "level": string(parameter.Level), - "description": parameter.Description, - }) - } + databaseParameters = []map[string]any{schemas.DatabaseParametersToSchema(parameters)} } flattenedDatabases[i] = map[string]any{ - "created_on": database.CreatedOn.String(), - "name": database.Name, - "kind": database.Kind, - "is_transient": database.Transient, - "is_default": database.IsDefault, - "is_current": database.IsCurrent, - "origin": database.Origin, - "owner": database.Owner, - "comment": database.Comment, - "options": database.Options, - "retention_time": database.RetentionTime, - "resource_group": database.ResourceGroup, - "owner_role_type": database.OwnerRoleType, - "description": databaseDescription, + "show_output": []map[string]any{schemas.DatabaseShowToSchema(database)}, + "describe_output": databaseDescription, "parameters": databaseParameters, } } diff --git a/pkg/datasources/databases_acceptance_test.go b/pkg/datasources/databases_acceptance_test.go index 9989a1ad77..e938706c99 100644 --- a/pkg/datasources/databases_acceptance_test.go +++ b/pkg/datasources/databases_acceptance_test.go @@ -3,7 +3,6 @@ package datasources_test import ( "maps" "regexp" - "strconv" "testing" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" @@ -23,7 +22,7 @@ func TestAcc_Databases_Complete(t *testing.T) { configVariables := config.Variables{ "name": config.StringVariable(databaseName), "comment": config.StringVariable(comment), - "account_identifier": config.StringVariable(strconv.Quote(acc.SecondaryTestClient().Account.GetAccountIdentifier(t).FullyQualifiedName())), + "account_identifier": config.StringVariable(acc.SecondaryTestClient().Account.GetAccountIdentifier(t).FullyQualifiedName()), } resource.Test(t, resource.TestCase{ @@ -38,32 +37,42 @@ func TestAcc_Databases_Complete(t *testing.T) { ConfigVariables: configVariables, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.#", "1"), - 
resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.created_on"), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.name", databaseName), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.kind", "STANDARD"), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.is_transient", "false"), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.is_default", "false"), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.show_output.0.created_on"), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.show_output.0.name", databaseName), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.show_output.0.kind", "STANDARD"), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.show_output.0.is_transient", "false"), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.show_output.0.is_default", "false"), // Commenting as this value depends on the currently used database, which is different when running as a single test and multiple tests (e.g., on CI) // resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.is_current", "true"), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.origin", ""), - resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.owner"), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.comment", comment), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.options", ""), - resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.retention_time"), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.resource_group", ""), - resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.owner_role_type"), - - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.description.#", "2"), - resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.description.0.created_on"), - resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.description.0.name"), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.description.0.kind", "SCHEMA"), - - resource.TestCheckResourceAttrWith("data.snowflake_databases.test", "databases.0.parameters.#", acc.IsGreaterOrEqualTo(10)), - resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.parameters.0.key"), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.parameters.0.value", ""), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.parameters.0.default", ""), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.parameters.0.level", ""), - resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.parameters.0.description"), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.show_output.0.origin", ""), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.show_output.0.owner"), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.show_output.0.comment", comment), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.show_output.0.options", ""), + 
resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.show_output.0.retention_time"), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.show_output.0.resource_group", ""), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.show_output.0.owner_role_type"), + + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.describe_output.#", "2"), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.describe_output.0.created_on"), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.describe_output.0.name"), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.describe_output.0.kind", "SCHEMA"), + + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.parameters.#", "1"), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.parameters.0.data_retention_time_in_days.0.value"), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.parameters.0.max_data_extension_time_in_days.0.value"), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.parameters.0.external_volume.0.value", ""), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.parameters.0.catalog.0.value", ""), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.parameters.0.replace_invalid_characters.0.value"), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.parameters.0.default_ddl_collation.0.value", ""), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.parameters.0.storage_serialization_policy.0.value"), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.parameters.0.log_level.0.value"), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.parameters.0.trace_level.0.value"), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.parameters.0.suspend_task_after_num_failures.0.value"), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.parameters.0.task_auto_retry_attempts.0.value"), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.parameters.0.user_task_managed_initial_warehouse_size.0.value"), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.parameters.0.user_task_minimum_trigger_interval_in_seconds.0.value"), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.parameters.0.quoted_identifiers_ignore_case.0.value"), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.parameters.0.enable_console_output.0.value"), ), }, { @@ -71,22 +80,22 @@ func TestAcc_Databases_Complete(t *testing.T) { ConfigVariables: configVariables, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.#", "1"), - resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.created_on"), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.name", databaseName), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.kind", "STANDARD"), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.is_transient", "false"), - 
resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.is_default", "false"), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.show_output.0.created_on"), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.show_output.0.name", databaseName), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.show_output.0.kind", "STANDARD"), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.show_output.0.is_transient", "false"), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.show_output.0.is_default", "false"), // Commenting for the same reason as above // resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.is_current", "false"), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.origin", ""), - resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.owner"), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.comment", comment), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.options", ""), - resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.retention_time"), - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.resource_group", ""), - resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.owner_role_type"), - - resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.description.#", "0"), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.show_output.0.origin", ""), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.show_output.0.owner"), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.show_output.0.comment", comment), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.show_output.0.options", ""), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.show_output.0.retention_time"), + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.show_output.0.resource_group", ""), + resource.TestCheckResourceAttrSet("data.snowflake_databases.test", "databases.0.show_output.0.owner_role_type"), + + resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.describe_output.#", "0"), resource.TestCheckResourceAttr("data.snowflake_databases.test", "databases.0.parameters.#", "0"), ), }, diff --git a/pkg/resources/database_acceptance_test.go b/pkg/resources/database_acceptance_test.go index f25ce17db2..cdd9466069 100644 --- a/pkg/resources/database_acceptance_test.go +++ b/pkg/resources/database_acceptance_test.go @@ -2,6 +2,7 @@ package resources_test import ( "fmt" + "regexp" "strconv" "testing" @@ -1112,7 +1113,10 @@ func TestAcc_Database_WithReplication(t *testing.T) { Config: databaseStateUpgraderWithReplicationNew(id, secondaryAccountIdentifier), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ - // plancheck.ExpectNonEmptyPlan(), // Account locators have to be changed to the new account identifier format + planchecks.PrintPlanDetails("snowflake_database.test", "replication"), + // Updates in place (no ALTER DATABASE is called) + planchecks.ExpectChange("snowflake_database.test", "replication.0.ignore_edition_check", tfjson.ActionUpdate, sdk.String("false"), sdk.String("true")), + 
planchecks.ExpectChange("snowflake_database.test", "replication.0.enable_to_account", tfjson.ActionUpdate, sdk.String(fmt.Sprintf("[map[account_identifier:%s with_failover:false]]", secondaryAccountIdentifier)), sdk.String(fmt.Sprintf("[map[account_identifier:%s with_failover:false]]", secondaryAccountIdentifier))), }, }, Check: resource.ComposeTestCheckFunc( @@ -1170,7 +1174,6 @@ func TestAcc_Database_UpgradeFromShare(t *testing.T) { TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.RequireAbove(tfversion.Version1_5_0), }, - CheckDestroy: acc.CheckDestroy(t, resources.Database), Steps: []resource.TestStep{ { ExternalProviders: map[string]resource.ExternalProvider{ @@ -1189,17 +1192,15 @@ func TestAcc_Database_UpgradeFromShare(t *testing.T) { }, { ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, - Config: databaseStateUpgraderFromShareNew(id), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectEmptyPlan(), - }, - }, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_database.test", "id", id.Name()), - resource.TestCheckResourceAttr("snowflake_database.test", "name", id.Name()), - resource.TestCheckNoResourceAttr("snowflake_database.test", "from_share"), - ), + Config: databaseStateUpgraderFromShareNewAfterUpgrade(id), + ExpectError: regexp.MustCompile("failed to upgrade the state with database created from share, please use snowflake_shared_database or deprecated snowflake_database_old instead"), + }, + { + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + Config: databaseStateUpgraderFromShareNew(id, shareExternalId), + ResourceName: "snowflake_shared_database.test", + ImportStateId: id.FullyQualifiedName(), + ImportState: true, }, }, }) @@ -1218,15 +1219,24 @@ resource "snowflake_database" "test" { `, id.Name(), secondaryClientLocator, externalShare.Name()) } -func databaseStateUpgraderFromShareNew(id sdk.AccountObjectIdentifier) string { +func databaseStateUpgraderFromShareNewAfterUpgrade(id sdk.AccountObjectIdentifier) string { return fmt.Sprintf(` resource "snowflake_database" "test" { name = "%s" - data_retention_time_in_days = 0 + data_retention_time_in_days = 0 # to avoid in-place update to -1 } `, id.Name()) } +func databaseStateUpgraderFromShareNew(id sdk.AccountObjectIdentifier, externalShare sdk.ExternalObjectIdentifier) string { + return fmt.Sprintf(` +resource "snowflake_shared_database" "test" { + name = "%s" + from_share = %s +} +`, id.Name(), strconv.Quote(externalShare.FullyQualifiedName())) +} + func TestAcc_Database_UpgradeFromReplica(t *testing.T) { _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) @@ -1241,7 +1251,6 @@ func TestAcc_Database_UpgradeFromReplica(t *testing.T) { TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.RequireAbove(tfversion.Version1_5_0), }, - CheckDestroy: acc.CheckDestroy(t, resources.Database), Steps: []resource.TestStep{ { ExternalProviders: map[string]resource.ExternalProvider{ @@ -1259,17 +1268,15 @@ func TestAcc_Database_UpgradeFromReplica(t *testing.T) { }, { ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, - Config: databaseStateUpgraderFromReplicaNew(id), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{ - plancheck.ExpectEmptyPlan(), - }, - }, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_database.test", "id", id.Name()), - resource.TestCheckResourceAttr("snowflake_database.test", "name", 
id.Name()),
-				resource.TestCheckNoResourceAttr("snowflake_database.test", "from_replica"),
-			),
+			Config:      databaseStateUpgraderFromReplicaNewAfterUpgrade(id),
+			ExpectError: regexp.MustCompile("failed to upgrade the state with database created from replica, please use snowflake_secondary_database or deprecated snowflake_database_old instead"),
+		},
+		{
+			ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories,
+			Config:                   databaseStateUpgraderFromReplicaNew(id, primaryDatabaseId),
+			ResourceName:             "snowflake_secondary_database.test",
+			ImportStateId:            id.FullyQualifiedName(),
+			ImportState:              true,
 		},
 	},
 })
@@ -1285,7 +1292,7 @@ resource "snowflake_database" "test" {
 `, id.Name(), strconv.Quote(primaryDatabaseId.FullyQualifiedName()))
 }

-func databaseStateUpgraderFromReplicaNew(id sdk.AccountObjectIdentifier) string {
+func databaseStateUpgraderFromReplicaNewAfterUpgrade(id sdk.AccountObjectIdentifier) string {
 	return fmt.Sprintf(`
 resource "snowflake_database" "test" {
 	name = "%s"
@@ -1294,6 +1301,15 @@ resource "snowflake_database" "test" {
 `, id.Name())
 }

+func databaseStateUpgraderFromReplicaNew(id sdk.AccountObjectIdentifier, primaryDatabaseId sdk.ExternalObjectIdentifier) string {
+	return fmt.Sprintf(`
+resource "snowflake_secondary_database" "test" {
+	name = "%s"
+	as_replica_of = %s
+}
+`, id.Name(), strconv.Quote(primaryDatabaseId.FullyQualifiedName()))
+}
+
 func TestAcc_Database_UpgradeFromClonedDatabase(t *testing.T) {
 	id := acc.TestClient().Ids.RandomAccountObjectIdentifier()
 	cloneId := acc.TestClient().Ids.RandomAccountObjectIdentifier()
@@ -1319,19 +1335,17 @@ func TestAcc_Database_UpgradeFromClonedDatabase(t *testing.T) {
 				resource.TestCheckResourceAttr("snowflake_database.cloned", "from_database", id.Name()),
 			),
 		},
+		{
+			ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories,
+			Config:                   databaseStateUpgraderFromDatabaseNewAfterUpgrade(id, cloneId),
+			ExpectError:              regexp.MustCompile("failed to upgrade the state with database created from database, please use snowflake_database or deprecated snowflake_database_old instead"),
+		},
 		{
 			ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories,
 			Config:                   databaseStateUpgraderFromDatabaseNew(id, cloneId),
-			ConfigPlanChecks: resource.ConfigPlanChecks{
-				PreApply: []plancheck.PlanCheck{
-					plancheck.ExpectEmptyPlan(),
-				},
-			},
-			Check: resource.ComposeTestCheckFunc(
-				resource.TestCheckResourceAttr("snowflake_database.cloned", "id", cloneId.Name()),
-				resource.TestCheckResourceAttr("snowflake_database.cloned", "name", cloneId.Name()),
-				resource.TestCheckNoResourceAttr("snowflake_database.cloned", "from_database"),
-			),
+			ResourceName:  "snowflake_database.cloned",
+			ImportStateId: cloneId.FullyQualifiedName(),
+			ImportState:   true,
 		},
 	},
 })
@@ -1352,7 +1366,7 @@ resource "snowflake_database" "cloned" {
 `, id.Name(), secondId.Name())
 }

-func databaseStateUpgraderFromDatabaseNew(id sdk.AccountObjectIdentifier, secondId sdk.AccountObjectIdentifier) string {
+func databaseStateUpgraderFromDatabaseNewAfterUpgrade(id sdk.AccountObjectIdentifier, secondId sdk.AccountObjectIdentifier) string {
 	return fmt.Sprintf(`
 resource "snowflake_database" "test" {
 	name = "%s"
@@ -1365,3 +1379,15 @@ resource "snowflake_database" "cloned" {
 `, id.Name(), secondId.Name())
 }
+
+func databaseStateUpgraderFromDatabaseNew(id sdk.AccountObjectIdentifier, secondId sdk.AccountObjectIdentifier) string {
+	return fmt.Sprintf(`
+resource "snowflake_database" "test" {
+	name = "%s"
+}
+
+resource "snowflake_database" "cloned" {
+	name = "%s"
+}
+`, id.Name(),
secondId.Name()) +} diff --git a/pkg/resources/database_commons.go b/pkg/resources/database_commons.go index 0f3fb84f1a..b2a1cf307e 100644 --- a/pkg/resources/database_commons.go +++ b/pkg/resources/database_commons.go @@ -259,16 +259,20 @@ func HandleDatabaseParametersChanges(d *schema.ResourceData, set *sdk.DatabaseSe return JoinDiags( handleValuePropertyChange[int](d, "data_retention_time_in_days", &set.DataRetentionTimeInDays, &unset.DataRetentionTimeInDays), handleValuePropertyChange[int](d, "max_data_extension_time_in_days", &set.MaxDataExtensionTimeInDays, &unset.MaxDataExtensionTimeInDays), - handleValuePropertyChangeWithMapping[string](d, "external_volume", &set.ExternalVolume, &unset.ExternalVolume, sdk.NewAccountObjectIdentifier), - handleValuePropertyChangeWithMapping[string](d, "catalog", &set.Catalog, &unset.Catalog, sdk.NewAccountObjectIdentifier), + handleValuePropertyChangeWithMapping[string](d, "external_volume", &set.ExternalVolume, &unset.ExternalVolume, func(value string) (sdk.AccountObjectIdentifier, error) { + return sdk.NewAccountObjectIdentifier(value), nil + }), + handleValuePropertyChangeWithMapping[string](d, "catalog", &set.Catalog, &unset.Catalog, func(value string) (sdk.AccountObjectIdentifier, error) { + return sdk.NewAccountObjectIdentifier(value), nil + }), handleValuePropertyChange[bool](d, "replace_invalid_characters", &set.ReplaceInvalidCharacters, &unset.ReplaceInvalidCharacters), handleValuePropertyChange[string](d, "default_ddl_collation", &set.DefaultDDLCollation, &unset.DefaultDDLCollation), - handleValuePropertyChangeWithMapping[string](d, "storage_serialization_policy", &set.StorageSerializationPolicy, &unset.StorageSerializationPolicy, func(value string) sdk.StorageSerializationPolicy { return sdk.StorageSerializationPolicy(value) }), - handleValuePropertyChangeWithMapping[string](d, "log_level", &set.LogLevel, &unset.LogLevel, func(value string) sdk.LogLevel { return sdk.LogLevel(value) }), - handleValuePropertyChangeWithMapping[string](d, "trace_level", &set.TraceLevel, &unset.TraceLevel, func(value string) sdk.TraceLevel { return sdk.TraceLevel(value) }), + handleValuePropertyChangeWithMapping[string](d, "storage_serialization_policy", &set.StorageSerializationPolicy, &unset.StorageSerializationPolicy, sdk.ToStorageSerializationPolicy), + handleValuePropertyChangeWithMapping[string](d, "log_level", &set.LogLevel, &unset.LogLevel, sdk.ToLogLevel), + handleValuePropertyChangeWithMapping[string](d, "trace_level", &set.TraceLevel, &unset.TraceLevel, sdk.ToTraceLevel), handleValuePropertyChange[int](d, "suspend_task_after_num_failures", &set.SuspendTaskAfterNumFailures, &unset.SuspendTaskAfterNumFailures), handleValuePropertyChange[int](d, "task_auto_retry_attempts", &set.TaskAutoRetryAttempts, &unset.TaskAutoRetryAttempts), - handleValuePropertyChangeWithMapping[string](d, "user_task_managed_initial_warehouse_size", &set.UserTaskManagedInitialWarehouseSize, &unset.UserTaskManagedInitialWarehouseSize, func(value string) sdk.WarehouseSize { return sdk.WarehouseSize(value) }), + handleValuePropertyChangeWithMapping[string](d, "user_task_managed_initial_warehouse_size", &set.UserTaskManagedInitialWarehouseSize, &unset.UserTaskManagedInitialWarehouseSize, sdk.ToWarehouseSize), handleValuePropertyChange[int](d, "user_task_timeout_ms", &set.UserTaskTimeoutMs, &unset.UserTaskTimeoutMs), handleValuePropertyChange[int](d, "user_task_minimum_trigger_interval_in_seconds", &set.UserTaskMinimumTriggerIntervalInSeconds, 
&unset.UserTaskMinimumTriggerIntervalInSeconds),
 		handleValuePropertyChange[bool](d, "quoted_identifiers_ignore_case", &set.QuotedIdentifiersIgnoreCase, &unset.QuotedIdentifiersIgnoreCase),
@@ -278,17 +282,21 @@ func HandleDatabaseParametersChanges(d *schema.ResourceData, set *sdk.DatabaseSe

 // handleValuePropertyChange internally calls handleValuePropertyChangeWithMapping with the identity mapping
 func handleValuePropertyChange[T any](d *schema.ResourceData, key string, setField **T, unsetField **bool) diag.Diagnostics {
-	return handleValuePropertyChangeWithMapping[T, T](d, key, setField, unsetField, func(value T) T { return value })
+	return handleValuePropertyChangeWithMapping[T, T](d, key, setField, unsetField, func(value T) (T, error) { return value, nil })
 }

 // handleValuePropertyChangeWithMapping checks schema.ResourceData for a change in the key's value. If there's a change detected
 // (or an unknown value, which basically indicates diff.SetNewComputed was called on the key), it checks if the value is set in the configuration.
 // If the value is set, setField (representing a setter for a value) is set to the new planned value, applying the mapping beforehand in cases where enum values,
 // identifiers, etc. have to be set. Otherwise, unsetField is populated.
-func handleValuePropertyChangeWithMapping[T, R any](d *schema.ResourceData, key string, setField **R, unsetField **bool, mapping func(value T) R) diag.Diagnostics {
+func handleValuePropertyChangeWithMapping[T, R any](d *schema.ResourceData, key string, setField **R, unsetField **bool, mapping func(value T) (R, error)) diag.Diagnostics {
 	if d.HasChange(key) || !d.GetRawPlan().AsValueMap()[key].IsKnown() {
 		if !d.GetRawConfig().AsValueMap()[key].IsNull() {
-			*setField = sdk.Pointer(mapping(d.Get(key).(T)))
+			mappedValue, err := mapping(d.Get(key).(T))
+			if err != nil {
+				return diag.FromErr(err)
+			}
+			*setField = sdk.Pointer(mappedValue)
 		} else {
 			*unsetField = sdk.Bool(true)
 		}
diff --git a/pkg/resources/database_state_upgraders.go b/pkg/resources/database_state_upgraders.go
index df04f75d13..f028487eb1 100644
--- a/pkg/resources/database_state_upgraders.go
+++ b/pkg/resources/database_state_upgraders.go
@@ -2,27 +2,59 @@ package resources

 import (
 	"context"
+	"fmt"
+
+	"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections"
+	"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider"
+	"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk"
 )

 func v092DatabaseStateUpgrader(ctx context.Context, rawState map[string]any, meta any) (map[string]any, error) {
+	client := meta.(*provider.Context).Client
+
 	if rawState == nil {
 		return rawState, nil
 	}

+	if v, ok := rawState["from_share"]; ok && v != nil && len(v.(map[string]any)) > 0 {
+		return nil, fmt.Errorf("failed to upgrade the state with database created from share, please use snowflake_shared_database or deprecated snowflake_database_old instead")
+	}
+
+	if v, ok := rawState["from_replica"]; ok && v != nil && len(v.(string)) > 0 {
+		return nil, fmt.Errorf("failed to upgrade the state with database created from replica, please use snowflake_secondary_database or deprecated snowflake_database_old instead")
+	}
+
+	if v, ok := rawState["from_database"]; ok && v != nil && len(v.(string)) > 0 {
+		return nil, fmt.Errorf("failed to upgrade the state with database created from database, please use snowflake_database or deprecated snowflake_database_old instead. Disclaimer: right now, database cloning is not supported. Cloned databases can be imported into the mentioned resources, but any difference in behavior from a standard database won't be handled (and can result in errors)")
+	}
+
 	if replicationConfigurations, ok := rawState["replication_configuration"]; ok && len(replicationConfigurations.([]any)) == 1 {
 		replicationConfiguration := replicationConfigurations.([]any)[0].(map[string]any)
 		replication := make(map[string]any)
-		replication["ignore_edition_check"] = replicationConfiguration["ignore_edition_check"]
+		replication["ignore_edition_check"] = replicationConfiguration["ignore_edition_check"].(bool)
+
+		accountLocators := replicationConfiguration["accounts"].([]any)
+		enableForAccounts := make([]map[string]any, len(accountLocators))

-		accounts := replicationConfiguration["accounts"].([]any)
-		enableForAccounts := make([]map[string]any, len(accounts))
-		for i, account := range accounts {
-			enableForAccounts[i] = map[string]any{
-				"account_identifier": account,
+		if len(accountLocators) > 0 {
+			replicationAccounts, err := client.ReplicationFunctions.ShowReplicationAccounts(ctx)
+			if err != nil {
+				return nil, err
 			}
+			for i, accountLocator := range accountLocators {
+				replicationAccount, err := collections.FindOne(replicationAccounts, func(account *sdk.ReplicationAccount) bool {
+					return account.AccountLocator == accountLocator
+				})
+				if err != nil {
+					return nil, fmt.Errorf("couldn't find replication account for locator '%s', err = %w", accountLocator, err)
+				}
+				foundReplicationAccount := *replicationAccount
+				enableForAccounts[i] = map[string]any{
+					"account_identifier": sdk.NewAccountIdentifier(foundReplicationAccount.OrganizationName, foundReplicationAccount.AccountName),
+				}
+			}
 		}
-		rawState["replication"] = []any{replication}
+
+		replication["enable_to_account"] = enableForAccounts
+		rawState["replication"] = []any{replication}
 	}

 	return rawState, nil
diff --git a/pkg/resources/helpers.go b/pkg/resources/helpers.go
index 70315b087f..cbf936c2ba 100644
--- a/pkg/resources/helpers.go
+++ b/pkg/resources/helpers.go
@@ -283,6 +283,10 @@ func getTags(from interface{}) (to tags) {
 	return to
 }

+// TODO(SNOW-1479870): Test
+// MergeMaps takes any number of maps (of the same type) and merges them into one.
+// In case of key collision, the value is taken from the map that appears later in
+// the src parameter list.
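+//
+// For example:
+//   MergeMaps(map[string]int{"a": 1}, map[string]int{"a": 2, "b": 3})
+// results in map[string]int{"a": 2, "b": 3}.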
func MergeMaps[M ~map[K]V, K comparable, V any](src ...M) M { merged := make(M) for _, m := range src { diff --git a/pkg/schemas/database.go b/pkg/schemas/database.go new file mode 100644 index 0000000000..e1aa3dc6bd --- /dev/null +++ b/pkg/schemas/database.go @@ -0,0 +1,106 @@ +package schemas + +import ( + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +var ShowDatabaseSchema = map[string]*schema.Schema{ + "created_on": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "kind": { + Type: schema.TypeString, + Computed: true, + }, + "is_transient": { + Type: schema.TypeBool, + Computed: true, + }, + "is_default": { + Type: schema.TypeBool, + Computed: true, + }, + "is_current": { + Type: schema.TypeBool, + Computed: true, + }, + "origin": { + Type: schema.TypeString, + Computed: true, + }, + "owner": { + Type: schema.TypeString, + Computed: true, + }, + "comment": { + Type: schema.TypeString, + Computed: true, + }, + "options": { + Type: schema.TypeString, + Computed: true, + }, + "retention_time": { + Type: schema.TypeInt, + Computed: true, + }, + "resource_group": { + Type: schema.TypeString, + Computed: true, + }, + "owner_role_type": { + Type: schema.TypeString, + Computed: true, + }, +} + +func DatabaseShowToSchema(database sdk.Database) map[string]any { + return map[string]any{ + "created_on": database.CreatedOn.String(), + "name": database.Name, + "kind": database.Kind, + "is_transient": database.Transient, + "is_default": database.IsDefault, + "is_current": database.IsCurrent, + "origin": database.Origin, + "owner": database.Owner, + "comment": database.Comment, + "options": database.Options, + "retention_time": database.RetentionTime, + "resource_group": database.ResourceGroup, + "owner_role_type": database.OwnerRoleType, + } +} + +var DatabaseDescribeSchema = map[string]*schema.Schema{ + "created_on": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "kind": { + Type: schema.TypeString, + Computed: true, + }, +} + +func DatabaseDescriptionToSchema(description sdk.DatabaseDetails) []map[string]any { + result := make([]map[string]any, len(description.Rows)) + for i, row := range description.Rows { + result[i] = map[string]any{ + "created_on": row.CreatedOn.String(), + "name": row.Name, + "kind": row.Kind, + } + } + return result +} diff --git a/pkg/schemas/database_parameters.go b/pkg/schemas/database_parameters.go new file mode 100644 index 0000000000..11e7650048 --- /dev/null +++ b/pkg/schemas/database_parameters.go @@ -0,0 +1,47 @@ +package schemas + +import ( + "slices" + "strings" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +var ( + ShowDatabaseParametersSchema = make(map[string]*schema.Schema) + databaseParameters = []sdk.AccountParameter{ + sdk.AccountParameterDataRetentionTimeInDays, + sdk.AccountParameterMaxDataExtensionTimeInDays, + sdk.AccountParameterExternalVolume, + sdk.AccountParameterCatalog, + sdk.AccountParameterReplaceInvalidCharacters, + sdk.AccountParameterDefaultDDLCollation, + sdk.AccountParameterStorageSerializationPolicy, + sdk.AccountParameterLogLevel, + sdk.AccountParameterTraceLevel, + sdk.AccountParameterSuspendTaskAfterNumFailures, + sdk.AccountParameterTaskAutoRetryAttempts, + sdk.AccountParameterUserTaskManagedInitialWarehouseSize, + 
sdk.AccountParameterUserTaskTimeoutMs, + sdk.AccountParameterUserTaskMinimumTriggerIntervalInSeconds, + sdk.AccountParameterQuotedIdentifiersIgnoreCase, + sdk.AccountParameterEnableConsoleOutput, + } +) + +func init() { + for _, param := range databaseParameters { + ShowDatabaseParametersSchema[strings.ToLower(string(param))] = ParameterSchema + } +} + +func DatabaseParametersToSchema(parameters []*sdk.Parameter) map[string]any { + databaseParametersValue := make(map[string]any) + for _, param := range parameters { + if slices.Contains(databaseParameters, sdk.AccountParameter(param.Key)) { + databaseParametersValue[strings.ToLower(param.Key)] = []map[string]any{ParameterToSchema(param)} + } + } + return databaseParametersValue +} diff --git a/pkg/schemas/parameter.go b/pkg/schemas/parameter.go index d123875228..efd84d5206 100644 --- a/pkg/schemas/parameter.go +++ b/pkg/schemas/parameter.go @@ -7,26 +7,32 @@ import ( // ParameterSchema represents Snowflake parameter object. // TODO [SNOW-1473425]: should be generated later based on the sdk.Parameter -var ParameterSchema = map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Computed: true, - }, - "value": { - Type: schema.TypeString, - Computed: true, - }, - "default": { - Type: schema.TypeString, - Computed: true, - }, - "level": { - Type: schema.TypeString, - Computed: true, - }, - "description": { - Type: schema.TypeString, - Computed: true, +var ParameterSchema = &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "default": { + Type: schema.TypeString, + Computed: true, + }, + "level": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + }, }, } diff --git a/pkg/schemas/warehouse_parameters.go b/pkg/schemas/warehouse_parameters.go index 49dfe7aa96..f4f49a2b78 100644 --- a/pkg/schemas/warehouse_parameters.go +++ b/pkg/schemas/warehouse_parameters.go @@ -11,27 +11,9 @@ import ( // TODO [SNOW-1473425]: descriptions (take from .Description; tool to validate changes later) // TODO [SNOW-1473425]: should be generated later based on sdk.WarehouseParameters var ShowWarehouseParametersSchema = map[string]*schema.Schema{ - "max_concurrency_level": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: ParameterSchema, - }, - }, - "statement_queued_timeout_in_seconds": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: ParameterSchema, - }, - }, - "statement_timeout_in_seconds": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: ParameterSchema, - }, - }, + "max_concurrency_level": ParameterSchema, + "statement_queued_timeout_in_seconds": ParameterSchema, + "statement_timeout_in_seconds": ParameterSchema, } // TODO [SNOW-1473425]: validate all present? 
@@ -39,13 +21,11 @@ func WarehouseParametersToSchema(parameters []*sdk.Parameter) map[string]any { warehouseParameters := make(map[string]any) for _, param := range parameters { parameterSchema := ParameterToSchema(param) - switch strings.ToUpper(param.Key) { - case string(sdk.ObjectParameterMaxConcurrencyLevel): - warehouseParameters["max_concurrency_level"] = []map[string]any{parameterSchema} - case string(sdk.ObjectParameterStatementQueuedTimeoutInSeconds): - warehouseParameters["statement_queued_timeout_in_seconds"] = []map[string]any{parameterSchema} - case string(sdk.ObjectParameterStatementTimeoutInSeconds): - warehouseParameters["statement_timeout_in_seconds"] = []map[string]any{parameterSchema} + switch key := strings.ToUpper(param.Key); key { + case string(sdk.ObjectParameterMaxConcurrencyLevel), + string(sdk.ObjectParameterStatementQueuedTimeoutInSeconds), + string(sdk.ObjectParameterStatementTimeoutInSeconds): + warehouseParameters[strings.ToLower(key)] = []map[string]any{parameterSchema} } } return warehouseParameters diff --git a/pkg/sdk/common_types.go b/pkg/sdk/common_types.go index 2f069da78e..6b1294b7e1 100644 --- a/pkg/sdk/common_types.go +++ b/pkg/sdk/common_types.go @@ -2,7 +2,9 @@ package sdk import ( "errors" + "fmt" "strconv" + "strings" "time" ) @@ -234,6 +236,27 @@ const ( LogLevelOff LogLevel = "OFF" ) +func ToLogLevel(value string) (LogLevel, error) { + switch strings.ToUpper(value) { + case string(LogLevelTrace): + return LogLevelTrace, nil + case string(LogLevelDebug): + return LogLevelDebug, nil + case string(LogLevelInfo): + return LogLevelInfo, nil + case string(LogLevelWarn): + return LogLevelWarn, nil + case string(LogLevelError): + return LogLevelError, nil + case string(LogLevelFatal): + return LogLevelFatal, nil + case string(LogLevelOff): + return LogLevelOff, nil + default: + return "", fmt.Errorf("unknown log level: %s", value) + } +} + var AllLogLevels = []LogLevel{ LogLevelTrace, LogLevelDebug, @@ -252,6 +275,19 @@ const ( TraceLevelOff TraceLevel = "OFF" ) +func ToTraceLevel(value string) (TraceLevel, error) { + switch strings.ToUpper(value) { + case string(TraceLevelAlways): + return TraceLevelAlways, nil + case string(TraceLevelOnEvent): + return TraceLevelOnEvent, nil + case string(TraceLevelOff): + return TraceLevelOff, nil + default: + return "", fmt.Errorf("unknown trace level: %s", value) + } +} + var AllTraceLevels = []TraceLevel{ TraceLevelAlways, TraceLevelOnEvent, diff --git a/pkg/sdk/common_types_test.go b/pkg/sdk/common_types_test.go index 71bdfea4c1..3029d81e10 100644 --- a/pkg/sdk/common_types_test.go +++ b/pkg/sdk/common_types_test.go @@ -1,6 +1,7 @@ package sdk import ( + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -107,3 +108,105 @@ func TestToBoolProperty(t *testing.T) { assert.Equal(t, prop.Description, row.Description) }) } + +func TestToStorageSerializationPolicy(t *testing.T) { + testCases := []struct { + Name string + Input string + Expected StorageSerializationPolicy + Error string + }{ + {Input: string(StorageSerializationPolicyOptimized), Expected: StorageSerializationPolicyOptimized}, + {Input: string(StorageSerializationPolicyCompatible), Expected: StorageSerializationPolicyCompatible}, + {Name: "validation: incorrect storage serialization policy", Input: "incorrect", Error: "unknown storage serialization policy: incorrect"}, + {Name: "validation: empty input", Input: "", Error: "unknown storage serialization policy: "}, + {Name: "validation: lower case input", Input: "optimized", Expected: 
StorageSerializationPolicyOptimized}, + } + + for _, testCase := range testCases { + name := testCase.Name + if name == "" { + name = fmt.Sprintf("%v storage serialization policy", testCase.Input) + } + t.Run(name, func(t *testing.T) { + value, err := ToStorageSerializationPolicy(testCase.Input) + if testCase.Error != "" { + assert.Empty(t, value) + assert.ErrorContains(t, err, testCase.Error) + } else { + assert.NoError(t, err) + assert.Equal(t, testCase.Expected, value) + } + }) + } +} + +func TestToLogLevel(t *testing.T) { + testCases := []struct { + Name string + Input string + Expected LogLevel + Error string + }{ + {Input: string(LogLevelTrace), Expected: LogLevelTrace}, + {Input: string(LogLevelDebug), Expected: LogLevelDebug}, + {Input: string(LogLevelInfo), Expected: LogLevelInfo}, + {Input: string(LogLevelWarn), Expected: LogLevelWarn}, + {Input: string(LogLevelError), Expected: LogLevelError}, + {Input: string(LogLevelFatal), Expected: LogLevelFatal}, + {Input: string(LogLevelOff), Expected: LogLevelOff}, + {Name: "validation: incorrect log level", Input: "incorrect", Error: "unknown log level: incorrect"}, + {Name: "validation: empty input", Input: "", Error: "unknown log level: "}, + {Name: "validation: lower case input", Input: "info", Expected: LogLevelInfo}, + } + + for _, testCase := range testCases { + name := testCase.Name + if name == "" { + name = fmt.Sprintf("%v log level", testCase.Input) + } + t.Run(name, func(t *testing.T) { + value, err := ToLogLevel(testCase.Input) + if testCase.Error != "" { + assert.Empty(t, value) + assert.ErrorContains(t, err, testCase.Error) + } else { + assert.NoError(t, err) + assert.Equal(t, testCase.Expected, value) + } + }) + } +} + +func TestToTraceLevel(t *testing.T) { + testCases := []struct { + Name string + Input string + Expected TraceLevel + Error string + }{ + {Input: string(TraceLevelAlways), Expected: TraceLevelAlways}, + {Input: string(TraceLevelOnEvent), Expected: TraceLevelOnEvent}, + {Input: string(TraceLevelOff), Expected: TraceLevelOff}, + {Name: "validation: incorrect trace level", Input: "incorrect", Error: "unknown trace level: incorrect"}, + {Name: "validation: empty input", Input: "", Error: "unknown trace level: "}, + {Name: "validation: lower case input", Input: "always", Expected: TraceLevelAlways}, + } + + for _, testCase := range testCases { + name := testCase.Name + if name == "" { + name = fmt.Sprintf("%v trace level", testCase.Input) + } + t.Run(name, func(t *testing.T) { + value, err := ToTraceLevel(testCase.Input) + if testCase.Error != "" { + assert.Empty(t, value) + assert.ErrorContains(t, err, testCase.Error) + } else { + assert.NoError(t, err) + assert.Equal(t, testCase.Expected, value) + } + }) + } +} diff --git a/pkg/sdk/databases.go b/pkg/sdk/databases.go index e676148167..e1c2bd3300 100644 --- a/pkg/sdk/databases.go +++ b/pkg/sdk/databases.go @@ -4,6 +4,7 @@ import ( "context" "database/sql" "errors" + "fmt" "strconv" "strings" "time" @@ -144,6 +145,17 @@ const ( StorageSerializationPolicyOptimized StorageSerializationPolicy = "OPTIMIZED" ) +func ToStorageSerializationPolicy(value string) (StorageSerializationPolicy, error) { + switch strings.ToUpper(value) { + case string(StorageSerializationPolicyCompatible): + return StorageSerializationPolicyCompatible, nil + case string(StorageSerializationPolicyOptimized): + return StorageSerializationPolicyOptimized, nil + default: + return "", fmt.Errorf("unknown storage serialization policy: %s", value) + } +} + var AllStorageSerializationPolicies = 
[]StorageSerializationPolicy{
 	StorageSerializationPolicyCompatible,
 	StorageSerializationPolicyOptimized,
 }