diff --git a/MIGRATION_GUIDE.md b/MIGRATION_GUIDE.md index d524a69be7..2ceb3780e4 100644 --- a/MIGRATION_GUIDE.md +++ b/MIGRATION_GUIDE.md @@ -23,22 +23,34 @@ Force new was added for the following attributes (because no usable SQL alter st - `run_as_role` ### snowflake_warehouse resource changes + +Because of the multiple changes in the resource, the easiest migration way is to follow our [migration guide](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/resource_migration.md) to perform zero downtime migration. Alternatively, it is possible to follow some pointers below. Either way, familiarize yourself with the resource changes before version bumping. + #### *(potential behavior change)* Default values removed As part of the [redesign](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/ROADMAP.md#preparing-essential-ga-objects-for-the-provider-v1) we are removing the default values for attributes having their defaults on Snowflake side to reduce coupling with the provider. Because of that the following defaults were removed: -- `comment` -- `statement_timeout_in_seconds` -- `statement_queued_timeout_in_seconds` -- `max_concurrency_level` -- `enable_query_acceleration` -- `query_acceleration_max_scale_factor` -- `warehouse_type` +- `comment` (previously `""`) +- `enable_query_acceleration` (previously `false`) +- `query_acceleration_max_scale_factor` (previously `8`) +- `warehouse_type` (previously `"STANDARD"`) +- `max_concurrency_level` (previously `8`) +- `statement_queued_timeout_in_seconds` (previously `0`) +- `statement_timeout_in_seconds` (previously `172800`) + +**Beware!** For attributes being Snowflake parameters (in case of warehouse: `max_concurrency_level`, `statement_queued_timeout_in_seconds`, and `statement_timeout_in_seconds`), this is a breaking change. Previously, not setting a value for them was treated as a fallback to values hardcoded on the provider side. 
This caused warehouse creation with these parameters set on the warehouse level (and not using the Snowflake default from hierarchy; read more in the [parameters documentation](https://docs.snowflake.com/en/sql-reference/parameters)). To keep the previous values, fill in your configs to the default values listed above. -All previous defaults were aligned with the current Snowflake ones, however: +All previous defaults were aligned with the current Snowflake ones, however it's not possible to distinguish between filled out value and no value in the automatic state upgrader. Therefore, if the given attribute is not filled out in your configuration, terraform will try to perform update after the change (to UNSET the given attribute to the Snowflake default); it should result in no changes on Snowflake object side, but it is required to make Terraform state aligned with your config. **All** other optional fields that were not set inside the config at all (because of the change in handling state logic on our provider side) will follow the same logic. To avoid the need for the changes, fill out the default fields in your config. Alternatively run apply; no further changes should be shown as a part of the plan. -[//]: # (TODO [SNOW-1348102 - next PR]: state migrator?) 
-- if the given parameter was changed on the account level, terraform will try to update it +#### *(note)* Automatic state migrations +There are three migrations that should happen automatically with the version bump: +- incorrect `2XLARGE`, `3XLARGE`, `4XLARGE`, `5XLARGE`, `6XLARGE` values for warehouse size are changed to the proper ones +- deprecated `wait_for_provisioning` attribute is removed from the state +- old empty resource monitor attribute is cleaned (earlier it was set to `"null"` string) -[//]: # (- TODO [SNOW-1348102 - next PR]: describe the new state approach if decided) +[//]: # (TODO [SNOW-1348102 - after discussion]: describe the new state approach if decided) + +#### *(fix)* Warehouse size UNSET + +Before the changes, removing warehouse size from the config was not handled properly. Because UNSET is not supported for warehouse size (check the [docs](https://docs.snowflake.com/en/sql-reference/sql/alter-warehouse#properties-parameters) - usage notes for unset) and there are multiple defaults possible, removing the size from config will result in the resource recreation. #### *(behavior change)* Validation changes As part of the [redesign](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/ROADMAP.md#preparing-essential-ga-objects-for-the-provider-v1) we are adjusting validations or removing them to reduce coupling between Snowflake and the provider. Because of that the following validations were removed/adjusted/added: @@ -57,6 +69,9 @@ As part of the [redesign](https://github.com/Snowflake-Labs/terraform-provider-s #### *(behavior change)* `query_acceleration_max_scale_factor` conditional logic removed Previously, the `query_acceleration_max_scale_factor` was depending on `enable_query_acceleration` parameter, but it is not required on Snowflake side. 
After migration, `terraform plan` should suggest changes if `enable_query_acceleration` was earlier set to false (manually or from default) and if `query_acceleration_max_scale_factor` was set in config. +#### *(behavior change)* `initially_suspended` forceNew removed +Previously, the `initially_suspended` attribute change caused the resource recreation. This attribute is used only during creation (to create suspended warehouse). There is no reason to recreate the whole object just to have initial state changed. + #### *(behavior change)* Boolean type changes To easily handle three-value logic (true, false, unknown) in provider's configs, type of `auto_resume` and `enable_query_acceleration` was changed from boolean to string. This should not require updating existing configs (boolean/int value should be accepted and state will be migrated to string automatically), however we recommend changing config values to strings. Terraform should perform an action for configs lacking `auto_resume` or `enable_query_acceleration` (`ALTER WAREHOUSE UNSET AUTO_RESUME` and/or `ALTER WAREHOUSE UNSET ENABLE_QUERY_ACCELERATION` will be run underneath which should not affect the Snowflake object, because `auto_resume` and `enable_query_acceleration` are false by default). diff --git a/Makefile b/Makefile index 62fd39aa39..af6e1926ec 100644 --- a/Makefile +++ b/Makefile @@ -62,7 +62,7 @@ sweep: ## destroy the whole architecture; USE ONLY FOR DEVELOPMENT ACCOUNTS fi; test: test-client ## run unit and integration tests - go test -v -cover -timeout=30m ./... + go test -v -cover -timeout=45m ./... test-acceptance: ## run acceptance tests TF_ACC=1 SF_TF_ACC_TEST_CONFIGURE_CLIENT_ONCE=true TEST_SF_TF_REQUIRE_TEST_OBJECT_SUFFIX=1 go test -run "^TestAcc_" -v -cover -timeout=60m ./... 
@@ -76,6 +76,9 @@ test-architecture: ## check architecture constraints between packages test-client: ## runs test that checks sdk.Client without instrumentedsql SF_TF_NO_INSTRUMENTED_SQL=1 SF_TF_GOSNOWFLAKE_LOG_LEVEL=debug go test ./pkg/sdk/internal/client/... -v +test-acceptance-%: ## run acceptance tests for the given resource only, e.g. test-acceptance-Warehouse + TF_ACC=1 SF_TF_ACC_TEST_CONFIGURE_CLIENT_ONCE=true go test -run ^TestAcc_$*_ -v -timeout=20m ./... + build-local: ## build the binary locally go build -o $(BASE_BINARY_NAME) . diff --git a/docs/resources/warehouse.md b/docs/resources/warehouse.md index e1dcbdbd68..cc7e1db4d0 100644 --- a/docs/resources/warehouse.md +++ b/docs/resources/warehouse.md @@ -41,8 +41,8 @@ resource "snowflake_warehouse" "warehouse" { - `scaling_policy` (String) Specifies the policy for automatically starting and shutting down clusters in a multi-cluster warehouse running in Auto-scale mode. Valid values are (case-insensitive): `STANDARD` | `ECONOMY`. - `statement_queued_timeout_in_seconds` (Number) Object parameter that specifies the time, in seconds, a SQL statement (query, DDL, DML, etc.) can be queued on a warehouse before it is canceled by the system. - `statement_timeout_in_seconds` (Number) Specifies the time, in seconds, after which a running SQL statement (query, DDL, DML, etc.) is canceled by the system -- `warehouse_size` (String) Specifies the size of the virtual warehouse. Valid values are (case-insensitive): `XSMALL` | `X-SMALL` | `SMALL` | `MEDIUM` | `LARGE` | `XLARGE` | `X-LARGE` | `XXLARGE` | `X2LARGE` | `2X-LARGE` | `XXXLARGE` | `X3LARGE` | `3X-LARGE` | `X4LARGE` | `4X-LARGE` | `X5LARGE` | `5X-LARGE` | `X6LARGE` | `6X-LARGE`. Consult [warehouse documentation](https://docs.snowflake.com/en/sql-reference/sql/create-warehouse#optional-properties-objectproperties) for the details. -- `warehouse_type` (String) Specifies warehouse type. Valid values are (case-insensitive): `STANDARD` | `SNOWPARK-OPTIMIZED`. 
+- `warehouse_size` (String) Specifies the size of the virtual warehouse. Valid values are (case-insensitive): `XSMALL` | `X-SMALL` | `SMALL` | `MEDIUM` | `LARGE` | `XLARGE` | `X-LARGE` | `XXLARGE` | `X2LARGE` | `2X-LARGE` | `XXXLARGE` | `X3LARGE` | `3X-LARGE` | `X4LARGE` | `4X-LARGE` | `X5LARGE` | `5X-LARGE` | `X6LARGE` | `6X-LARGE`. Consult [warehouse documentation](https://docs.snowflake.com/en/sql-reference/sql/create-warehouse#optional-properties-objectproperties) for the details. Note: removing the size from config will result in the resource recreation. +- `warehouse_type` (String) Specifies warehouse type. Valid values are (case-insensitive): `STANDARD` | `SNOWPARK-OPTIMIZED`. Warehouse needs to be suspended to change its type. Provider will handle automatic suspension and resumption if needed. ### Read-Only diff --git a/pkg/acceptance/helpers/parameter_client.go b/pkg/acceptance/helpers/parameter_client.go index 54435b4c63..56d6b47bf3 100644 --- a/pkg/acceptance/helpers/parameter_client.go +++ b/pkg/acceptance/helpers/parameter_client.go @@ -47,6 +47,17 @@ func (c *ParameterClient) ShowDatabaseParameters(t *testing.T, id sdk.AccountObj return params } +func (c *ParameterClient) ShowWarehouseParameters(t *testing.T, id sdk.AccountObjectIdentifier) []*sdk.Parameter { + t.Helper() + params, err := c.client().ShowParameters(context.Background(), &sdk.ShowParametersOptions{ + In: &sdk.ParametersIn{ + Warehouse: id, + }, + }) + require.NoError(t, err) + return params +} + func (c *ParameterClient) UpdateAccountParameterTemporarily(t *testing.T, parameter sdk.AccountParameter, newValue string) func() { t.Helper() ctx := context.Background() diff --git a/pkg/acceptance/helpers/warehouse_client.go b/pkg/acceptance/helpers/warehouse_client.go index bd933cb789..ad2b6fae05 100644 --- a/pkg/acceptance/helpers/warehouse_client.go +++ b/pkg/acceptance/helpers/warehouse_client.go @@ -43,9 +43,14 @@ func (c *WarehouseClient) CreateWarehouse(t *testing.T) (*sdk.Warehouse, 
func()) func (c *WarehouseClient) CreateWarehouseWithOptions(t *testing.T, id sdk.AccountObjectIdentifier, opts *sdk.CreateWarehouseOptions) (*sdk.Warehouse, func()) { t.Helper() ctx := context.Background() + err := c.client().Create(ctx, id, opts) require.NoError(t, err) - return &sdk.Warehouse{Name: id.Name()}, c.DropWarehouseFunc(t, id) + + warehouse, err := c.client().ShowByID(ctx, id) + require.NoError(t, err) + + return warehouse, c.DropWarehouseFunc(t, id) } func (c *WarehouseClient) DropWarehouseFunc(t *testing.T, id sdk.AccountObjectIdentifier) func() { @@ -96,6 +101,15 @@ func (c *WarehouseClient) UpdateStatementTimeoutInSeconds(t *testing.T, id sdk.A require.NoError(t, err) } +func (c *WarehouseClient) UnsetStatementTimeoutInSeconds(t *testing.T, id sdk.AccountObjectIdentifier) { + t.Helper() + + ctx := context.Background() + + err := c.client().Alter(ctx, id, &sdk.AlterWarehouseOptions{Unset: &sdk.WarehouseUnset{StatementTimeoutInSeconds: sdk.Bool(true)}}) + require.NoError(t, err) +} + func (c *WarehouseClient) UpdateAutoResume(t *testing.T, id sdk.AccountObjectIdentifier, newAutoResume bool) { t.Helper() @@ -105,6 +119,15 @@ func (c *WarehouseClient) UpdateAutoResume(t *testing.T, id sdk.AccountObjectIde require.NoError(t, err) } +func (c *WarehouseClient) UpdateAutoSuspend(t *testing.T, id sdk.AccountObjectIdentifier, newAutoSuspend int) { + t.Helper() + + ctx := context.Background() + + err := c.client().Alter(ctx, id, &sdk.AlterWarehouseOptions{Set: &sdk.WarehouseSet{AutoSuspend: sdk.Int(newAutoSuspend)}}) + require.NoError(t, err) +} + func (c *WarehouseClient) Suspend(t *testing.T, id sdk.AccountObjectIdentifier) { t.Helper() diff --git a/pkg/acceptance/snowflakechecks/warehouse.go b/pkg/acceptance/snowflakechecks/warehouse.go index 4d84a38d18..e0be13e9d8 100644 --- a/pkg/acceptance/snowflakechecks/warehouse.go +++ b/pkg/acceptance/snowflakechecks/warehouse.go @@ -52,3 +52,17 @@ func CheckAutoResume(t *testing.T, id sdk.AccountObjectIdentifier, 
expectedAutoR return nil } } + +func CheckAutoSuspendCount(t *testing.T, id sdk.AccountObjectIdentifier, expectedAutoSuspend int) func(state *terraform.State) error { + t.Helper() + return func(_ *terraform.State) error { + warehouse, err := acc.TestClient().Warehouse.Show(t, id) + if err != nil { + return err + } + if warehouse.AutoSuspend != expectedAutoSuspend { + return fmt.Errorf("expected auto suspend: %d; got: %d", expectedAutoSuspend, warehouse.AutoSuspend) + } + return nil + } +} diff --git a/pkg/resources/custom_diffs.go b/pkg/resources/custom_diffs.go index a3f338bed1..c3e417e523 100644 --- a/pkg/resources/custom_diffs.go +++ b/pkg/resources/custom_diffs.go @@ -84,6 +84,9 @@ func ComputedIfAnyAttributeChanged(key string, changedAttributeKeys ...string) s return customdiff.ComputedIf(key, func(ctx context.Context, diff *schema.ResourceDiff, meta interface{}) bool { var result bool for _, changedKey := range changedAttributeKeys { + if diff.HasChange(changedKey) { + log.Printf("[DEBUG] ComputedIfAnyAttributeChanged: changed key: %s\n", changedKey) + } result = result || diff.HasChange(changedKey) } return result diff --git a/pkg/resources/diff_suppressions.go b/pkg/resources/diff_suppressions.go index 3701323cee..d2de195517 100644 --- a/pkg/resources/diff_suppressions.go +++ b/pkg/resources/diff_suppressions.go @@ -1,6 +1,11 @@ package resources -import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) func NormalizeAndCompare[T comparable](normalize func(string) (T, error)) schema.SchemaDiffSuppressFunc { return func(_, oldValue, newValue string, _ *schema.ResourceData) bool { @@ -15,3 +20,39 @@ func NormalizeAndCompare[T comparable](normalize func(string) (T, error)) schema return oldNormalized == newNormalized } } + +// IgnoreAfterCreation should be used to ignore changes to the given attribute post creation. 
+func IgnoreAfterCreation(_, _, _ string, d *schema.ResourceData) bool { + // For new resources always show the diff and in every other case we do not want to use this attribute + return d.Id() != "" +} + +func IgnoreChangeToCurrentSnowflakeValue(keyInShowOutput string) schema.SchemaDiffSuppressFunc { + return func(_, _, new string, d *schema.ResourceData) bool { + if d.Id() == "" { + return false + } + + if showOutput, ok := d.GetOk(showOutputAttributeName); ok { + showOutputList := showOutput.([]any) + if len(showOutputList) == 1 { + result := showOutputList[0].(map[string]any) + log.Printf("[DEBUG] IgnoreChangeToCurrentSnowflakeValue: value for key %s is %v, new value is %s, comparison result is: %t", keyInShowOutput, result[keyInShowOutput], new, new == fmt.Sprintf("%v", result[keyInShowOutput])) + if new == fmt.Sprintf("%v", result[keyInShowOutput]) { + return true + } + } + } + return false + } +} + +func SuppressIfAny(diffSuppressFunctions ...schema.SchemaDiffSuppressFunc) schema.SchemaDiffSuppressFunc { + return func(k, old, new string, d *schema.ResourceData) bool { + var suppress bool + for _, f := range diffSuppressFunctions { + suppress = suppress || f(k, old, new, d) + } + return suppress + } +} diff --git a/pkg/resources/diff_suppressions_test.go b/pkg/resources/diff_suppressions_test.go index 15f1eb95fb..9bf249d338 100644 --- a/pkg/resources/diff_suppressions_test.go +++ b/pkg/resources/diff_suppressions_test.go @@ -6,6 +6,7 @@ import ( "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" ) @@ -53,3 +54,29 @@ func Test_NormalizeAndCompare(t *testing.T) { assert.False(t, result) }) } + +func Test_IgnoreAfterCreation(t *testing.T) { + testSchema := map[string]*schema.Schema{ + "value": { + Type: schema.TypeString, + Optional: true, + }, + } + + t.Run("without id", func(t 
*testing.T) { + in := map[string]any{} + d := schema.TestResourceDataRaw(t, testSchema, in) + + result := resources.IgnoreAfterCreation("", "", "", d) + assert.False(t, result) + }) + + t.Run("with id", func(t *testing.T) { + in := map[string]any{} + d := schema.TestResourceDataRaw(t, testSchema, in) + d.SetId("something") + + result := resources.IgnoreAfterCreation("", "", "", d) + assert.True(t, result) + }) +} diff --git a/pkg/resources/warehouse.go b/pkg/resources/warehouse.go index dc45615f46..bdc51b0881 100644 --- a/pkg/resources/warehouse.go +++ b/pkg/resources/warehouse.go @@ -18,8 +18,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) -// TODO [SNOW-1348102 - next PR]: extract three-value logic; add better description for each field -// TODO [SNOW-1348102 - next PR]: handle conditional suspension for some updates (additional optional field) +// TODO [SNOW-1348102 - if we choose this approach]: extract three-value logic; add better description for each field var warehouseSchema = map[string]*schema.Schema{ "name": { Type: schema.TypeString, @@ -30,62 +29,65 @@ var warehouseSchema = map[string]*schema.Schema{ Type: schema.TypeString, Optional: true, ValidateDiagFunc: sdkValidation(sdk.ToWarehouseType), - DiffSuppressFunc: NormalizeAndCompare(sdk.ToWarehouseType), - Description: fmt.Sprintf("Specifies warehouse type. Valid values are (case-insensitive): %s.", possibleValuesListed(sdk.ValidWarehouseTypesString)), + DiffSuppressFunc: SuppressIfAny(NormalizeAndCompare(sdk.ToWarehouseType), IgnoreChangeToCurrentSnowflakeValue("type")), + Description: fmt.Sprintf("Specifies warehouse type. Valid values are (case-insensitive): %s. Warehouse needs to be suspended to change its type. 
Provider will handle automatic suspension and resumption if needed.", possibleValuesListed(sdk.ValidWarehouseTypesString)), }, "warehouse_size": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: sdkValidation(sdk.ToWarehouseSize), - DiffSuppressFunc: NormalizeAndCompare(sdk.ToWarehouseSize), - Description: fmt.Sprintf("Specifies the size of the virtual warehouse. Valid values are (case-insensitive): %s. Consult [warehouse documentation](https://docs.snowflake.com/en/sql-reference/sql/create-warehouse#optional-properties-objectproperties) for the details.", possibleValuesListed(sdk.ValidWarehouseSizesString)), + DiffSuppressFunc: SuppressIfAny(NormalizeAndCompare(sdk.ToWarehouseSize), IgnoreChangeToCurrentSnowflakeValue("size")), + Description: fmt.Sprintf("Specifies the size of the virtual warehouse. Valid values are (case-insensitive): %s. Consult [warehouse documentation](https://docs.snowflake.com/en/sql-reference/sql/create-warehouse#optional-properties-objectproperties) for the details. 
Note: removing the size from config will result in the resource recreation.", possibleValuesListed(sdk.ValidWarehouseSizesString)), }, "max_cluster_count": { - Type: schema.TypeInt, - Description: "Specifies the maximum number of server clusters for the warehouse.", - Optional: true, - ValidateFunc: validation.IntBetween(1, 10), + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 10), + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValue("max_cluster_count"), + Description: "Specifies the maximum number of server clusters for the warehouse.", }, "min_cluster_count": { - Type: schema.TypeInt, - Description: "Specifies the minimum number of server clusters for the warehouse (only applies to multi-cluster warehouses).", - Optional: true, - ValidateFunc: validation.IntBetween(1, 10), + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 10), + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValue("min_cluster_count"), + Description: "Specifies the minimum number of server clusters for the warehouse (only applies to multi-cluster warehouses).", }, "scaling_policy": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: sdkValidation(sdk.ToScalingPolicy), - DiffSuppressFunc: NormalizeAndCompare(sdk.ToScalingPolicy), + DiffSuppressFunc: SuppressIfAny(NormalizeAndCompare(sdk.ToScalingPolicy), IgnoreChangeToCurrentSnowflakeValue("scaling_policy")), Description: fmt.Sprintf("Specifies the policy for automatically starting and shutting down clusters in a multi-cluster warehouse running in Auto-scale mode.
Valid values are (case-insensitive): %s.", possibleValuesListed(sdk.ValidWarehouseScalingPoliciesString)), }, "auto_suspend": { - Type: schema.TypeInt, - Description: "Specifies the number of seconds of inactivity after which a warehouse is automatically suspended.", - Optional: true, - ValidateFunc: validation.IntAtLeast(0), - Default: -1, + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValue("auto_suspend"), + Description: "Specifies the number of seconds of inactivity after which a warehouse is automatically suspended.", + Default: -1, }, "auto_resume": { - Type: schema.TypeString, - Description: "Specifies whether to automatically resume a warehouse when a SQL statement (e.g. query) is submitted to it.", - ValidateFunc: validation.StringInSlice([]string{"true", "false"}, true), - Optional: true, - Default: "unknown", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"true", "false"}, true), + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValue("auto_resume"), + Description: "Specifies whether to automatically resume a warehouse when a SQL statement (e.g. query) is submitted to it.", + Default: "unknown", }, - // TODO [SNOW-1348102 - next PR]: do we really need forceNew for this? 
"initially_suspended": { - Type: schema.TypeBool, - Description: "Specifies whether the warehouse is created initially in the ‘Suspended’ state.", - Optional: true, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + DiffSuppressFunc: IgnoreAfterCreation, + Description: "Specifies whether the warehouse is created initially in the ‘Suspended’ state.", }, "resource_monitor": { Type: schema.TypeString, - Description: "Specifies the name of a resource monitor that is explicitly assigned to the warehouse.", Optional: true, ValidateDiagFunc: IsValidIdentifier[sdk.AccountObjectIdentifier](), - DiffSuppressFunc: suppressIdentifierQuoting, + DiffSuppressFunc: SuppressIfAny(suppressIdentifierQuoting, IgnoreChangeToCurrentSnowflakeValue("resource_monitor")), + Description: "Specifies the name of a resource monitor that is explicitly assigned to the warehouse.", }, "comment": { Type: schema.TypeString, @@ -93,18 +95,20 @@ var warehouseSchema = map[string]*schema.Schema{ Description: "Specifies a comment for the warehouse.", }, "enable_query_acceleration": { - Type: schema.TypeString, - Description: "Specifies whether to enable the query acceleration service for queries that rely on this warehouse for compute resources.", - ValidateFunc: validation.StringInSlice([]string{"true", "false"}, true), - Optional: true, - Default: "unknown", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"true", "false"}, true), + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValue("enable_query_acceleration"), + Description: "Specifies whether to enable the query acceleration service for queries that rely on this warehouse for compute resources.", + Default: "unknown", }, "query_acceleration_max_scale_factor": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 100), - Description: "Specifies the maximum scale factor for leasing compute resources for query acceleration. 
The scale factor is used as a multiplier based on warehouse size.", - Default: -1, + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 100), + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValue("query_acceleration_max_scale_factor"), + Description: "Specifies the maximum scale factor for leasing compute resources for query acceleration. The scale factor is used as a multiplier based on warehouse size.", + Default: -1, }, strings.ToLower(string(sdk.ObjectParameterMaxConcurrencyLevel)): { Type: schema.TypeInt, @@ -162,7 +166,7 @@ func Warehouse() *schema.Resource { }, CustomizeDiff: customdiff.All( - ComputedIfAnyAttributeChanged(showOutputAttributeName, "warehouse_type", "warehouse_size", "max_cluster_count", "min_cluster_count", "scaling_policy", "auto_suspend", "auto_resume", "initially_suspended", "resource_monitor", "comment", "enable_query_acceleration", "query_acceleration_max_scale_factor"), + ComputedIfAnyAttributeChanged(showOutputAttributeName, "warehouse_type", "warehouse_size", "max_cluster_count", "min_cluster_count", "scaling_policy", "auto_suspend", "auto_resume", "resource_monitor", "comment", "enable_query_acceleration", "query_acceleration_max_scale_factor"), ComputedIfAnyAttributeChanged(parametersAttributeName, strings.ToLower(string(sdk.ObjectParameterMaxConcurrencyLevel)), strings.ToLower(string(sdk.ObjectParameterStatementQueuedTimeoutInSeconds)), strings.ToLower(string(sdk.ObjectParameterStatementTimeoutInSeconds))), customdiff.ForceNewIfChange("warehouse_size", func(ctx context.Context, old, new, meta any) bool { return old.(string) != "" && new.(string) == "" @@ -214,7 +218,7 @@ func ImportWarehouse(ctx context.Context, d *schema.ResourceData, meta any) ([]* if err = d.Set("auto_resume", fmt.Sprintf("%t", w.AutoResume)); err != nil { return nil, err } - if err = d.Set("resource_monitor", w.ResourceMonitor); err != nil { + if err = d.Set("resource_monitor", w.ResourceMonitor.Name()); err != nil { return nil, 
err } if err = d.Set("comment", w.Comment); err != nil { @@ -341,9 +345,7 @@ func GetReadWarehouseFunc(withExternalChangesMarking bool) schema.ReadContextFun showMapping{"scaling_policy", "scaling_policy", string(w.ScalingPolicy), w.ScalingPolicy, nil}, showMapping{"auto_suspend", "auto_suspend", w.AutoSuspend, w.AutoSuspend, nil}, showMapping{"auto_resume", "auto_resume", w.AutoResume, fmt.Sprintf("%t", w.AutoResume), nil}, - showMapping{"resource_monitor", "resource_monitor", sdk.NewAccountIdentifierFromFullyQualifiedName(w.ResourceMonitor).FullyQualifiedName(), w.ResourceMonitor, func(from any) any { - return sdk.NewAccountIdentifierFromFullyQualifiedName(from.(string)).FullyQualifiedName() - }}, + showMapping{"resource_monitor", "resource_monitor", w.ResourceMonitor.Name(), w.ResourceMonitor.Name(), nil}, showMapping{"comment", "comment", w.Comment, w.Comment, nil}, showMapping{"enable_query_acceleration", "enable_query_acceleration", w.EnableQueryAcceleration, fmt.Sprintf("%t", w.EnableQueryAcceleration), nil}, showMapping{"query_acceleration_max_scale_factor", "query_acceleration_max_scale_factor", w.QueryAccelerationMaxScaleFactor, w.QueryAccelerationMaxScaleFactor, nil}, @@ -356,6 +358,71 @@ func GetReadWarehouseFunc(withExternalChangesMarking bool) schema.ReadContextFun } } + // These are all identity sets, needed for the case where: + // - previous config was empty (therefore Snowflake defaults had been used) + // - new config have the same values that are already in SF + if !d.GetRawConfig().IsNull() { + if v := d.GetRawConfig().AsValueMap()["warehouse_type"]; !v.IsNull() { + if err = d.Set("warehouse_type", v.AsString()); err != nil { + return diag.FromErr(err) + } + } + if v := d.GetRawConfig().AsValueMap()["warehouse_size"]; !v.IsNull() { + if err = d.Set("warehouse_size", v.AsString()); err != nil { + return diag.FromErr(err) + } + } + if v := d.GetRawConfig().AsValueMap()["max_cluster_count"]; !v.IsNull() { + intVal, _ := v.AsBigFloat().Int64() + if 
err = d.Set("max_cluster_count", intVal); err != nil { + return diag.FromErr(err) + } + } + if v := d.GetRawConfig().AsValueMap()["min_cluster_count"]; !v.IsNull() { + intVal, _ := v.AsBigFloat().Int64() + if err = d.Set("min_cluster_count", intVal); err != nil { + return diag.FromErr(err) + } + } + if v := d.GetRawConfig().AsValueMap()["scaling_policy"]; !v.IsNull() { + if err = d.Set("scaling_policy", v.AsString()); err != nil { + return diag.FromErr(err) + } + } + if v := d.GetRawConfig().AsValueMap()["auto_suspend"]; !v.IsNull() { + intVal, _ := v.AsBigFloat().Int64() + if err = d.Set("auto_suspend", intVal); err != nil { + return diag.FromErr(err) + } + } + if v := d.GetRawConfig().AsValueMap()["auto_resume"]; !v.IsNull() { + if err = d.Set("auto_resume", v.AsString()); err != nil { + return diag.FromErr(err) + } + } + if v := d.GetRawConfig().AsValueMap()["resource_monitor"]; !v.IsNull() { + if err = d.Set("resource_monitor", v.AsString()); err != nil { + return diag.FromErr(err) + } + } + if v := d.GetRawConfig().AsValueMap()["comment"]; !v.IsNull() { + if err = d.Set("comment", v.AsString()); err != nil { + return diag.FromErr(err) + } + } + if v := d.GetRawConfig().AsValueMap()["enable_query_acceleration"]; !v.IsNull() { + if err = d.Set("enable_query_acceleration", v.AsString()); err != nil { + return diag.FromErr(err) + } + } + if v := d.GetRawConfig().AsValueMap()["query_acceleration_max_scale_factor"]; !v.IsNull() { + intVal, _ := v.AsBigFloat().Int64() + if err = d.Set("query_acceleration_max_scale_factor", intVal); err != nil { + return diag.FromErr(err) + } + } + } + if err = d.Set(showOutputAttributeName, []map[string]any{schemas.WarehouseToSchema(w)}); err != nil { return diag.FromErr(err) } @@ -411,6 +478,8 @@ func UpdateWarehouse(ctx context.Context, d *schema.ResourceData, meta any) diag return diag.FromErr(err) } set.WarehouseSize = &size + // For now, we always want to wait for the resize completion. In the future, we may parametrize it. 
+ set.WaitForCompletion = sdk.Bool(true) } if d.HasChange("max_cluster_count") { if v, ok := d.GetOk("max_cluster_count"); ok { @@ -434,14 +503,16 @@ func UpdateWarehouse(ctx context.Context, d *schema.ResourceData, meta any) diag } set.ScalingPolicy = &scalingPolicy } else { - unset.ScalingPolicy = sdk.Bool(true) + // TODO [SNOW-1473453]: UNSET of scaling policy does not work + // unset.ScalingPolicy = sdk.Bool(true) + set.ScalingPolicy = &sdk.ScalingPolicyStandard } } if d.HasChange("auto_suspend") { if v := d.Get("auto_suspend").(int); v != -1 { set.AutoSuspend = sdk.Int(v) } else { - // TODO [SNOW-1473453]: UNSET of type does not work + // TODO [SNOW-1473453]: UNSET of auto suspend works incorrectly // unset.AutoSuspend = sdk.Bool(true) set.AutoSuspend = sdk.Int(600) } @@ -454,7 +525,9 @@ func UpdateWarehouse(ctx context.Context, d *schema.ResourceData, meta any) diag } set.AutoResume = sdk.Bool(parsed) } else { - unset.AutoResume = sdk.Bool(true) + // TODO [SNOW-1473453]: UNSET of auto resume works incorrectly + // unset.AutoResume = sdk.Bool(true) + set.AutoResume = sdk.Bool(true) } } if d.HasChange("resource_monitor") { diff --git a/pkg/resources/warehouse_acceptance_test.go b/pkg/resources/warehouse_acceptance_test.go index 9aec7fd1ec..3d9b998d6a 100644 --- a/pkg/resources/warehouse_acceptance_test.go +++ b/pkg/resources/warehouse_acceptance_test.go @@ -13,6 +13,7 @@ import ( "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/importchecks" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/planchecks" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/snowflakechecks" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/testenvs" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -21,15 +22,19 @@ import ( 
"github.com/stretchr/testify/require" ) -// [SNOW-1348102 - next PR]: merge this test with others added -func TestAcc_Warehouse(t *testing.T) { +func TestAcc_Warehouse_BasicFlows(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) warehouseId := acc.TestClient().Ids.RandomAccountObjectIdentifier() warehouseId2 := acc.TestClient().Ids.RandomAccountObjectIdentifier() - prefix := warehouseId.Name() - prefix2 := warehouseId2.Name() + name := warehouseId.Name() + name2 := warehouseId2.Name() comment := random.Comment() newComment := random.Comment() + resourceMonitor, resourceMonitorCleanup := acc.TestClient().ResourceMonitor.CreateResourceMonitor(t) + t.Cleanup(resourceMonitorCleanup) + resourceMonitorId := resourceMonitor.ID() + resource.Test(t, resource.TestCase{ ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, PreCheck: func() { acc.TestAccPreCheck(t) }, @@ -39,124 +44,212 @@ func TestAcc_Warehouse(t *testing.T) { CheckDestroy: acc.CheckDestroy(t, resources.Warehouse), Steps: []resource.TestStep{ { - Config: wConfig(prefix, comment), + Config: warehouseBasicConfigWithComment(name, comment), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", prefix), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", name), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "warehouse_type"), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "warehouse_size"), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "max_cluster_count"), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "min_cluster_count"), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "scaling_policy"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_suspend", "-1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_resume", "unknown"), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "initially_suspended"), + 
resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "resource_monitor"), resource.TestCheckResourceAttr("snowflake_warehouse.w", "comment", comment), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_suspend", "60"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "min_cluster_count", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "enable_query_acceleration", "unknown"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "query_acceleration_max_scale_factor", "-1"), + + resource.TestCheckResourceAttr("snowflake_warehouse.w", "max_concurrency_level", "-1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "statement_queued_timeout_in_seconds", "-1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "statement_timeout_in_seconds", "-1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.type", string(sdk.WarehouseTypeStandard)), resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.size", string(sdk.WarehouseSizeXSmall)), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.max_cluster_count", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.min_cluster_count", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.scaling_policy", string(sdk.ScalingPolicyStandard)), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.auto_suspend", "600"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.auto_resume", "true"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.resource_monitor", ""), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.comment", comment), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.enable_query_acceleration", "false"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", 
"show_output.0.query_acceleration_max_scale_factor", "8"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.#", "1"), resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.max_concurrency_level.0.value", "8"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.statement_queued_timeout_in_seconds.0.value", "0"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.statement_timeout_in_seconds.0.value", "172800"), + ), + }, + // IMPORT after empty config (in this method, most of the attributes will be filled with the defaults acquired from Snowflake) + { + ResourceName: "snowflake_warehouse.w", + ImportState: true, + ImportStateCheck: importchecks.ComposeImportStateCheck( + importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "name", name), + importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "warehouse_type", string(sdk.WarehouseTypeStandard)), + importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "warehouse_size", string(sdk.WarehouseSizeXSmall)), + importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "max_cluster_count", "1"), + importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "min_cluster_count", "1"), + importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "scaling_policy", string(sdk.ScalingPolicyStandard)), + importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "auto_suspend", "600"), + importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "auto_resume", "true"), + importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "resource_monitor", ""), + importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "comment", comment), + importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "enable_query_acceleration", "false"), + importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "query_acceleration_max_scale_factor", 
"8"), + + // parameters are not set on the object level, so they won't be imported + importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "max_concurrency_level", "-1"), + importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "statement_queued_timeout_in_seconds", "-1"), + importchecks.TestCheckResourceAttrInstanceState(warehouseId.Name(), "statement_timeout_in_seconds", "-1"), ), }, // RENAME { - Config: wConfig(prefix2, newComment), + Config: warehouseBasicConfigWithComment(name2, comment), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ plancheck.ExpectResourceAction("snowflake_warehouse.w", plancheck.ResourceActionUpdate), }, }, Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", prefix2), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", name2), ), }, - // CHANGE PROPERTIES (proves https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2652) + // Change config but use defaults for every attribute (but not the parameters) - expect no changes (because these are already SF values) except computed show_output (follow-up why suppress diff is not taken into account in has changes?) 
{ - Config: wConfig2(prefix2, string(sdk.WarehouseSizeXLarge), 20, 2, newComment), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + planchecks.PrintPlanDetails("snowflake_warehouse.w", "warehouse_type", "warehouse_size", "max_cluster_count", "min_cluster_count", "scaling_policy", "auto_suspend", "auto_resume", "enable_query_acceleration", "query_acceleration_max_scale_factor", "max_concurrency_level", "statement_queued_timeout_in_seconds", "statement_timeout_in_seconds", "show_output"), + plancheck.ExpectResourceAction("snowflake_warehouse.w", plancheck.ResourceActionUpdate), + planchecks.ExpectComputed("snowflake_warehouse.w", "show_output", true), + }, + }, + Config: warehouseFullDefaultWithoutParametersConfig(name2, comment), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", prefix2), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "comment", newComment), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_suspend", "60"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "warehouse_size", string(sdk.WarehouseSizeXLarge)), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "min_cluster_count", "2"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.max_concurrency_level.0.value", "20"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "warehouse_type", string(sdk.WarehouseTypeStandard)), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "warehouse_size", string(sdk.WarehouseSizeXSmall)), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "max_cluster_count", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "min_cluster_count", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "scaling_policy", string(sdk.ScalingPolicyStandard)), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_suspend", "600"), + 
resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_resume", "true"), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "initially_suspended"), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "resource_monitor"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "comment", comment), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "enable_query_acceleration", "false"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "query_acceleration_max_scale_factor", "8"), + + // parameters will still remain unset + resource.TestCheckResourceAttr("snowflake_warehouse.w", "max_concurrency_level", "-1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "statement_queued_timeout_in_seconds", "-1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "statement_timeout_in_seconds", "-1"), + ), + }, + // add parameters - update expected (different level even with same values) + { + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + planchecks.PrintPlanDetails("snowflake_warehouse.w", "warehouse_type", "warehouse_size", "max_cluster_count", "min_cluster_count", "scaling_policy", "auto_suspend", "auto_resume", "enable_query_acceleration", "query_acceleration_max_scale_factor", "max_concurrency_level", "statement_queued_timeout_in_seconds", "statement_timeout_in_seconds", "show_output"), + + planchecks.ExpectChange("snowflake_warehouse.w", "max_concurrency_level", tfjson.ActionUpdate, sdk.String("-1"), sdk.String("8")), + planchecks.ExpectChange("snowflake_warehouse.w", "statement_queued_timeout_in_seconds", tfjson.ActionUpdate, sdk.String("-1"), sdk.String("0")), + planchecks.ExpectChange("snowflake_warehouse.w", "statement_timeout_in_seconds", tfjson.ActionUpdate, sdk.String("-1"), sdk.String("172800")), + }, + }, + Config: warehouseFullDefaultConfig(name2, comment), + Check: resource.ComposeTestCheckFunc( + // no changes in the attributes + 
resource.TestCheckResourceAttr("snowflake_warehouse.w", "warehouse_type", string(sdk.WarehouseTypeStandard)), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "warehouse_size", string(sdk.WarehouseSizeXSmall)), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "max_cluster_count", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "min_cluster_count", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "scaling_policy", string(sdk.ScalingPolicyStandard)), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_suspend", "600"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_resume", "true"), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "initially_suspended"), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "resource_monitor"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "comment", comment), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "enable_query_acceleration", "false"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "query_acceleration_max_scale_factor", "8"), + + // parameters were changed + resource.TestCheckResourceAttr("snowflake_warehouse.w", "max_concurrency_level", "8"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "statement_queued_timeout_in_seconds", "0"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "statement_timeout_in_seconds", "172800"), ), }, - // CHANGE JUST max_concurrency_level + // CHANGE PROPERTIES (normal and parameters) { - Config: wConfig2(prefix2, string(sdk.WarehouseSizeXLarge), 16, 2, newComment), ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{plancheck.ExpectNonEmptyPlan()}, + PreApply: []plancheck.PlanCheck{ + planchecks.PrintPlanDetails("snowflake_warehouse.w", "warehouse_type", "warehouse_size", "max_cluster_count", "min_cluster_count", "scaling_policy", "auto_suspend", "auto_resume", "enable_query_acceleration", 
"query_acceleration_max_scale_factor", "max_concurrency_level", "statement_queued_timeout_in_seconds", "statement_timeout_in_seconds", "show_output"), + + planchecks.ExpectChange("snowflake_warehouse.w", "warehouse_type", tfjson.ActionUpdate, sdk.String(string(sdk.WarehouseTypeStandard)), sdk.String(string(sdk.WarehouseTypeSnowparkOptimized))), + planchecks.ExpectChange("snowflake_warehouse.w", "warehouse_size", tfjson.ActionUpdate, sdk.String(string(sdk.WarehouseSizeXSmall)), sdk.String(string(sdk.WarehouseSizeMedium))), + planchecks.ExpectChange("snowflake_warehouse.w", "max_cluster_count", tfjson.ActionUpdate, sdk.String("1"), sdk.String("4")), + planchecks.ExpectChange("snowflake_warehouse.w", "min_cluster_count", tfjson.ActionUpdate, sdk.String("1"), sdk.String("2")), + planchecks.ExpectChange("snowflake_warehouse.w", "scaling_policy", tfjson.ActionUpdate, sdk.String(string(sdk.ScalingPolicyStandard)), sdk.String(string(sdk.ScalingPolicyEconomy))), + planchecks.ExpectChange("snowflake_warehouse.w", "auto_suspend", tfjson.ActionUpdate, sdk.String("600"), sdk.String("1200")), + planchecks.ExpectChange("snowflake_warehouse.w", "auto_resume", tfjson.ActionUpdate, sdk.String("true"), sdk.String("false")), + planchecks.ExpectChange("snowflake_warehouse.w", "enable_query_acceleration", tfjson.ActionUpdate, sdk.String("false"), sdk.String("true")), + planchecks.ExpectChange("snowflake_warehouse.w", "query_acceleration_max_scale_factor", tfjson.ActionUpdate, sdk.String("8"), sdk.String("4")), + + planchecks.ExpectChange("snowflake_warehouse.w", "max_concurrency_level", tfjson.ActionUpdate, sdk.String("8"), sdk.String("4")), + planchecks.ExpectChange("snowflake_warehouse.w", "statement_queued_timeout_in_seconds", tfjson.ActionUpdate, sdk.String("0"), sdk.String("5")), + planchecks.ExpectChange("snowflake_warehouse.w", "statement_timeout_in_seconds", tfjson.ActionUpdate, sdk.String("172800"), sdk.String("86400")), + }, }, + Config: warehouseFullConfigNoDefaults(name2, 
newComment, resourceMonitorId), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", prefix2), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "warehouse_type", string(sdk.WarehouseTypeSnowparkOptimized)), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "warehouse_size", string(sdk.WarehouseSizeMedium)), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "max_cluster_count", "4"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "min_cluster_count", "2"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "scaling_policy", string(sdk.ScalingPolicyEconomy)), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_suspend", "1200"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_resume", "false"), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "initially_suspended"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "resource_monitor", resourceMonitorId.Name()), resource.TestCheckResourceAttr("snowflake_warehouse.w", "comment", newComment), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_suspend", "60"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "warehouse_size", string(sdk.WarehouseSizeXLarge)), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.max_concurrency_level.0.value", "16"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "enable_query_acceleration", "true"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "query_acceleration_max_scale_factor", "4"), + + resource.TestCheckResourceAttr("snowflake_warehouse.w", "max_concurrency_level", "4"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "statement_queued_timeout_in_seconds", "5"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "statement_timeout_in_seconds", "86400"), ), }, // CHANGE max_concurrency_level EXTERNALLY (proves 
https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2318) { PreConfig: func() { acc.TestClient().Warehouse.UpdateMaxConcurrencyLevel(t, warehouseId2, 10) }, ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{plancheck.ExpectNonEmptyPlan()}, + PreApply: []plancheck.PlanCheck{ + planchecks.ExpectDrift("snowflake_warehouse.w", "max_concurrency_level", sdk.String("4"), sdk.String("10")), + planchecks.ExpectChange("snowflake_warehouse.w", "max_concurrency_level", tfjson.ActionUpdate, sdk.String("10"), sdk.String("4")), + }, }, - Config: wConfig2(prefix2, string(sdk.WarehouseSizeXLarge), 16, 2, newComment), + Config: warehouseFullConfigNoDefaults(name2, newComment, resourceMonitorId), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", prefix2), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "comment", newComment), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_suspend", "60"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "warehouse_size", string(sdk.WarehouseSizeXLarge)), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "max_concurrency_level", "16"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", name2), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "max_concurrency_level", "4"), + + resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.max_concurrency_level.0.value", "4"), ), }, // IMPORT - // [SNOW-1348102 - next PR]: fox import (resource_monitor) and adjust the expected fields here { - ResourceName: "snowflake_warehouse.w", - ImportState: true, - // ImportStateVerify: true, - // ImportStateVerifyIgnore: []string{ - // "initially_suspended", - // "query_acceleration_max_scale_factor", - // "max_concurrency_level", - // "statement_queued_timeout_in_seconds", - // "statement_timeout_in_seconds", - // }, + ResourceName: "snowflake_warehouse.w", + ImportState: true, + 
ImportStateVerify: true, }, }, }) } -func wConfig(prefix string, comment string) string { - return fmt.Sprintf(` -resource "snowflake_warehouse" "w" { - name = "%s" - comment = "%s" - - auto_suspend = 60 - max_cluster_count = 4 - min_cluster_count = 1 - scaling_policy = "STANDARD" - auto_resume = true - initially_suspended = true -} -`, prefix, comment) -} - -func wConfig2(prefix string, size string, maxConcurrencyLevel int, minClusterCount int, comment string) string { - return fmt.Sprintf(` -resource "snowflake_warehouse" "w" { - name = "%[1]s" - comment = "%[5]s" - warehouse_size = "%[2]s" - - auto_suspend = 60 - max_cluster_count = 4 - min_cluster_count = %[4]d - scaling_policy = "STANDARD" - auto_resume = true - initially_suspended = true - max_concurrency_level = %[3]d -} -`, prefix, size, maxConcurrencyLevel, minClusterCount, comment) -} - func TestAcc_Warehouse_WarehouseType(t *testing.T) { id := acc.TestClient().Ids.RandomAccountObjectIdentifier() @@ -190,6 +283,7 @@ func TestAcc_Warehouse_WarehouseType(t *testing.T) { ResourceName: "snowflake_warehouse.w", ImportState: true, ImportStateCheck: importchecks.ComposeImportStateCheck( + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "name", id.Name()), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "warehouse_type", string(sdk.WarehouseTypeStandard)), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.#", "1"), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.0.type", string(sdk.WarehouseTypeStandard)), @@ -197,10 +291,6 @@ func TestAcc_Warehouse_WarehouseType(t *testing.T) { }, // change type in config { - PreConfig: func() { - // TODO [SNOW-1348102 - next PR]: currently just for tests, later add suspension to the resource (additional field state to allow escaping from the bad situation?) 
- acc.TestClient().Warehouse.Suspend(t, id) - }, ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ planchecks.PrintPlanDetails("snowflake_warehouse.w", "warehouse_type", "show_output"), @@ -303,6 +393,7 @@ func TestAcc_Warehouse_WarehouseType(t *testing.T) { ResourceName: "snowflake_warehouse.w", ImportState: true, ImportStateCheck: importchecks.ComposeImportStateCheck( + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "name", id.Name()), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "warehouse_type", string(sdk.WarehouseTypeStandard)), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.#", "1"), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.0.type", string(sdk.WarehouseTypeStandard)), @@ -345,6 +436,7 @@ func TestAcc_Warehouse_WarehouseSizes(t *testing.T) { ResourceName: "snowflake_warehouse.w", ImportState: true, ImportStateCheck: importchecks.ComposeImportStateCheck( + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "name", id.Name()), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "warehouse_size", string(sdk.WarehouseSizeSmall)), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.#", "1"), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.0.size", string(sdk.WarehouseSizeSmall)), @@ -454,6 +546,7 @@ func TestAcc_Warehouse_WarehouseSizes(t *testing.T) { ResourceName: "snowflake_warehouse.w", ImportState: true, ImportStateCheck: importchecks.ComposeImportStateCheck( + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "name", id.Name()), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "warehouse_size", string(sdk.WarehouseSizeXSmall)), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.#", "1"), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.0.size", string(sdk.WarehouseSizeXSmall)), @@ -463,8 +556,7 @@ func 
TestAcc_Warehouse_WarehouseSizes(t *testing.T) { }) } -// [SNOW-1348102 - next PR]: add more validations -func TestAcc_Warehouse_SizeValidation(t *testing.T) { +func TestAcc_Warehouse_Validations(t *testing.T) { id := acc.TestClient().Ids.RandomAccountObjectIdentifier() resource.Test(t, resource.TestCase{ @@ -475,10 +567,89 @@ func TestAcc_Warehouse_SizeValidation(t *testing.T) { }, CheckDestroy: acc.CheckDestroy(t, resources.Warehouse), Steps: []resource.TestStep{ + { + Config: warehouseWithTypeConfig(id.Name(), "unknown", sdk.WarehouseSizeXSmall), + ExpectError: regexp.MustCompile("invalid warehouse type: unknown"), + }, { Config: warehouseWithSizeConfig(id.Name(), "SMALLa"), ExpectError: regexp.MustCompile("invalid warehouse size: SMALLa"), }, + { + Config: warehouseConfigWithMaxClusterCount(id.Name(), 100), + ExpectError: regexp.MustCompile(`expected max_cluster_count to be in the range \(1 - 10\), got 100`), + }, + { + Config: warehouseConfigWithMinClusterCount(id.Name(), 100), + ExpectError: regexp.MustCompile(`expected min_cluster_count to be in the range \(1 - 10\), got 100`), + }, + { + Config: warehouseConfigWithScalingPolicy(id.Name(), "unknown"), + ExpectError: regexp.MustCompile("invalid scaling policy: unknown"), + }, + { + Config: warehouseWithAutoResumeConfig(id.Name(), "other"), + ExpectError: regexp.MustCompile(`expected auto_resume to be one of \["true" "false"], got other`), + }, + { + Config: warehouseConfigWithMaxConcurrencyLevel(id.Name(), -1), + ExpectError: regexp.MustCompile(`expected max_concurrency_level to be at least \(1\), got -1`), + }, + }, + }) +} + +// Just for the experimental purposes +func TestAcc_Warehouse_ValidateDriftForCurrentWarehouse(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.ConfigureClientOnce) + id := acc.TestClient().Ids.RandomAccountObjectIdentifier() + secondId := acc.TestClient().Ids.RandomAccountObjectIdentifier() + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: 
acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Warehouse), + Steps: []resource.TestStep{ + { + Config: warehouseBasicConfig(id.Name()), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.is_current", "true"), + ), + }, + { + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction("snowflake_warehouse.w", plancheck.ResourceActionNoop), + plancheck.ExpectResourceAction("snowflake_warehouse.w2", plancheck.ResourceActionCreate), + }, + }, + Config: warehouseBasicConfig(id.Name()) + secondWarehouseBasicConfig(secondId.Name()), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.is_current", "true"), + + resource.TestCheckResourceAttr("snowflake_warehouse.w2", "show_output.#", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w2", "show_output.0.is_current", "true"), + ), + }, + { + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + planchecks.ExpectDrift("snowflake_warehouse.w", "show_output.0.is_current", sdk.String("true"), sdk.String("false")), + plancheck.ExpectResourceAction("snowflake_warehouse.w", plancheck.ResourceActionNoop), + plancheck.ExpectResourceAction("snowflake_warehouse.w2", plancheck.ResourceActionNoop), + }, + }, + Config: warehouseBasicConfig(id.Name()) + secondWarehouseBasicConfig(secondId.Name()), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", 
"show_output.0.is_current", "false"), + ), + }, }, }) } @@ -504,7 +675,7 @@ func TestAcc_Warehouse_AutoResume(t *testing.T) { planchecks.ExpectComputed("snowflake_warehouse.w", "show_output", true), }, }, - Config: warehouseWithAutoResumeConfig(id.Name(), true), + Config: warehouseWithAutoResumeConfig(id.Name(), "true"), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_resume", "true"), resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), @@ -517,6 +688,7 @@ func TestAcc_Warehouse_AutoResume(t *testing.T) { ResourceName: "snowflake_warehouse.w", ImportState: true, ImportStateCheck: importchecks.ComposeImportStateCheck( + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "name", id.Name()), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "auto_resume", "true"), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.#", "1"), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.0.auto_resume", "true"), @@ -531,7 +703,7 @@ func TestAcc_Warehouse_AutoResume(t *testing.T) { planchecks.ExpectComputed("snowflake_warehouse.w", "show_output", true), }, }, - Config: warehouseWithAutoResumeConfig(id.Name(), false), + Config: warehouseWithAutoResumeConfig(id.Name(), "false"), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_resume", "false"), resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), @@ -553,31 +725,31 @@ func TestAcc_Warehouse_AutoResume(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_resume", "unknown"), resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.auto_resume", "false"), - snowflakechecks.CheckAutoResume(t, id, false), + resource.TestCheckResourceAttr("snowflake_warehouse.w", 
"show_output.0.auto_resume", "true"), + snowflakechecks.CheckAutoResume(t, id, true), ), }, // change auto resume externally { PreConfig: func() { // we change the auto resume to the type different from default, expecting action - acc.TestClient().Warehouse.UpdateAutoResume(t, id, true) + acc.TestClient().Warehouse.UpdateAutoResume(t, id, false) }, Config: warehouseBasicConfig(id.Name()), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ plancheck.ExpectNonEmptyPlan(), planchecks.PrintPlanDetails("snowflake_warehouse.w", "auto_resume", "show_output"), - planchecks.ExpectDrift("snowflake_warehouse.w", "auto_resume", sdk.String("unknown"), sdk.String("true")), - planchecks.ExpectDrift("snowflake_warehouse.w", "show_output.0.auto_resume", sdk.String("false"), sdk.String("true")), - planchecks.ExpectChange("snowflake_warehouse.w", "auto_resume", tfjson.ActionUpdate, sdk.String("true"), sdk.String("unknown")), + planchecks.ExpectDrift("snowflake_warehouse.w", "auto_resume", sdk.String("unknown"), sdk.String("false")), + planchecks.ExpectDrift("snowflake_warehouse.w", "show_output.0.auto_resume", sdk.String("true"), sdk.String("false")), + planchecks.ExpectChange("snowflake_warehouse.w", "auto_resume", tfjson.ActionUpdate, sdk.String("false"), sdk.String("unknown")), planchecks.ExpectComputed("snowflake_warehouse.w", "show_output", true), }, }, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_resume", "unknown"), resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.auto_resume", "false"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.auto_resume", "true"), snowflakechecks.CheckWarehouseType(t, id, sdk.WarehouseTypeStandard), ), }, @@ -586,9 +758,124 @@ func TestAcc_Warehouse_AutoResume(t *testing.T) { ResourceName: "snowflake_warehouse.w", ImportState: true, 
ImportStateCheck: importchecks.ComposeImportStateCheck( - importchecks.TestCheckResourceAttrInstanceState(id.Name(), "auto_resume", "false"), + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "name", id.Name()), + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "auto_resume", "true"), + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.#", "1"), + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.0.auto_resume", "true"), + ), + }, + }, + }) +} + +// TestAcc_Warehouse_AutoSuspend validates behavior for falling back to Snowflake default for the integer attribute +func TestAcc_Warehouse_AutoSuspend(t *testing.T) { + id := acc.TestClient().Ids.RandomAccountObjectIdentifier() + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Warehouse), + Steps: []resource.TestStep{ + // set up with auto suspend set in config + { + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + planchecks.PrintPlanDetails("snowflake_warehouse.w", "auto_suspend", "show_output"), + planchecks.ExpectChange("snowflake_warehouse.w", "auto_suspend", tfjson.ActionCreate, nil, sdk.String("1200")), + planchecks.ExpectComputed("snowflake_warehouse.w", "show_output", true), + }, + }, + Config: warehouseConfigWithAutoSuspend(id.Name(), 1200), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_suspend", "1200"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.auto_suspend", "1200"), + snowflakechecks.CheckAutoSuspendCount(t, id, 1200), + ), + }, + // import when auto suspend in config + { + 
ResourceName: "snowflake_warehouse.w", + ImportState: true, + ImportStateCheck: importchecks.ComposeImportStateCheck( + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "name", id.Name()), + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "auto_suspend", "1200"), + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.#", "1"), + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.0.auto_suspend", "1200"), + ), + }, + // change value in config to Snowflake default + { + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + planchecks.PrintPlanDetails("snowflake_warehouse.w", "auto_suspend", "show_output"), + planchecks.ExpectChange("snowflake_warehouse.w", "auto_suspend", tfjson.ActionUpdate, sdk.String("1200"), sdk.String("600")), + planchecks.ExpectComputed("snowflake_warehouse.w", "show_output", true), + }, + }, + Config: warehouseConfigWithAutoSuspend(id.Name(), 600), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_suspend", "600"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.auto_suspend", "600"), + snowflakechecks.CheckAutoSuspendCount(t, id, 600), + ), + }, + // remove auto suspend from config (expecting non-empty plan because we do not know the default) + { + Config: warehouseBasicConfig(id.Name()), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction("snowflake_warehouse.w", plancheck.ResourceActionUpdate), + planchecks.PrintPlanDetails("snowflake_warehouse.w", "auto_suspend", "show_output"), + planchecks.ExpectChange("snowflake_warehouse.w", "auto_suspend", tfjson.ActionUpdate, sdk.String("600"), sdk.String("-1")), + planchecks.ExpectComputed("snowflake_warehouse.w", "show_output", true), + }, + }, + Check: resource.ComposeTestCheckFunc( + 
resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_suspend", "-1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.auto_suspend", "600"), + snowflakechecks.CheckAutoSuspendCount(t, id, 600), + ), + }, + // change auto suspend externally + { + PreConfig: func() { + // we change the max cluster count to the type different from default, expecting action + acc.TestClient().Warehouse.UpdateAutoSuspend(t, id, 2400) + }, + Config: warehouseBasicConfig(id.Name()), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectNonEmptyPlan(), + planchecks.PrintPlanDetails("snowflake_warehouse.w", "auto_suspend", "show_output"), + planchecks.ExpectDrift("snowflake_warehouse.w", "auto_suspend", sdk.String("-1"), sdk.String("2400")), + planchecks.ExpectDrift("snowflake_warehouse.w", "show_output.0.auto_suspend", sdk.String("600"), sdk.String("2400")), + planchecks.ExpectChange("snowflake_warehouse.w", "auto_suspend", tfjson.ActionUpdate, sdk.String("2400"), sdk.String("-1")), + planchecks.ExpectComputed("snowflake_warehouse.w", "show_output", true), + }, + }, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_suspend", "-1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.auto_suspend", "600"), + snowflakechecks.CheckAutoSuspendCount(t, id, 600), + ), + }, + // import when no type in config + { + ResourceName: "snowflake_warehouse.w", + ImportState: true, + ImportStateCheck: importchecks.ComposeImportStateCheck( + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "name", id.Name()), + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "auto_suspend", "600"), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.#", "1"), - 
importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.0.auto_resume", "false"), + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.0.auto_suspend", "600"), ), }, }, @@ -634,9 +921,6 @@ func TestAcc_Warehouse_ZeroValues(t *testing.T) { resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.statement_queued_timeout_in_seconds.0.level", string(sdk.ParameterTypeWarehouse)), resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.statement_timeout_in_seconds.0.value", "0"), resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.statement_timeout_in_seconds.0.level", string(sdk.ParameterTypeWarehouse)), - - // TODO [SNOW-1348102 - next PR]: snowflake checks? - // snowflakechecks.CheckWarehouseSize(t, id, sdk.WarehouseSizeSmall), ), }, // remove all from config (to validate that unset is run correctly) @@ -699,6 +983,29 @@ func TestAcc_Warehouse_ZeroValues(t *testing.T) { resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.statement_timeout_in_seconds.0.level", string(sdk.ParameterTypeWarehouse)), ), }, + // import zero values + { + ResourceName: "snowflake_warehouse.w", + ImportState: true, + ImportStateCheck: importchecks.ComposeImportStateCheck( + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "name", id.Name()), + + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "auto_suspend", "0"), + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "query_acceleration_max_scale_factor", "0"), + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "statement_queued_timeout_in_seconds", "0"), + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "statement_timeout_in_seconds", "0"), + + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.#", "1"), + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "show_output.0.auto_suspend", "0"), + importchecks.TestCheckResourceAttrInstanceState(id.Name(), 
"show_output.0.query_acceleration_max_scale_factor", "0"), + + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "parameters.#", "1"), + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "parameters.0.statement_queued_timeout_in_seconds.0.value", "0"), + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "parameters.0.statement_queued_timeout_in_seconds.0.level", string(sdk.ParameterTypeWarehouse)), + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "parameters.0.statement_timeout_in_seconds.0.value", "0"), + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "parameters.0.statement_timeout_in_seconds.0.level", string(sdk.ParameterTypeWarehouse)), + ), + }, }, }) } @@ -730,9 +1037,6 @@ func TestAcc_Warehouse_Parameter(t *testing.T) { resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.#", "1"), resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.statement_timeout_in_seconds.0.value", "86400"), resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.statement_timeout_in_seconds.0.level", string(sdk.ParameterTypeWarehouse)), - - // TODO [SNOW-1348102 - next PR]: snowflake checks? 
- // snowflakechecks.CheckWarehouseSize(t, id, sdk.WarehouseSizeSmall), ), }, // do not make any change (to check if there is no drift) @@ -749,6 +1053,7 @@ func TestAcc_Warehouse_Parameter(t *testing.T) { ResourceName: "snowflake_warehouse.w", ImportState: true, ImportStateCheck: importchecks.ComposeImportStateCheck( + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "name", id.Name()), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "statement_timeout_in_seconds", "86400"), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "parameters.#", "1"), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "parameters.0.statement_timeout_in_seconds.0.value", "86400"), @@ -822,33 +1127,59 @@ func TestAcc_Warehouse_Parameter(t *testing.T) { resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.statement_timeout_in_seconds.0.level", string(sdk.ParameterTypeWarehouse)), ), }, - // remove the param from config + // change the param value on account to the value from config (but on different level) { PreConfig: func() { - param := acc.TestClient().Parameter.ShowAccountParameter(t, sdk.AccountParameterStatementTimeoutInSeconds) - require.Equal(t, "", string(param.Level)) + acc.TestClient().Warehouse.UnsetStatementTimeoutInSeconds(t, id) + acc.TestClient().Parameter.UpdateAccountParameterTemporarily(t, sdk.AccountParameterStatementTimeoutInSeconds, "43200") }, - Config: warehouseBasicConfig(id.Name()), + Config: warehouseWithParameterConfig(id.Name(), 43200), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ planchecks.PrintPlanDetails("snowflake_warehouse.w", "statement_timeout_in_seconds", "parameters"), - planchecks.ExpectChange("snowflake_warehouse.w", "statement_timeout_in_seconds", tfjson.ActionUpdate, sdk.String("43200"), sdk.String("-1")), + planchecks.ExpectDrift("snowflake_warehouse.w", "statement_timeout_in_seconds", sdk.String("43200"), sdk.String("-1")), + 
planchecks.ExpectChange("snowflake_warehouse.w", "statement_timeout_in_seconds", tfjson.ActionUpdate, sdk.String("-1"), sdk.String("43200")), planchecks.ExpectComputed("snowflake_warehouse.w", "parameters", true), }, }, Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_warehouse.w", "statement_timeout_in_seconds", "-1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "statement_timeout_in_seconds", "43200"), resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.#", "1"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.statement_timeout_in_seconds.0.value", "172800"), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.statement_timeout_in_seconds.0.level", ""), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.statement_timeout_in_seconds.0.value", "43200"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.statement_timeout_in_seconds.0.level", string(sdk.ParameterTypeWarehouse)), ), }, - // import when param not in config (snowflake default) + // remove the param from config + { + PreConfig: func() { + // clean after previous step + acc.TestClient().Parameter.UnsetAccountParameter(t, sdk.AccountParameterStatementTimeoutInSeconds) + param := acc.TestClient().Parameter.ShowAccountParameter(t, sdk.AccountParameterStatementTimeoutInSeconds) + require.Equal(t, "", string(param.Level)) + }, + Config: warehouseBasicConfig(id.Name()), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + planchecks.PrintPlanDetails("snowflake_warehouse.w", "statement_timeout_in_seconds", "parameters"), + planchecks.ExpectChange("snowflake_warehouse.w", "statement_timeout_in_seconds", tfjson.ActionUpdate, sdk.String("43200"), sdk.String("-1")), + planchecks.ExpectComputed("snowflake_warehouse.w", "parameters", true), + }, + }, + Check: resource.ComposeTestCheckFunc( + 
resource.TestCheckResourceAttr("snowflake_warehouse.w", "statement_timeout_in_seconds", "-1"), + + resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.#", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.statement_timeout_in_seconds.0.value", "172800"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "parameters.0.statement_timeout_in_seconds.0.level", ""), + ), + }, + // import when param not in config (snowflake default) { ResourceName: "snowflake_warehouse.w", ImportState: true, ImportStateCheck: importchecks.ComposeImportStateCheck( + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "name", id.Name()), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "statement_timeout_in_seconds", "-1"), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "parameters.#", "1"), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "parameters.0.statement_timeout_in_seconds.0.value", "172800"), @@ -924,6 +1255,7 @@ func TestAcc_Warehouse_Parameter(t *testing.T) { ResourceName: "snowflake_warehouse.w", ImportState: true, ImportStateCheck: importchecks.ComposeImportStateCheck( + importchecks.TestCheckResourceAttrInstanceState(id.Name(), "name", id.Name()), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "statement_timeout_in_seconds", "-1"), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "parameters.#", "1"), importchecks.TestCheckResourceAttrInstanceState(id.Name(), "parameters.0.statement_timeout_in_seconds.0.value", "86400"), @@ -976,9 +1308,59 @@ func TestAcc_Warehouse_Parameter(t *testing.T) { }) } -// TODO [SNOW-1348102 - next PR]: unskip - it fails currently because of other state upgraders missing -func TestAcc_Warehouse_migrateFromVersion091_withWarehouseSize(t *testing.T) { - t.Skip("Skipped due to the missing state migrators for other props") +func TestAcc_Warehouse_InitiallySuspendedChangesPostCreation(t *testing.T) { + id := 
acc.TestClient().Ids.RandomAccountObjectIdentifier() + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Warehouse), + Steps: []resource.TestStep{ + { + Config: warehouseWithInitiallySuspendedConfig(id.Name(), true), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "initially_suspended", "true"), + + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.state", string(sdk.WarehouseStateSuspended)), + ), + }, + { + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + Config: warehouseWithInitiallySuspendedConfig(id.Name(), false), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "initially_suspended", "true"), + + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.state", string(sdk.WarehouseStateSuspended)), + ), + }, + { + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + Config: warehouseBasicConfig(id.Name()), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "initially_suspended", "true"), + + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.state", string(sdk.WarehouseStateSuspended)), + ), + }, + }, + }) +} + +func TestAcc_Warehouse_migrateFromVersion092_withWarehouseSize(t *testing.T) { id := 
acc.TestClient().Ids.RandomAccountObjectIdentifier() resource.Test(t, resource.TestCase{ @@ -996,7 +1378,7 @@ func TestAcc_Warehouse_migrateFromVersion091_withWarehouseSize(t *testing.T) { Source: "Snowflake-Labs/snowflake", }, }, - Config: warehouseWithSizeConfig(id.Name(), string(sdk.WarehouseSizeX4Large)), + Config: warehouseFullMigrationConfigWithSize(id.Name(), "", sdk.WarehouseSizeX4Large), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", id.Name()), resource.TestCheckResourceAttr("snowflake_warehouse.w", "warehouse_size", "4XLARGE"), @@ -1004,10 +1386,12 @@ func TestAcc_Warehouse_migrateFromVersion091_withWarehouseSize(t *testing.T) { }, { ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, - Config: warehouseWithSizeConfig(id.Name(), string(sdk.WarehouseSizeX4Large)), ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{plancheck.ExpectEmptyPlan()}, + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, }, + Config: warehouseFullMigrationConfigWithSize(id.Name(), "", sdk.WarehouseSizeX4Large), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", id.Name()), resource.TestCheckResourceAttr("snowflake_warehouse.w", "warehouse_size", string(sdk.WarehouseSizeX4Large)), @@ -1017,16 +1401,7 @@ func TestAcc_Warehouse_migrateFromVersion091_withWarehouseSize(t *testing.T) { }) } -// TODO [SNOW-1348102 - next PR]: test defaults removal -// TODO [SNOW-1348102 - next PR]: test basic creation (check previous defaults) -// TODO [SNOW-1348102 - next PR]: test auto_suspend set to 0 (or NULL?) -// TODO [SNOW-1348102 - next PR]: do we care about drift in warehouse for is_current warehouse? 
(test) -// TODO [SNOW-1348102 - next PR]: test boolean type change (with leaving boolean/int in config) and add migration -// TODO [SNOW-1348102 - next PR]: test int, string, identifier changed externally -// TODO [SNOW-1348102 - next PR]: test wait_for_provisioning removal -// TODO [SNOW-1348102 - next PR]: unskip - it fails currently because of other state upograders missing -func TestAcc_Warehouse_migrateFromVersion091_withoutWarehouseSize(t *testing.T) { - t.Skip("Skipped due to the missing state migrators for other props") +func TestAcc_Warehouse_migrateFromVersion092_allFieldsFilledBeforeMigration(t *testing.T) { id := acc.TestClient().Ids.RandomAccountObjectIdentifier() resource.Test(t, resource.TestCase{ @@ -1044,27 +1419,486 @@ func TestAcc_Warehouse_migrateFromVersion091_withoutWarehouseSize(t *testing.T) Source: "Snowflake-Labs/snowflake", }, }, - Config: warehouseBasicConfig(id.Name()), + Config: warehouseFullMigrationConfig(id.Name(), true), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", id.Name()), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "wait_for_provisioning", "true"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "resource_monitor", "null"), + ), + }, + { + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + Config: warehouseFullMigrationConfig(id.Name(), false), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", id.Name()), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "wait_for_provisioning"), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "resource_monitor"), + ), + }, + }, + }) +} + +// The result of removing the custom conditional logic for enable_query_acceleration and query_acceleration_max_scale_factor. 
+func TestAcc_Warehouse_migrateFromVersion092_queryAccelerationMaxScaleFactor_sameConfig(t *testing.T) { + id := acc.TestClient().Ids.RandomAccountObjectIdentifier() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Warehouse), + + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "snowflake": { + VersionConstraint: "=0.92.0", + Source: "Snowflake-Labs/snowflake", + }, + }, + Config: warehouseFullDefaultConfig(id.Name(), ""), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", id.Name()), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "query_acceleration_max_scale_factor"), + ), + }, + { + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + Config: warehouseFullDefaultConfig(id.Name(), ""), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + planchecks.PrintPlanDetails("snowflake_warehouse.w", "query_acceleration_max_scale_factor", "show_output"), + }, + }, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", id.Name()), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "query_acceleration_max_scale_factor", "8"), + + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.query_acceleration_max_scale_factor", "8"), + ), + }, + }, + }) +} + +// The result of removing the custom conditional logic for enable_query_acceleration and query_acceleration_max_scale_factor. 
+func TestAcc_Warehouse_migrateFromVersion092_queryAccelerationMaxScaleFactor_noInConfigAfter(t *testing.T) { + id := acc.TestClient().Ids.RandomAccountObjectIdentifier() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Warehouse), + + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "snowflake": { + VersionConstraint: "=0.92.0", + Source: "Snowflake-Labs/snowflake", + }, + }, + Config: warehouseFullDefaultConfig(id.Name(), ""), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", id.Name()), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "query_acceleration_max_scale_factor"), + ), + }, + { + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + Config: warehouseFullDefaultConfigWithQueryAccelerationMaxScaleFactorRemoved(id.Name(), ""), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + planchecks.PrintPlanDetails("snowflake_warehouse.w", "query_acceleration_max_scale_factor", "show_output"), + planchecks.ExpectChange("snowflake_warehouse.w", "query_acceleration_max_scale_factor", tfjson.ActionUpdate, sdk.String("8"), sdk.String("-1")), + }, + }, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", id.Name()), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "query_acceleration_max_scale_factor", "-1"), + + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.query_acceleration_max_scale_factor", "8"), + ), + }, + }, + }) +} + +// The result of removing the custom conditional logic for enable_query_acceleration and query_acceleration_max_scale_factor. 
+func TestAcc_Warehouse_migrateFromVersion092_queryAccelerationMaxScaleFactor_differentConfigAfterMigration(t *testing.T) { + id := acc.TestClient().Ids.RandomAccountObjectIdentifier() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Warehouse), + + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "snowflake": { + VersionConstraint: "=0.92.0", + Source: "Snowflake-Labs/snowflake", + }, + }, + Config: warehouseFullDefaultConfig(id.Name(), ""), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", id.Name()), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "query_acceleration_max_scale_factor"), + ), + }, + { + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + Config: warehouseFullDefaultConfigWithQueryAcceleration(id.Name(), "", true, 10), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + planchecks.PrintPlanDetails("snowflake_warehouse.w", "query_acceleration_max_scale_factor", "show_output"), + planchecks.ExpectChange("snowflake_warehouse.w", "query_acceleration_max_scale_factor", tfjson.ActionUpdate, sdk.String("8"), sdk.String("10")), + }, + }, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", id.Name()), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "query_acceleration_max_scale_factor", "10"), + + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.#", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "show_output.0.query_acceleration_max_scale_factor", "10"), + ), + }, + }, + }) +} + +func TestAcc_Warehouse_migrateFromVersion092_noConfigToFullConfig(t *testing.T) { + id := 
acc.TestClient().Ids.RandomAccountObjectIdentifier() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Warehouse), + + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "snowflake": { + VersionConstraint: "=0.92.0", + Source: "Snowflake-Labs/snowflake", + }, + }, + // query acceleration is needed here because of the custom logic that was removed + Config: warehouseBasicConfigWithQueryAcceleration(id.Name()), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", id.Name()), + ), + }, + { + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + Config: warehouseFullDefaultConfigWithQueryAcceleration(id.Name(), "", true, 8), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + }, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "warehouse_type", string(sdk.WarehouseTypeStandard)), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "warehouse_size", string(sdk.WarehouseSizeXSmall)), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "max_cluster_count", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "min_cluster_count", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "scaling_policy", string(sdk.ScalingPolicyStandard)), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_suspend", "600"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_resume", "true"), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "initially_suspended"), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "resource_monitor"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "comment", 
""), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "enable_query_acceleration", "true"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "query_acceleration_max_scale_factor", "8"), + + resource.TestCheckResourceAttr("snowflake_warehouse.w", "max_concurrency_level", "8"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "statement_queued_timeout_in_seconds", "0"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "statement_timeout_in_seconds", "172800"), + ), + }, + }, + }) +} + +func TestAcc_Warehouse_migrateFromVersion092_defaultsRemoved(t *testing.T) { + id := acc.TestClient().Ids.RandomAccountObjectIdentifier() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Warehouse), + + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "snowflake": { + VersionConstraint: "=0.92.0", + Source: "Snowflake-Labs/snowflake", + }, + }, + Config: warehouseWithSizeConfig(id.Name(), string(sdk.WarehouseSizeXSmall)), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", id.Name()), + + resource.TestCheckResourceAttr("snowflake_warehouse.w", "warehouse_type", string(sdk.WarehouseTypeStandard)), resource.TestCheckResourceAttr("snowflake_warehouse.w", "warehouse_size", string(sdk.WarehouseSizeXSmall)), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "max_cluster_count", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "min_cluster_count", "1"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "scaling_policy", string(sdk.ScalingPolicyStandard)), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_suspend", "600"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "auto_resume", "true"), + 
resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "initially_suspended"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "resource_monitor", "null"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "comment", ""), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "enable_query_acceleration", "false"), + resource.TestCheckNoResourceAttr("snowflake_warehouse.w", "query_acceleration_max_scale_factor"), + + resource.TestCheckResourceAttr("snowflake_warehouse.w", "max_concurrency_level", "8"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "statement_queued_timeout_in_seconds", "0"), + resource.TestCheckResourceAttr("snowflake_warehouse.w", "statement_timeout_in_seconds", "172800"), + ), + }, + { + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + Config: warehouseWithSizeConfig(id.Name(), string(sdk.WarehouseSizeXSmall)), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + planchecks.ExpectChange("snowflake_warehouse.w", "warehouse_type", tfjson.ActionUpdate, sdk.String(string(sdk.WarehouseTypeStandard)), nil), + planchecks.ExpectChange("snowflake_warehouse.w", "warehouse_size", tfjson.ActionUpdate, sdk.String(string(sdk.WarehouseSizeXSmall)), sdk.String(string(sdk.WarehouseSizeXSmall))), + planchecks.ExpectChange("snowflake_warehouse.w", "max_cluster_count", tfjson.ActionUpdate, sdk.String("1"), nil), + planchecks.ExpectChange("snowflake_warehouse.w", "min_cluster_count", tfjson.ActionUpdate, sdk.String("1"), nil), + planchecks.ExpectChange("snowflake_warehouse.w", "scaling_policy", tfjson.ActionUpdate, sdk.String(string(sdk.ScalingPolicyStandard)), nil), + planchecks.ExpectChange("snowflake_warehouse.w", "auto_suspend", tfjson.ActionUpdate, sdk.String("600"), sdk.String("-1")), + planchecks.ExpectChange("snowflake_warehouse.w", "auto_resume", tfjson.ActionUpdate, sdk.String("true"), sdk.String("unknown")), + planchecks.ExpectChange("snowflake_warehouse.w", 
"enable_query_acceleration", tfjson.ActionUpdate, sdk.String("false"), sdk.String("unknown")), + planchecks.ExpectChange("snowflake_warehouse.w", "query_acceleration_max_scale_factor", tfjson.ActionUpdate, sdk.String("8"), sdk.String("-1")), + + planchecks.ExpectChange("snowflake_warehouse.w", "max_concurrency_level", tfjson.ActionUpdate, sdk.String("8"), sdk.String("-1")), + planchecks.ExpectChange("snowflake_warehouse.w", "statement_queued_timeout_in_seconds", tfjson.ActionUpdate, sdk.String("0"), sdk.String("-1")), + planchecks.ExpectChange("snowflake_warehouse.w", "statement_timeout_in_seconds", tfjson.ActionUpdate, sdk.String("172800"), sdk.String("-1")), + }, + }, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", id.Name()), + ), + }, + }, + }) +} + +func TestAcc_Warehouse_migrateFromVersion092_warehouseSizeCausingForceNew(t *testing.T) { + id := acc.TestClient().Ids.RandomAccountObjectIdentifier() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Warehouse), + + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "snowflake": { + VersionConstraint: "=0.92.0", + Source: "Snowflake-Labs/snowflake", + }, + }, + Config: warehouseBasicConfig(id.Name()), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", id.Name()), ), }, { ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, Config: warehouseBasicConfig(id.Name()), ConfigPlanChecks: resource.ConfigPlanChecks{ - PreApply: []plancheck.PlanCheck{plancheck.ExpectEmptyPlan()}, + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction("snowflake_warehouse.w", plancheck.ResourceActionDestroyBeforeCreate), + }, }, Check: resource.ComposeTestCheckFunc( 
resource.TestCheckResourceAttr("snowflake_warehouse.w", "name", id.Name()), - resource.TestCheckResourceAttr("snowflake_warehouse.w", "warehouse_size", string(sdk.WarehouseSizeXSmall)), ), }, }, }) } +func warehouseBasicConfig(name string) string { + return fmt.Sprintf(` +resource "snowflake_warehouse" "w" { + name = "%s" +} +`, name) +} + +func secondWarehouseBasicConfig(name string) string { + return fmt.Sprintf(` +resource "snowflake_warehouse" "w2" { + name = "%s" +} +`, name) +} + +func warehouseBasicConfigWithQueryAcceleration(name string) string { + return fmt.Sprintf(` +resource "snowflake_warehouse" "w" { + name = "%s" + enable_query_acceleration = "true" + query_acceleration_max_scale_factor = "8" +} +`, name) +} + +func warehouseFullMigrationConfig(name string, withDeprecatedAttribute bool) string { + deprecatedAttribute := "" + if withDeprecatedAttribute { + deprecatedAttribute = "wait_for_provisioning = true" + } + return fmt.Sprintf(` +resource "snowflake_warehouse" "w" { + name = "%[1]s" + warehouse_type = "STANDARD" + warehouse_size = "XSMALL" + max_cluster_count = 1 + min_cluster_count = 1 + scaling_policy = "STANDARD" + auto_suspend = 600 + auto_resume = true + initially_suspended = false + enable_query_acceleration = true + query_acceleration_max_scale_factor = 8 + + max_concurrency_level = 8 + statement_queued_timeout_in_seconds = 0 + statement_timeout_in_seconds = 172800 + + %s +} +`, name, deprecatedAttribute) +} + +func warehouseBasicConfigWithComment(name string, comment string) string { + return fmt.Sprintf(` +resource "snowflake_warehouse" "w" { + name = "%s" + comment = "%s" +} +`, name, comment) +} + +func warehouseFullDefaultConfig(name string, comment string) string { + return warehouseFullDefaultConfigWithQueryAcceleration(name, comment, false, 8) +} + +func warehouseFullDefaultConfigWithQueryAcceleration(name string, comment string, enableQueryAcceleration bool, queryAccelerationMaxScaleFactor int) string { + return fmt.Sprintf(` 
+resource "snowflake_warehouse" "w" { + name = "%[1]s" + warehouse_type = "STANDARD" + warehouse_size = "XSMALL" + max_cluster_count = 1 + min_cluster_count = 1 + scaling_policy = "STANDARD" + auto_suspend = 600 + auto_resume = true + initially_suspended = false + comment = "%[2]s" + enable_query_acceleration = %[3]t + query_acceleration_max_scale_factor = %[4]d + + max_concurrency_level = 8 + statement_queued_timeout_in_seconds = 0 + statement_timeout_in_seconds = 172800 +} +`, name, comment, enableQueryAcceleration, queryAccelerationMaxScaleFactor) +} + +func warehouseFullDefaultConfigWithQueryAccelerationMaxScaleFactorRemoved(name string, comment string) string { + return fmt.Sprintf(` +resource "snowflake_warehouse" "w" { + name = "%[1]s" + warehouse_type = "STANDARD" + warehouse_size = "XSMALL" + max_cluster_count = 1 + min_cluster_count = 1 + scaling_policy = "STANDARD" + auto_suspend = 600 + auto_resume = true + initially_suspended = false + comment = "%[2]s" + enable_query_acceleration = false + + max_concurrency_level = 8 + statement_queued_timeout_in_seconds = 0 + statement_timeout_in_seconds = 172800 +} +`, name, comment) +} + +func warehouseFullDefaultWithoutParametersConfig(name string, comment string) string { + return fmt.Sprintf(` +resource "snowflake_warehouse" "w" { + name = "%[1]s" + warehouse_type = "STANDARD" + warehouse_size = "XSMALL" + max_cluster_count = 1 + min_cluster_count = 1 + scaling_policy = "STANDARD" + auto_suspend = 600 + auto_resume = true + initially_suspended = false + comment = "%[2]s" + enable_query_acceleration = false + query_acceleration_max_scale_factor = 8 +} +`, name, comment) +} + +func warehouseFullConfigNoDefaults(name string, comment string, id sdk.AccountObjectIdentifier) string { + return fmt.Sprintf(` +resource "snowflake_warehouse" "w" { + name = "%[1]s" + warehouse_type = "SNOWPARK-OPTIMIZED" + warehouse_size = "MEDIUM" + max_cluster_count = 4 + min_cluster_count = 2 + scaling_policy = "ECONOMY" + auto_suspend 
= 1200 + auto_resume = false + initially_suspended = false + resource_monitor = "%[3]s" + comment = "%[2]s" + enable_query_acceleration = true + query_acceleration_max_scale_factor = 4 + + max_concurrency_level = 4 + statement_queued_timeout_in_seconds = 5 + statement_timeout_in_seconds = 86400 +} +`, name, comment, id.Name()) +} + func warehouseWithSizeConfig(name string, size string) string { return fmt.Sprintf(` resource "snowflake_warehouse" "w" { @@ -1084,23 +1918,15 @@ resource "snowflake_warehouse" "w" { `, name, warehouseType, size) } -func warehouseWithAutoResumeConfig(name string, autoResume bool) string { +func warehouseWithAutoResumeConfig(name string, autoResume string) string { return fmt.Sprintf(` resource "snowflake_warehouse" "w" { name = "%s" - auto_resume = "%t" + auto_resume = "%s" } `, name, autoResume) } -func warehouseBasicConfig(name string) string { - return fmt.Sprintf(` -resource "snowflake_warehouse" "w" { - name = "%s" -} -`, name) -} - func warehouseWithAllValidZeroValuesConfig(name string) string { return fmt.Sprintf(` resource "snowflake_warehouse" "w" { @@ -1121,3 +1947,80 @@ resource "snowflake_warehouse" "w" { } `, name, value) } + +func warehouseWithInitiallySuspendedConfig(name string, initiallySuspended bool) string { + return fmt.Sprintf(` +resource "snowflake_warehouse" "w" { + name = "%s" + initially_suspended = %t +} +`, name, initiallySuspended) +} + +func warehouseFullMigrationConfigWithSize(name string, comment string, size sdk.WarehouseSize) string { + return fmt.Sprintf(` +resource "snowflake_warehouse" "w" { + name = "%[1]s" + warehouse_type = "STANDARD" + warehouse_size = "%[3]s" + max_cluster_count = 1 + min_cluster_count = 1 + scaling_policy = "STANDARD" + auto_suspend = 600 + auto_resume = true + initially_suspended = false + comment = "%[2]s" + enable_query_acceleration = true + query_acceleration_max_scale_factor = 8 + + max_concurrency_level = 8 + statement_queued_timeout_in_seconds = 0 + 
statement_timeout_in_seconds = 172800 +} +`, name, comment, size) +} + +func warehouseConfigWithMaxClusterCount(name string, count int) string { + return fmt.Sprintf(` +resource "snowflake_warehouse" "w" { + name = "%s" + max_cluster_count = "%d" +} +`, name, count) +} + +func warehouseConfigWithMinClusterCount(name string, count int) string { + return fmt.Sprintf(` +resource "snowflake_warehouse" "w" { + name = "%s" + min_cluster_count = "%d" +} +`, name, count) +} + +func warehouseConfigWithScalingPolicy(name string, policy sdk.ScalingPolicy) string { + return fmt.Sprintf(` +resource "snowflake_warehouse" "w" { + name = "%s" + scaling_policy = "%s" +} +`, name, policy) +} + +func warehouseConfigWithMaxConcurrencyLevel(name string, level int) string { + return fmt.Sprintf(` +resource "snowflake_warehouse" "w" { + name = "%s" + max_concurrency_level = "%d" +} +`, name, level) +} + +func warehouseConfigWithAutoSuspend(name string, autoSuspend int) string { + return fmt.Sprintf(` +resource "snowflake_warehouse" "w" { + name = "%s" + auto_suspend = "%d" +} +`, name, autoSuspend) +} diff --git a/pkg/resources/warehouse_rework_parameters_proposal.go b/pkg/resources/warehouse_rework_parameters_proposal.go index 78f04d71a0..35937fec7a 100644 --- a/pkg/resources/warehouse_rework_parameters_proposal.go +++ b/pkg/resources/warehouse_rework_parameters_proposal.go @@ -12,9 +12,9 @@ import ( const parametersAttributeName = "parameters" // markChangedParameters assumes that the snowflake parameter name is mirrored in schema (as lower-cased name) -// TODO [after discussion/next PR]: test (unit and acceptance) -// TODO [after discussion/next PR]: more readable errors -// TODO [after discussion/next PR]: handle different types than int +// TODO [SNOW-1348102 - after discussion]: test (unit and acceptance) +// TODO [SNOW-1348102 - after discussion]: more readable errors +// TODO [SNOW-1348102 - after discussion]: handle different types than int func 
markChangedParameters(objectParameters []sdk.ObjectParameter, currentParameters []*sdk.Parameter, d *schema.ResourceData, level sdk.ParameterType) error { for _, param := range objectParameters { currentSnowflakeParameter, err := collections.FindOne(currentParameters, func(p *sdk.Parameter) bool { @@ -42,7 +42,7 @@ func markChangedParameters(objectParameters []sdk.ObjectParameter, currentParame // 1. if it was missing in config before, then no drift will be reported // 2. if it had a non-empty value, then the drift will be reported and the value will be set during update if (*currentSnowflakeParameter).Level != level { - // TODO [after discussion/next PR]: this is currently set to an artificial default + // TODO [SNOW-1348102 - after discussion]: this is currently set to an artificial default if err = d.Set(strings.ToLower(string(param)), -1); err != nil { return err } diff --git a/pkg/resources/warehouse_state_upgraders.go b/pkg/resources/warehouse_state_upgraders.go index 0d1632882f..8803c4fcc3 100644 --- a/pkg/resources/warehouse_state_upgraders.go +++ b/pkg/resources/warehouse_state_upgraders.go @@ -5,10 +5,12 @@ import ( "fmt" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" ) -func v091ToWarehouseSize(s string) (sdk.WarehouseSize, error) { +func v092ToWarehouseSize(s string) (sdk.WarehouseSize, error) { s = strings.ToUpper(s) switch s { case "XSMALL", "X-SMALL": @@ -36,30 +38,55 @@ func v091ToWarehouseSize(s string) (sdk.WarehouseSize, error) { } } -// v092WarehouseSizeStateUpgrader is needed because we are removing incorrect mapped values from sdk.ToWarehouseSize (like 2XLARGE, 3XLARGE, ...) 
-// Result of: -// - https://github.com/Snowflake-Labs/terraform-provider-snowflake/pull/1873 -// - https://github.com/Snowflake-Labs/terraform-provider-snowflake/pull/1946 -// - https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1889#issuecomment-1631149585 -func v092WarehouseSizeStateUpgrader(_ context.Context, rawState map[string]interface{}, _ interface{}) (map[string]interface{}, error) { +// v092WarehouseSizeStateUpgrader is needed because: +// - we are removing incorrect mapped values from sdk.ToWarehouseSize (like 2XLARGE, 3XLARGE, ...); result of: +// - https://github.com/Snowflake-Labs/terraform-provider-snowflake/pull/1873 +// - https://github.com/Snowflake-Labs/terraform-provider-snowflake/pull/1946 +// - https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1889#issuecomment-1631149585 +// +// - deprecated wait_for_provisioning attribute was removed +// - clear the old resource monitor representation +// - set query_acceleration_max_scale_factor +func v092WarehouseSizeStateUpgrader(ctx context.Context, rawState map[string]any, meta any) (map[string]any, error) { if rawState == nil { return rawState, nil } oldWarehouseSize := rawState["warehouse_size"].(string) - if oldWarehouseSize == "" { - return rawState, nil + if oldWarehouseSize != "" { + warehouseSize, err := v092ToWarehouseSize(oldWarehouseSize) + if err != nil { + return nil, err + } + rawState["warehouse_size"] = string(warehouseSize) + } + + // remove deprecated attribute + delete(rawState, "wait_for_provisioning") + + // clear the old resource monitor representation + oldResourceMonitor := rawState["resource_monitor"].(string) + if oldResourceMonitor == "null" { + delete(rawState, "resource_monitor") } - warehouseSize, err := v091ToWarehouseSize(oldWarehouseSize) + // get the warehouse from Snowflake + client := meta.(*provider.Context).Client + id := helpers.DecodeSnowflakeID(rawState["id"].(string)).(sdk.AccountObjectIdentifier) + + w, err := 
client.Warehouses.ShowByID(ctx, id) if err != nil { return nil, err } - rawState["warehouse_size"] = string(warehouseSize) - // TODO [this PR]: clear wait_for_provisioning and test - // TODO [this PR]: adjust other fields if needed - // TODO [this PR]: adjust description of the upgrader + // fill out query_acceleration_max_scale_factor in state if it was disabled before (old coupling logic that was removed) + // - if config have no value, then we should have a UNSET after migration + // - if config have the same value, then we should have a no-op after migration + // - if config have different value, then we will have SET after migration + previousEnableQueryAcceleration := rawState["enable_query_acceleration"].(bool) + if !previousEnableQueryAcceleration { + rawState["query_acceleration_max_scale_factor"] = w.QueryAccelerationMaxScaleFactor + } return rawState, nil } diff --git a/pkg/schemas/gen/schema_field_mapper.go b/pkg/schemas/gen/schema_field_mapper.go index fc5b7072ab..189d70456a 100644 --- a/pkg/schemas/gen/schema_field_mapper.go +++ b/pkg/schemas/gen/schema_field_mapper.go @@ -21,6 +21,7 @@ var ( Identity = func(field string) string { return field } ToString = func(field string) string { return fmt.Sprintf("%s.String()", field) } FullyQualifiedName = func(field string) string { return fmt.Sprintf("%s.FullyQualifiedName()", field) } + Name = func(field string) string { return fmt.Sprintf("%s.Name()", field) } CastToString = func(field string) string { return fmt.Sprintf("string(%s)", field) } CastToInt = func(field string) string { return fmt.Sprintf("int(%s)", field) } ) @@ -44,8 +45,9 @@ func MapToSchemaField(field Field) SchemaField { return SchemaField{name, schema.TypeBool, field.Name, isPointer, Identity} case "time.Time": return SchemaField{name, schema.TypeString, field.Name, isPointer, ToString} - case "sdk.AccountIdentifier", "sdk.ExternalObjectIdentifier", - "sdk.AccountObjectIdentifier", "sdk.DatabaseObjectIdentifier", + case 
"sdk.AccountObjectIdentifier": + return SchemaField{name, schema.TypeString, field.Name, isPointer, Name} + case "sdk.AccountIdentifier", "sdk.ExternalObjectIdentifier", "sdk.DatabaseObjectIdentifier", "sdk.SchemaObjectIdentifier", "sdk.TableColumnIdentifier": return SchemaField{name, schema.TypeString, field.Name, isPointer, FullyQualifiedName} case "sdk.ObjectIdentifier": diff --git a/pkg/schemas/gen/schema_field_mapper_test.go b/pkg/schemas/gen/schema_field_mapper_test.go index b13053fac3..1f2f23932a 100644 --- a/pkg/schemas/gen/schema_field_mapper_test.go +++ b/pkg/schemas/gen/schema_field_mapper_test.go @@ -86,7 +86,7 @@ func Test_MapToSchemaField(t *testing.T) { }, { field: Field{"unexportedAccountObjectIdentifier", "sdk.AccountObjectIdentifier", "struct"}, - expected: expectedValues{"unexported_account_object_identifier", schema.TypeString, false, FullyQualifiedName}, + expected: expectedValues{"unexported_account_object_identifier", schema.TypeString, false, Name}, }, { field: Field{"unexportedDatabaseObjectIdentifier", "sdk.DatabaseObjectIdentifier", "struct"}, @@ -110,7 +110,7 @@ func Test_MapToSchemaField(t *testing.T) { }, { field: Field{"unexportedAccountObjectIdentifierPtr", "*sdk.AccountObjectIdentifier", "*struct"}, - expected: expectedValues{"unexported_account_object_identifier_ptr", schema.TypeString, true, FullyQualifiedName}, + expected: expectedValues{"unexported_account_object_identifier_ptr", schema.TypeString, true, Name}, }, { field: Field{"unexportedDatabaseObjectIdentifierPtr", "*sdk.DatabaseObjectIdentifier", "*struct"}, diff --git a/pkg/schemas/grant_gen.go b/pkg/schemas/grant_gen.go index aae677fe4b..a1fe5fb227 100644 --- a/pkg/schemas/grant_gen.go +++ b/pkg/schemas/grant_gen.go @@ -64,7 +64,7 @@ func GrantToSchema(grant *sdk.Grant) map[string]any { grantSchema["grant_to"] = string(grant.GrantTo) grantSchema["grantee_name"] = grant.GranteeName.FullyQualifiedName() grantSchema["grant_option"] = grant.GrantOption - 
grantSchema["granted_by"] = grant.GrantedBy.FullyQualifiedName() + grantSchema["granted_by"] = grant.GrantedBy.Name() return grantSchema } diff --git a/pkg/schemas/share_gen.go b/pkg/schemas/share_gen.go index 958392ea8e..c3fbe2d101 100644 --- a/pkg/schemas/share_gen.go +++ b/pkg/schemas/share_gen.go @@ -46,7 +46,7 @@ func ShareToSchema(share *sdk.Share) map[string]any { shareSchema["created_on"] = share.CreatedOn.String() shareSchema["kind"] = string(share.Kind) shareSchema["name"] = share.Name.FullyQualifiedName() - shareSchema["database_name"] = share.DatabaseName.FullyQualifiedName() + shareSchema["database_name"] = share.DatabaseName.Name() shareSchema["to"] = share.To shareSchema["owner"] = share.Owner shareSchema["comment"] = share.Comment diff --git a/pkg/schemas/warehouse_gen.go b/pkg/schemas/warehouse_gen.go index 21ee709de6..f8fca72972 100644 --- a/pkg/schemas/warehouse_gen.go +++ b/pkg/schemas/warehouse_gen.go @@ -147,7 +147,7 @@ func WarehouseToSchema(warehouse *sdk.Warehouse) map[string]any { warehouseSchema["comment"] = warehouse.Comment warehouseSchema["enable_query_acceleration"] = warehouse.EnableQueryAcceleration warehouseSchema["query_acceleration_max_scale_factor"] = warehouse.QueryAccelerationMaxScaleFactor - warehouseSchema["resource_monitor"] = warehouse.ResourceMonitor + warehouseSchema["resource_monitor"] = warehouse.ResourceMonitor.Name() warehouseSchema["scaling_policy"] = string(warehouse.ScalingPolicy) warehouseSchema["owner_role_type"] = warehouse.OwnerRoleType return warehouseSchema diff --git a/pkg/sdk/grants.go b/pkg/sdk/grants.go index a3e87e3cec..6a5bbef98c 100644 --- a/pkg/sdk/grants.go +++ b/pkg/sdk/grants.go @@ -266,7 +266,7 @@ func (row grantRow) convert() *Grant { // TODO(SNOW-1058419): Change identifier parsing during identifiers rework name, err := ParseObjectIdentifier(row.Name) if err != nil { - log.Printf("Failed to parse identifier: %s", err) + log.Printf("Failed to parse identifier [%s], err = \"%s\"; falling back to 
fully qualified name conversion", row.Name, err) name = NewObjectIdentifierFromFullyQualifiedName(row.Name) } diff --git a/pkg/sdk/testint/warehouses_integration_test.go b/pkg/sdk/testint/warehouses_integration_test.go index 7e37f78515..953c11e822 100644 --- a/pkg/sdk/testint/warehouses_integration_test.go +++ b/pkg/sdk/testint/warehouses_integration_test.go @@ -1,19 +1,17 @@ package testint import ( + "fmt" "testing" "time" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/helpers/random" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -// TODO [SNOW-1348102 - next PR]: add resource monitor test -// TODO [SNOW-1348102 - next PR]: add test for auto resume (proving SF bug; more unset tests? - yes) -// TODO [SNOW-1348102 - next PR]: test setting empty comment -// TODO [SNOW-1348102 - next PR]: test how suspension.resuming works for different states func TestInt_Warehouses(t *testing.T) { client := testClient(t) ctx := testContext(t) @@ -32,6 +30,14 @@ func TestInt_Warehouses(t *testing.T) { tag2, tag2Cleanup := testClientHelper().Tag.CreateTag(t) t.Cleanup(tag2Cleanup) + resourceMonitor, resourceMonitorCleanup := testClientHelper().ResourceMonitor.CreateResourceMonitor(t) + t.Cleanup(resourceMonitorCleanup) + + startLongRunningQuery := func() { + go client.ExecForTests(ctx, "CALL SYSTEM$WAIT(15);") //nolint:errcheck // we don't care if this eventually errors, as long as it runs for a little while + time.Sleep(3 * time.Second) + } + t.Run("show: without options", func(t *testing.T) { warehouses, err := client.Warehouses.Show(ctx, nil) require.NoError(t, err) @@ -86,6 +92,7 @@ func TestInt_Warehouses(t *testing.T) { AutoSuspend: sdk.Int(1000), AutoResume: sdk.Bool(true), InitiallySuspended: sdk.Bool(false), + ResourceMonitor: sdk.Pointer(resourceMonitor.ID()), Comment: 
sdk.String("comment"), EnableQueryAcceleration: sdk.Bool(true), QueryAccelerationMaxScaleFactor: sdk.Int(90), @@ -106,14 +113,8 @@ func TestInt_Warehouses(t *testing.T) { require.NoError(t, err) t.Cleanup(testClientHelper().Warehouse.DropWarehouseFunc(t, id)) - warehouses, err := client.Warehouses.Show(ctx, &sdk.ShowWarehouseOptions{ - Like: &sdk.Like{ - Pattern: sdk.String(id.Name()), - }, - }) + warehouse, err := client.Warehouses.ShowByID(ctx, id) require.NoError(t, err) - require.Equal(t, 1, len(warehouses)) - warehouse := warehouses[0] assert.Equal(t, id.Name(), warehouse.Name) assert.Equal(t, sdk.WarehouseTypeStandard, warehouse.Type) assert.Equal(t, sdk.WarehouseSizeSmall, warehouse.Size) @@ -123,6 +124,7 @@ func TestInt_Warehouses(t *testing.T) { assert.Equal(t, 1000, warehouse.AutoSuspend) assert.Equal(t, true, warehouse.AutoResume) assert.Contains(t, []sdk.WarehouseState{sdk.WarehouseStateResuming, sdk.WarehouseStateStarted}, warehouse.State) + assert.Equal(t, resourceMonitor.ID().Name(), warehouse.ResourceMonitor.Name()) assert.Equal(t, "comment", warehouse.Comment) assert.Equal(t, true, warehouse.EnableQueryAcceleration) assert.Equal(t, 90, warehouse.QueryAccelerationMaxScaleFactor) @@ -141,14 +143,8 @@ func TestInt_Warehouses(t *testing.T) { require.NoError(t, err) t.Cleanup(testClientHelper().Warehouse.DropWarehouseFunc(t, id)) - warehouses, err := client.Warehouses.Show(ctx, &sdk.ShowWarehouseOptions{ - Like: &sdk.Like{ - Pattern: sdk.String(id.Name()), - }, - }) + result, err := client.Warehouses.ShowByID(ctx, id) require.NoError(t, err) - require.Equal(t, 1, len(warehouses)) - result := warehouses[0] assert.Equal(t, id.Name(), result.Name) assert.Equal(t, sdk.WarehouseTypeStandard, result.Type) assert.Equal(t, sdk.WarehouseSizeXSmall, result.Size) @@ -163,76 +159,270 @@ func TestInt_Warehouses(t *testing.T) { assert.Equal(t, 8, result.QueryAccelerationMaxScaleFactor) }) - t.Run("describe: when warehouse exists", func(t *testing.T) { - result, err 
:= client.Warehouses.Describe(ctx, precreatedWarehouseId) + t.Run("create: empty comment", func(t *testing.T) { + id := testClientHelper().Ids.RandomAccountObjectIdentifier() + err := client.Warehouses.Create(ctx, id, &sdk.CreateWarehouseOptions{Comment: sdk.String("")}) require.NoError(t, err) - assert.Equal(t, precreatedWarehouseId.Name(), result.Name) - assert.Equal(t, "WAREHOUSE", result.Kind) - assert.WithinDuration(t, time.Now(), result.CreatedOn, 1*time.Minute) - }) + t.Cleanup(testClientHelper().Warehouse.DropWarehouseFunc(t, id)) - t.Run("describe: when warehouse does not exist", func(t *testing.T) { - id := NonExistingAccountObjectIdentifier - _, err := client.Warehouses.Describe(ctx, id) - assert.ErrorIs(t, err, sdk.ErrObjectNotExistOrAuthorized) + result, err := client.Warehouses.ShowByID(ctx, id) + require.NoError(t, err) + assert.Equal(t, "", result.Comment) }) t.Run("alter: set and unset", func(t *testing.T) { - createOptions := &sdk.CreateWarehouseOptions{ - Comment: sdk.String("test comment"), - MaxClusterCount: sdk.Int(10), - } - id := testClientHelper().Ids.RandomAccountObjectIdentifier() // new warehouse created on purpose - warehouse, warehouseCleanup := testClientHelper().Warehouse.CreateWarehouseWithOptions(t, id, createOptions) + warehouse, warehouseCleanup := testClientHelper().Warehouse.CreateWarehouse(t) t.Cleanup(warehouseCleanup) + assert.Equal(t, sdk.WarehouseSizeXSmall, warehouse.Size) + assert.Equal(t, 1, warehouse.MaxClusterCount) + assert.Equal(t, 1, warehouse.MinClusterCount) + assert.Equal(t, sdk.ScalingPolicyStandard, warehouse.ScalingPolicy) + assert.Equal(t, 600, warehouse.AutoSuspend) + assert.Equal(t, true, warehouse.AutoResume) + assert.Equal(t, "", warehouse.ResourceMonitor.Name()) + assert.Equal(t, "", warehouse.Comment) + assert.Equal(t, false, warehouse.EnableQueryAcceleration) + assert.Equal(t, 8, warehouse.QueryAccelerationMaxScaleFactor) + alterOptions := &sdk.AlterWarehouseOptions{ + // WarehouseType omitted on 
purpose - it requires suspending the warehouse (separate test cases) Set: &sdk.WarehouseSet{ - WarehouseSize: &sdk.WarehouseSizeMedium, - AutoSuspend: sdk.Int(1234), - EnableQueryAcceleration: sdk.Bool(true), + WarehouseSize: &sdk.WarehouseSizeMedium, + WaitForCompletion: sdk.Bool(true), + MaxClusterCount: sdk.Int(3), + MinClusterCount: sdk.Int(2), + ScalingPolicy: sdk.Pointer(sdk.ScalingPolicyEconomy), + AutoSuspend: sdk.Int(1234), + AutoResume: sdk.Bool(false), + ResourceMonitor: resourceMonitor.ID(), + Comment: sdk.String("new comment"), + EnableQueryAcceleration: sdk.Bool(true), + QueryAccelerationMaxScaleFactor: sdk.Int(2), }, } err := client.Warehouses.Alter(ctx, warehouse.ID(), alterOptions) require.NoError(t, err) - warehouses, err := client.Warehouses.Show(ctx, &sdk.ShowWarehouseOptions{ - Like: &sdk.Like{ - Pattern: sdk.String(warehouse.Name), - }, - }) - assert.Equal(t, 1, len(warehouses)) - result := warehouses[0] + + warehouseAfterSet, err := client.Warehouses.ShowByID(ctx, warehouse.ID()) require.NoError(t, err) - assert.Equal(t, sdk.WarehouseSizeMedium, result.Size) - assert.Equal(t, true, result.EnableQueryAcceleration) - assert.Equal(t, 1234, result.AutoSuspend) - assert.Equal(t, "test comment", result.Comment) - assert.Equal(t, 10, result.MaxClusterCount) + assert.Equal(t, sdk.WarehouseSizeMedium, warehouseAfterSet.Size) + assert.Equal(t, 3, warehouseAfterSet.MaxClusterCount) + assert.Equal(t, 2, warehouseAfterSet.MinClusterCount) + assert.Equal(t, sdk.ScalingPolicyEconomy, warehouseAfterSet.ScalingPolicy) + assert.Equal(t, 1234, warehouseAfterSet.AutoSuspend) + assert.Equal(t, false, warehouseAfterSet.AutoResume) + assert.Equal(t, resourceMonitor.ID().Name(), warehouseAfterSet.ResourceMonitor.Name()) + assert.Equal(t, "new comment", warehouseAfterSet.Comment) + assert.Equal(t, true, warehouseAfterSet.EnableQueryAcceleration) + assert.Equal(t, 2, warehouseAfterSet.QueryAccelerationMaxScaleFactor) alterOptions = &sdk.AlterWarehouseOptions{ + // 
WarehouseSize omitted on purpose - UNSET is not supported for warehouse size + // WarehouseType, ScalingPolicy, AutoSuspend, and AutoResume omitted on purpose - UNSET do not work correctly + // WaitForCompletion omitted on purpose - no unset Unset: &sdk.WarehouseUnset{ - Comment: sdk.Bool(true), - MaxClusterCount: sdk.Bool(true), + MaxClusterCount: sdk.Bool(true), + MinClusterCount: sdk.Bool(true), + ResourceMonitor: sdk.Bool(true), + Comment: sdk.Bool(true), + EnableQueryAcceleration: sdk.Bool(true), + QueryAccelerationMaxScaleFactor: sdk.Bool(true), }, } - err = client.Warehouses.Alter(ctx, id, alterOptions) + err = client.Warehouses.Alter(ctx, warehouse.ID(), alterOptions) require.NoError(t, err) - warehouses, err = client.Warehouses.Show(ctx, &sdk.ShowWarehouseOptions{ - Like: &sdk.Like{ - Pattern: sdk.String(warehouse.Name), + warehouseAfterUnset, err := client.Warehouses.ShowByID(ctx, warehouse.ID()) + require.NoError(t, err) + assert.Equal(t, 1, warehouseAfterUnset.MaxClusterCount) + assert.Equal(t, 1, warehouseAfterUnset.MinClusterCount) + assert.Equal(t, "", warehouseAfterUnset.ResourceMonitor.Name()) + assert.Equal(t, "", warehouseAfterUnset.Comment) + assert.Equal(t, false, warehouseAfterUnset.EnableQueryAcceleration) + assert.Equal(t, 8, warehouseAfterUnset.QueryAccelerationMaxScaleFactor) + }) + + t.Run("alter: set and unset parameters", func(t *testing.T) { + // new warehouse created on purpose + warehouse, warehouseCleanup := testClientHelper().Warehouse.CreateWarehouse(t) + t.Cleanup(warehouseCleanup) + + parameters := testClientHelper().Parameter.ShowWarehouseParameters(t, warehouse.ID()) + + assert.Equal(t, "8", helpers.FindParameter(t, parameters, sdk.AccountParameterMaxConcurrencyLevel).Value) + assert.Equal(t, "0", helpers.FindParameter(t, parameters, sdk.AccountParameterStatementQueuedTimeoutInSeconds).Value) + assert.Equal(t, "172800", helpers.FindParameter(t, parameters, sdk.AccountParameterStatementTimeoutInSeconds).Value) + + alterOptions 
:= &sdk.AlterWarehouseOptions{ + Set: &sdk.WarehouseSet{ + MaxConcurrencyLevel: sdk.Int(4), + StatementQueuedTimeoutInSeconds: sdk.Int(2), + StatementTimeoutInSeconds: sdk.Int(86400), + }, + } + err := client.Warehouses.Alter(ctx, warehouse.ID(), alterOptions) + require.NoError(t, err) + + parametersAfterSet := testClientHelper().Parameter.ShowWarehouseParameters(t, warehouse.ID()) + assert.Equal(t, "4", helpers.FindParameter(t, parametersAfterSet, sdk.AccountParameterMaxConcurrencyLevel).Value) + assert.Equal(t, "2", helpers.FindParameter(t, parametersAfterSet, sdk.AccountParameterStatementQueuedTimeoutInSeconds).Value) + assert.Equal(t, "86400", helpers.FindParameter(t, parametersAfterSet, sdk.AccountParameterStatementTimeoutInSeconds).Value) + + alterOptions = &sdk.AlterWarehouseOptions{ + Unset: &sdk.WarehouseUnset{ + MaxConcurrencyLevel: sdk.Bool(true), + StatementQueuedTimeoutInSeconds: sdk.Bool(true), + StatementTimeoutInSeconds: sdk.Bool(true), }, + } + err = client.Warehouses.Alter(ctx, warehouse.ID(), alterOptions) + require.NoError(t, err) + + parametersAfterUnset := testClientHelper().Parameter.ShowWarehouseParameters(t, warehouse.ID()) + assert.Equal(t, "8", helpers.FindParameter(t, parametersAfterUnset, sdk.AccountParameterMaxConcurrencyLevel).Value) + assert.Equal(t, "0", helpers.FindParameter(t, parametersAfterUnset, sdk.AccountParameterStatementQueuedTimeoutInSeconds).Value) + assert.Equal(t, "172800", helpers.FindParameter(t, parametersAfterUnset, sdk.AccountParameterStatementTimeoutInSeconds).Value) + }) + + t.Run("alter: set and unset warehouse type with started warehouse", func(t *testing.T) { + id := testClientHelper().Ids.RandomAccountObjectIdentifier() + // new warehouse created on purpose - we need medium to be able to use snowpark-optimized type + warehouse, warehouseCleanup := testClientHelper().Warehouse.CreateWarehouseWithOptions(t, id, &sdk.CreateWarehouseOptions{ + WarehouseSize: sdk.Pointer(sdk.WarehouseSizeMedium), }) + 
t.Cleanup(warehouseCleanup) + + returnedWarehouse, err := client.Warehouses.ShowByID(ctx, warehouse.ID()) require.NoError(t, err) - assert.Equal(t, 1, len(warehouses)) - result = warehouses[0] - assert.Equal(t, warehouse.Name, result.Name) - assert.Equal(t, "", result.Comment) - assert.Equal(t, 1, result.MaxClusterCount) - assert.Equal(t, sdk.WarehouseSizeMedium, result.Size) - assert.Equal(t, true, result.EnableQueryAcceleration) - assert.Equal(t, 1234, result.AutoSuspend) + assert.Equal(t, sdk.WarehouseTypeStandard, returnedWarehouse.Type) + assert.Equal(t, sdk.WarehouseStateStarted, returnedWarehouse.State) + + alterOptions := &sdk.AlterWarehouseOptions{ + Set: &sdk.WarehouseSet{WarehouseType: sdk.Pointer(sdk.WarehouseTypeSnowparkOptimized)}, + } + err = client.Warehouses.Alter(ctx, warehouse.ID(), alterOptions) + require.NoError(t, err) + + returnedWarehouse, err = client.Warehouses.ShowByID(ctx, warehouse.ID()) + require.NoError(t, err) + assert.Equal(t, sdk.WarehouseTypeSnowparkOptimized, returnedWarehouse.Type) + assert.Equal(t, sdk.WarehouseStateStarted, returnedWarehouse.State) + + // TODO [SNOW-1473453]: uncomment and test when UNSET starts working correctly (expecting to unset to default type STANDARD) + // alterOptions = &sdk.AlterWarehouseOptions{ + // Unset: &sdk.WarehouseUnset{WarehouseType: sdk.Bool(true)}, + // } + // err = client.Warehouses.Alter(ctx, warehouse.ID(), alterOptions) + // require.NoError(t, err) + // + // returnedWarehouse, err = client.Warehouses.ShowByID(ctx, warehouse.ID()) + // require.NoError(t, err) + // assert.Equal(t, sdk.WarehouseTypeStandard, returnedWarehouse.Type) + // assert.Equal(t, sdk.WarehouseStateStarted, returnedWarehouse.State) + }) + + t.Run("alter: set and unset warehouse type with suspended warehouse", func(t *testing.T) { + id := testClientHelper().Ids.RandomAccountObjectIdentifier() + // new warehouse created on purpose - we need medium to be able to use snowpark-optimized type + warehouse, warehouseCleanup 
:= testClientHelper().Warehouse.CreateWarehouseWithOptions(t, id, &sdk.CreateWarehouseOptions{ + WarehouseSize: sdk.Pointer(sdk.WarehouseSizeMedium), + InitiallySuspended: sdk.Bool(true), + }) + t.Cleanup(warehouseCleanup) + + returnedWarehouse, err := client.Warehouses.ShowByID(ctx, warehouse.ID()) + require.NoError(t, err) + assert.Equal(t, sdk.WarehouseTypeStandard, returnedWarehouse.Type) + assert.Equal(t, sdk.WarehouseStateSuspended, returnedWarehouse.State) + + alterOptions := &sdk.AlterWarehouseOptions{ + Set: &sdk.WarehouseSet{WarehouseType: sdk.Pointer(sdk.WarehouseTypeSnowparkOptimized)}, + } + err = client.Warehouses.Alter(ctx, warehouse.ID(), alterOptions) + require.NoError(t, err) + + returnedWarehouse, err = client.Warehouses.ShowByID(ctx, warehouse.ID()) + require.NoError(t, err) + assert.Equal(t, sdk.WarehouseTypeSnowparkOptimized, returnedWarehouse.Type) + assert.Equal(t, sdk.WarehouseStateSuspended, returnedWarehouse.State) + + // TODO [SNOW-1473453]: uncomment and test when UNSET starts working correctly (expecting to unset to default type STANDARD) + // alterOptions = &sdk.AlterWarehouseOptions{ + // Unset: &sdk.WarehouseUnset{WarehouseType: sdk.Bool(true)}, + // } + // err = client.Warehouses.Alter(ctx, warehouse.ID(), alterOptions) + // require.NoError(t, err) + // + // returnedWarehouse, err = client.Warehouses.ShowByID(ctx, warehouse.ID()) + // require.NoError(t, err) + // assert.Equal(t, sdk.WarehouseTypeStandard, returnedWarehouse.Type) + // assert.Equal(t, sdk.WarehouseStateStarted, returnedWarehouse.State) + }) + + t.Run("alter: prove problems with unset auto suspend", func(t *testing.T) { + // new warehouse created on purpose + warehouse, warehouseCleanup := testClientHelper().Warehouse.CreateWarehouse(t) + t.Cleanup(warehouseCleanup) + + alterOptions := &sdk.AlterWarehouseOptions{ + Unset: &sdk.WarehouseUnset{AutoSuspend: sdk.Bool(true)}, + } + err := client.Warehouses.Alter(ctx, warehouse.ID(), alterOptions) + require.NoError(t, err) 
+ + returnedWarehouse, err := client.Warehouses.ShowByID(ctx, warehouse.ID()) + require.NoError(t, err) + // TODO [SNOW-1473453]: change when UNSET starts working correctly (expecting to unset to default 600) + // assert.Equal(t, 600, returnedWarehouse.AutoSuspend) + assert.Equal(t, 0, returnedWarehouse.AutoSuspend) + }) + + t.Run("alter: prove problems with unset warehouse type", func(t *testing.T) { + // new warehouse created on purpose + warehouse, warehouseCleanup := testClientHelper().Warehouse.CreateWarehouse(t) + t.Cleanup(warehouseCleanup) + + alterOptions := &sdk.AlterWarehouseOptions{ + Unset: &sdk.WarehouseUnset{WarehouseType: sdk.Bool(true)}, + } + err := client.Warehouses.Alter(ctx, warehouse.ID(), alterOptions) + // TODO [SNOW-1473453]: change when UNSET starts working correctly (expecting to unset to default type STANDARD) + require.Error(t, err) + require.ErrorContains(t, err, "invalid type of property 'null' for 'WAREHOUSE_TYPE'") + }) + + t.Run("alter: prove problems with unset scaling policy", func(t *testing.T) { + // new warehouse created on purpose + warehouse, warehouseCleanup := testClientHelper().Warehouse.CreateWarehouse(t) + t.Cleanup(warehouseCleanup) + + alterOptions := &sdk.AlterWarehouseOptions{ + Unset: &sdk.WarehouseUnset{ScalingPolicy: sdk.Bool(true)}, + } + err := client.Warehouses.Alter(ctx, warehouse.ID(), alterOptions) + // TODO [SNOW-1473453]: change when UNSET starts working correctly (expecting to unset to default scaling policy STANDARD) + require.Error(t, err) + require.ErrorContains(t, err, "invalid type of property 'null' for 'SCALING_POLICY'") + }) + + t.Run("alter: prove problems with unset auto resume", func(t *testing.T) { + // new warehouse created on purpose + warehouse, warehouseCleanup := testClientHelper().Warehouse.CreateWarehouse(t) + t.Cleanup(warehouseCleanup) + + alterOptions := &sdk.AlterWarehouseOptions{ + Unset: &sdk.WarehouseUnset{AutoResume: sdk.Bool(true)}, + } + err := client.Warehouses.Alter(ctx, 
warehouse.ID(), alterOptions) + require.NoError(t, err) + + returnedWarehouse, err := client.Warehouses.ShowByID(ctx, warehouse.ID()) + require.NoError(t, err) + // TODO [SNOW-1473453]: change when UNSET starts working correctly (expecting to unset to default auto resume TRUE) + // assert.Equal(t, true, returnedWarehouse.AutoResume) + assert.Equal(t, false, returnedWarehouse.AutoResume) }) t.Run("alter: rename", func(t *testing.T) { @@ -253,6 +443,42 @@ func TestInt_Warehouses(t *testing.T) { assert.Equal(t, newID.Name(), result.Name) }) + // This proves that we don't have to handle empty comment inside the resource. + t.Run("alter: set empty comment versus unset", func(t *testing.T) { + id := testClientHelper().Ids.RandomAccountObjectIdentifier() + err := client.Warehouses.Create(ctx, id, &sdk.CreateWarehouseOptions{Comment: sdk.String("abc")}) + require.NoError(t, err) + t.Cleanup(testClientHelper().Warehouse.DropWarehouseFunc(t, id)) + + // can't use normal way, because of our SDK validation + _, err = client.ExecForTests(ctx, fmt.Sprintf("ALTER WAREHOUSE %s SET COMMENT = ''", id.FullyQualifiedName())) + require.NoError(t, err) + + warehouse, err := client.Warehouses.ShowByID(ctx, id) + require.NoError(t, err) + assert.Equal(t, "", warehouse.Comment) + + alterOptions := &sdk.AlterWarehouseOptions{ + Set: &sdk.WarehouseSet{ + Comment: sdk.String("abc"), + }, + } + err = client.Warehouses.Alter(ctx, id, alterOptions) + require.NoError(t, err) + + alterOptions = &sdk.AlterWarehouseOptions{ + Unset: &sdk.WarehouseUnset{ + Comment: sdk.Bool(true), + }, + } + err = client.Warehouses.Alter(ctx, id, alterOptions) + require.NoError(t, err) + + warehouse, err = client.Warehouses.ShowByID(ctx, id) + require.NoError(t, err) + assert.Equal(t, "", warehouse.Comment) + }) + t.Run("alter: suspend and resume", func(t *testing.T) { // new warehouse created on purpose warehouse, warehouseCleanup := testClientHelper().Warehouse.CreateWarehouse(t) @@ -263,30 +489,27 @@ func 
TestInt_Warehouses(t *testing.T) { } err := client.Warehouses.Alter(ctx, warehouse.ID(), alterOptions) require.NoError(t, err) - warehouses, err := client.Warehouses.Show(ctx, &sdk.ShowWarehouseOptions{ - Like: &sdk.Like{ - Pattern: sdk.String(warehouse.Name), - }, - }) + + result, err := client.Warehouses.ShowByID(ctx, warehouse.ID()) require.NoError(t, err) - assert.Equal(t, 1, len(warehouses)) - result := warehouses[0] assert.Contains(t, []sdk.WarehouseState{sdk.WarehouseStateSuspended, sdk.WarehouseStateSuspending}, result.State) + // check what happens if we suspend the already suspended warehouse + err = client.Warehouses.Alter(ctx, warehouse.ID(), alterOptions) + require.ErrorContains(t, err, "090064 (22000): Invalid state.") + alterOptions = &sdk.AlterWarehouseOptions{ Resume: sdk.Bool(true), } err = client.Warehouses.Alter(ctx, warehouse.ID(), alterOptions) require.NoError(t, err) - warehouses, err = client.Warehouses.Show(ctx, &sdk.ShowWarehouseOptions{ - Like: &sdk.Like{ - Pattern: sdk.String(warehouse.Name), - }, - }) + result, err = client.Warehouses.ShowByID(ctx, warehouse.ID()) require.NoError(t, err) - assert.Equal(t, 1, len(warehouses)) - result = warehouses[0] assert.Contains(t, []sdk.WarehouseState{sdk.WarehouseStateStarted, sdk.WarehouseStateResuming}, result.State) + + // check what happens if we resume the already started warehouse + err = client.Warehouses.Alter(ctx, warehouse.ID(), alterOptions) + require.ErrorContains(t, err, "090063 (22000): Invalid state.") }) t.Run("alter: resume without suspending", func(t *testing.T) { @@ -300,14 +523,9 @@ func TestInt_Warehouses(t *testing.T) { } err := client.Warehouses.Alter(ctx, warehouse.ID(), alterOptions) require.NoError(t, err) - warehouses, err := client.Warehouses.Show(ctx, &sdk.ShowWarehouseOptions{ - Like: &sdk.Like{ - Pattern: sdk.String(warehouse.Name), - }, - }) + + result, err := client.Warehouses.ShowByID(ctx, warehouse.ID()) require.NoError(t, err) - assert.Equal(t, 1, 
len(warehouses)) - result := warehouses[0] assert.Contains(t, []sdk.WarehouseState{sdk.WarehouseStateStarted, sdk.WarehouseStateResuming}, result.State) }) @@ -316,22 +534,11 @@ func TestInt_Warehouses(t *testing.T) { warehouse, warehouseCleanup := testClientHelper().Warehouse.CreateWarehouse(t) t.Cleanup(warehouseCleanup) - resetWarehouse := testClientHelper().Warehouse.UseWarehouse(t, warehouse.ID()) - t.Cleanup(resetWarehouse) - - // Start a long query - go client.ExecForTests(ctx, "CALL SYSTEM$WAIT(30);") //nolint:errcheck // we don't care if this eventually errors, as long as it runs for a little while - time.Sleep(5 * time.Second) + startLongRunningQuery() // Check that query is running - warehouses, err := client.Warehouses.Show(ctx, &sdk.ShowWarehouseOptions{ - Like: &sdk.Like{ - Pattern: sdk.String(warehouse.Name), - }, - }) + result, err := client.Warehouses.ShowByID(ctx, warehouse.ID()) require.NoError(t, err) - assert.Equal(t, 1, len(warehouses)) - result := warehouses[0] assert.Equal(t, 1, result.Running) assert.Equal(t, 0, result.Queued) @@ -343,21 +550,59 @@ func TestInt_Warehouses(t *testing.T) { require.NoError(t, err) // Wait for abort to be effective - time.Sleep(5 * time.Second) + time.Sleep(2 * time.Second) // Check no query is running - warehouses, err = client.Warehouses.Show(ctx, &sdk.ShowWarehouseOptions{ - Like: &sdk.Like{ - Pattern: sdk.String(warehouse.Name), - }, - }) + result, err = client.Warehouses.ShowByID(ctx, warehouse.ID()) require.NoError(t, err) - assert.Equal(t, 1, len(warehouses)) - result = warehouses[0] assert.Equal(t, 0, result.Running) assert.Equal(t, 0, result.Queued) }) + t.Run("alter: suspend with a long running-query", func(t *testing.T) { + // new warehouse created on purpose + warehouse, warehouseCleanup := testClientHelper().Warehouse.CreateWarehouse(t) + t.Cleanup(warehouseCleanup) + + startLongRunningQuery() + + // Suspend the warehouse + alterOptions := &sdk.AlterWarehouseOptions{ + Suspend: sdk.Bool(true), + } 
+ err := client.Warehouses.Alter(ctx, warehouse.ID(), alterOptions) + require.NoError(t, err) + + // check the state - it seems that the warehouse is suspended despite having a running query on it + result, err := client.Warehouses.ShowByID(ctx, warehouse.ID()) + require.NoError(t, err) + assert.Equal(t, 1, result.Running) + assert.Equal(t, 0, result.Queued) + assert.Equal(t, sdk.WarehouseStateSuspended, result.State) + }) + + t.Run("alter: resize with a long running-query", func(t *testing.T) { + // new warehouse created on purpose + warehouse, warehouseCleanup := testClientHelper().Warehouse.CreateWarehouse(t) + t.Cleanup(warehouseCleanup) + + startLongRunningQuery() + + // Resize the warehouse + err := client.Warehouses.Alter(ctx, warehouse.ID(), &sdk.AlterWarehouseOptions{ + Set: &sdk.WarehouseSet{WarehouseSize: sdk.Pointer(sdk.WarehouseSizeMedium)}, + }) + require.NoError(t, err) + + // check the state - it seems it's resized despite query being run on it + result, err := client.Warehouses.ShowByID(ctx, warehouse.ID()) + require.NoError(t, err) + assert.Equal(t, sdk.WarehouseStateStarted, result.State) + assert.Equal(t, sdk.WarehouseSizeMedium, result.Size) + assert.Equal(t, 1, result.Running) + assert.Equal(t, 0, result.Queued) + }) + t.Run("alter: set tags and unset tags", func(t *testing.T) { // new warehouse created on purpose warehouse, warehouseCleanup := testClientHelper().Warehouse.CreateWarehouse(t) @@ -403,30 +648,32 @@ func TestInt_Warehouses(t *testing.T) { }) t.Run("describe: when warehouse exists", func(t *testing.T) { - // new warehouse created on purpose - warehouse, warehouseCleanup := testClientHelper().Warehouse.CreateWarehouse(t) - t.Cleanup(warehouseCleanup) - - err := client.Warehouses.Drop(ctx, warehouse.ID(), nil) + result, err := client.Warehouses.Describe(ctx, precreatedWarehouseId) require.NoError(t, err) - _, err = client.Warehouses.Describe(ctx, warehouse.ID()) - assert.ErrorIs(t, err, sdk.ErrObjectNotExistOrAuthorized) + 
assert.Equal(t, precreatedWarehouseId.Name(), result.Name) + assert.Equal(t, "WAREHOUSE", result.Kind) + assert.NotEmpty(t, result.CreatedOn) }) t.Run("describe: when warehouse does not exist", func(t *testing.T) { - err := client.Warehouses.Drop(ctx, NonExistingAccountObjectIdentifier, nil) + id := NonExistingAccountObjectIdentifier + _, err := client.Warehouses.Describe(ctx, id) assert.ErrorIs(t, err, sdk.ErrObjectNotExistOrAuthorized) }) - t.Run("describe: when warehouse exists and if exists is true", func(t *testing.T) { + t.Run("drop: when warehouse exists", func(t *testing.T) { // new warehouse created on purpose warehouse, warehouseCleanup := testClientHelper().Warehouse.CreateWarehouse(t) t.Cleanup(warehouseCleanup) - dropOptions := &sdk.DropWarehouseOptions{IfExists: sdk.Bool(true)} - err := client.Warehouses.Drop(ctx, warehouse.ID(), dropOptions) + err := client.Warehouses.Drop(ctx, warehouse.ID(), nil) require.NoError(t, err) _, err = client.Warehouses.Describe(ctx, warehouse.ID()) assert.ErrorIs(t, err, sdk.ErrObjectNotExistOrAuthorized) }) + + t.Run("drop: when warehouse does not exist", func(t *testing.T) { + err := client.Warehouses.Drop(ctx, NonExistingAccountObjectIdentifier, nil) + assert.ErrorIs(t, err, sdk.ErrObjectNotExistOrAuthorized) + }) } diff --git a/pkg/sdk/warehouses.go b/pkg/sdk/warehouses.go index 96aab323c5..86ae91da7f 100644 --- a/pkg/sdk/warehouses.go +++ b/pkg/sdk/warehouses.go @@ -5,6 +5,7 @@ import ( "database/sql" "errors" "fmt" + "log" "strconv" "strings" "time" @@ -305,10 +306,35 @@ func (c *warehouses) Alter(ctx context.Context, id AccountObjectIdentifier, opts if err != nil { return err } + + // Warehouse needs to be suspended to change its type. 
+ if opts.warehouseTypeIsChanged() { + warehouse, err := c.ShowByID(ctx, id) + if err != nil { + return err + } + if warehouse.State == WarehouseStateStarted { + err := c.Alter(ctx, id, &AlterWarehouseOptions{Suspend: Bool(true)}) + if err != nil { + return err + } + defer func() { + err := c.Alter(ctx, id, &AlterWarehouseOptions{Resume: Bool(true)}) + if err != nil { + log.Printf("[DEBUG] error occurred during warehouse resumption, err=%v", err) + } + }() + } + } + _, err = c.client.exec(ctx, sql) return err } +func (opts *AlterWarehouseOptions) warehouseTypeIsChanged() bool { + return opts.Set != nil && opts.Set.WarehouseType != nil +} + // DropWarehouseOptions is based on https://docs.snowflake.com/en/sql-reference/sql/drop-warehouse. type DropWarehouseOptions struct { drop bool `ddl:"static" sql:"DROP"` @@ -397,7 +423,7 @@ type Warehouse struct { Comment string EnableQueryAcceleration bool QueryAccelerationMaxScaleFactor int - ResourceMonitor string + ResourceMonitor AccountObjectIdentifier ScalingPolicy ScalingPolicy OwnerRoleType string } @@ -462,7 +488,6 @@ func (row warehouseDBRow) convert() *Warehouse { Comment: row.Comment, EnableQueryAcceleration: row.EnableQueryAcceleration, QueryAccelerationMaxScaleFactor: row.QueryAccelerationMaxScaleFactor, - ResourceMonitor: row.ResourceMonitor, ScalingPolicy: ScalingPolicy(row.ScalingPolicy), } if val, err := strconv.ParseFloat(row.Available, 64); err != nil { @@ -483,6 +508,9 @@ func (row warehouseDBRow) convert() *Warehouse { if row.OwnerRoleType.Valid { wh.OwnerRoleType = row.OwnerRoleType.String } + if row.ResourceMonitor != "null" { + wh.ResourceMonitor = NewAccountObjectIdentifierFromFullyQualifiedName(row.ResourceMonitor) + } return wh }