From f90ab4da9e13086ebeda477483842a4edfacd0a5 Mon Sep 17 00:00:00 2001 From: Steve Swor Date: Mon, 9 Aug 2021 16:09:06 +1000 Subject: [PATCH 1/5] Add data sharing resource. --- docs/resources/datashare.md | 108 +++++ .../resources/redshift_datashare/resource.tf | 41 ++ redshift/helpers.go | 3 +- redshift/provider.go | 1 + redshift/resource_redshift_datashare.go | 404 ++++++++++++++++++ redshift/resource_redshift_datashare_test.go | 175 ++++++++ redshift/resource_redshift_schema.go | 10 +- redshift/resource_redshift_schema_test.go | 8 +- 8 files changed, 740 insertions(+), 10 deletions(-) create mode 100644 docs/resources/datashare.md create mode 100644 examples/resources/redshift_datashare/resource.tf create mode 100644 redshift/resource_redshift_datashare.go create mode 100644 redshift/resource_redshift_datashare_test.go diff --git a/docs/resources/datashare.md b/docs/resources/datashare.md new file mode 100644 index 0000000..dde6dae --- /dev/null +++ b/docs/resources/datashare.md @@ -0,0 +1,108 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "redshift_datashare Resource - terraform-provider-redshift" +subcategory: "" +description: |- + Defines a Redshift datashare. Datashares allows a Redshift cluster (the "consumer") to + read data stored in another Redshift cluster (the "producer"). For more information, see + https://docs.aws.amazon.com/redshift/latest/dg/datashare-overview.html + The redshift_datashare resource should be defined on the producer cluster. + Note: Data sharing is only supported on certain Redshift instance families, + such as RA3. +--- + +# redshift_datashare (Resource) + +Defines a Redshift datashare. Datashares allows a Redshift cluster (the "consumer") to +read data stored in another Redshift cluster (the "producer"). For more information, see +https://docs.aws.amazon.com/redshift/latest/dg/datashare-overview.html + +The redshift_datashare resource should be defined on the producer cluster. 
+ +Note: Data sharing is only supported on certain Redshift instance families, +such as RA3. + +## Example Usage + +```terraform +# Example: data share which includes all tables/views and functions in the specified schema. +# New tables/views and functions are automatically added to the datashare. +resource "redshift_datashare" "datashare_auto" { + name = "my_automatic_datashare" # Required + owner = "my_user" # Optional + + schema { + name = "public" # Required + mode = "auto" # Required + } +} + +# Example: data share which explicitly specifies tables/views and functions +resource "redshift_datashare" "datashare_manual" { + name = "my_manual_datashare" # Required + + schema { + name = "public" # Required + mode = "manual" # Required + tables = [ # Optional. If unspecified then no tables/views will be added. + "my_table", + "my_view", + "my_late_binding_view", + "my_materialized_view", + ] + functions = [ # Optional. If unspecified then no functions will be added. + "my_sql_udf", + ] + } +} + +# Example: Datashare that can be shared with publicly available consumer clusters. +resource "redshift_datashare" "publicly_accessible_datashare" { + name = "my_public_datashare" # Required + publicly_accessible = true # Optional. Default is `false` + + schema { + name = "public" # Required + mode = "auto" # Required + } +} +``` + + +## Schema + +### Required + +- **name** (String) The name of the datashare. + +### Optional + +- **id** (String) The ID of this resource. +- **owner** (String) The user who owns the datashare. +- **publicly_accessible** (Boolean) Specifies whether the datashare can be shared to clusters that are publicly accessible. Default is `false`. +- **schema** (Block Set) Defines which objects in the specified schema are exposed to the data share (see [below for nested schema](#nestedblock--schema)) + +### Read-Only + +- **created** (String) The date when datashare was created +- **producer_account** (String) The ID for the datashare producer account. 
+- **producer_namespace** (String) The unique cluster identifier for the datashare producer cluster. + + +### Nested Schema for `schema` + +Required: + +- **mode** (String) Configures how schema objects will be exposed to the datashare. Must be either `auto` or `manual`. + + In `auto` mode, all tables, views, and UDFs will be exposed to the datashare, and Redshift will automatically expose new tables, views, and functions in the schema to the datashare (without requiring `terraform apply` to be run again). + + In `manual` mode, only the `tables` and `functions` explicitly declared in the `schema` block will be exposed to the datashare. +- **name** (String) The name of the schema + +Optional: + +- **functions** (Set of String) UDFs that are exposed to the datashare. You should configure this attribute explicitly when using `manual` mode. When using `auto` mode, this is treated as a computed attribute and you should not explicitly declare it. +- **tables** (Set of String) Tables and views that are exposed to the datashare. You should configure this attribute explicitly when using `manual` mode. When using `auto` mode, this is treated as a computed attribute and you should not explicitly declare it. + + diff --git a/examples/resources/redshift_datashare/resource.tf b/examples/resources/redshift_datashare/resource.tf new file mode 100644 index 0000000..8694cd7 --- /dev/null +++ b/examples/resources/redshift_datashare/resource.tf @@ -0,0 +1,41 @@ +# Example: data share which includes all tables/views and functions in the specified schema. +# New tables/views and functions are automatically added to the datashare. 
+resource "redshift_datashare" "datashare_auto" { + name = "my_automatic_datashare" # Required + owner = "my_user" # Optional + + schema { + name = "public" # Required + mode = "auto" # Required + } +} + +# Example: data share which explicitly specifies tables/views and functions +resource "redshift_datashare" "datashare_manual" { + name = "my_manual_datashare" # Required + + schema { + name = "public" # Required + mode = "manual" # Required + tables = [ # Optional. If unspecified then no tables/views will be added. + "my_table", + "my_view", + "my_late_binding_view", + "my_materialized_view", + ] + functions = [ # Optional. If unspecified then no functions will be added. + "my_sql_udf", + ] + } +} + +# Example: Datashare that can be shared with publicly available consumer clusters. +resource "redshift_datashare" "publicly_accessible_datashare" { + name = "my_public_datashare" # Required + publicly_accessible = true # Optional. Default is `false` + + schema { + name = "public" # Required + mode = "auto" # Required + } +} diff --git a/redshift/helpers.go b/redshift/helpers.go index 4d9ae94..985d132 100644 --- a/redshift/helpers.go +++ b/redshift/helpers.go @@ -128,11 +128,12 @@ func isRetryablePQError(code string) bool { return ok } -func splitCsvAndTrim(raw string) ([]string, error) { +func splitCsvAndTrim(raw string, delimiter rune) ([]string, error) { if raw == "" { return []string{}, nil } reader := csv.NewReader(strings.NewReader(raw)) + reader.Comma = delimiter rawSlice, err := reader.Read() if err != nil { return nil, err diff --git a/redshift/provider.go b/redshift/provider.go index 29b9ced..180e5c9 100644 --- a/redshift/provider.go +++ b/redshift/provider.go @@ -121,6 +121,7 @@ func Provider() *schema.Provider { "redshift_schema": redshiftSchema(), "redshift_privilege": redshiftPrivilege(), "redshift_database": redshiftDatabase(), + "redshift_datashare": redshiftDatashare(), }, DataSourcesMap: map[string]*schema.Resource{ "redshift_user": 
dataSourceRedshiftUser(), diff --git a/redshift/resource_redshift_datashare.go b/redshift/resource_redshift_datashare.go new file mode 100644 index 0000000..e6c653e --- /dev/null +++ b/redshift/resource_redshift_datashare.go @@ -0,0 +1,404 @@ +package redshift + +import ( + "database/sql" + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/lib/pq" +) + +func redshiftDatashare() *schema.Resource { + return &schema.Resource{ + Description: ` +Defines a Redshift datashare. Datashares allows a Redshift cluster (the "consumer") to +read data stored in another Redshift cluster (the "producer"). For more information, see +https://docs.aws.amazon.com/redshift/latest/dg/datashare-overview.html + +The redshift_datashare resource should be defined on the producer cluster. + +Note: Data sharing is only supported on certain Redshift instance families, +such as RA3. +`, + Exists: RedshiftResourceExistsFunc(resourceRedshiftDatashareExists), + Create: RedshiftResourceFunc(resourceRedshiftDatashareCreate), + Read: RedshiftResourceFunc(resourceRedshiftDatashareRead), + Update: RedshiftResourceFunc(resourceRedshiftDatashareUpdate), + Delete: RedshiftResourceFunc(resourceRedshiftDatashareDelete), + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Description: "The name of the datashare.", + Required: true, + ForceNew: true, + StateFunc: func(val interface{}) string { + return strings.ToLower(val.(string)) + }, + }, + "owner": { + Type: schema.TypeString, + Description: "The user who owns the datashare.", + Optional: true, + Computed: true, + StateFunc: func(val interface{}) string { + return strings.ToLower(val.(string)) + }, + }, + "publicly_accessible": { + Type: schema.TypeBool, + Description: "Specifies whether the datashare can be shared to clusters 
that are publicly accessible. Default is `false`.", + Optional: true, + Default: false, + }, + "producer_account": { + Type: schema.TypeString, + Description: "The ID for the datashare producer account.", + Computed: true, + }, + "producer_namespace": { + Type: schema.TypeString, + Description: "The unique cluster identifier for the datashare producer cluster.", + Computed: true, + }, + "created": { + Type: schema.TypeString, + Description: "The date when datashare was created", + Computed: true, + }, + "schema": { + Type: schema.TypeSet, + Optional: true, + Description: "Defines which objects in the specified schema are exposed to the data share", + Set: resourceRedshiftDatashareSchemaHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "The name of the schema", + StateFunc: func(val interface{}) string { + return strings.ToLower(val.(string)) + }, + }, + "mode": { + Type: schema.TypeString, + Required: true, + Description: "Configures how schema objects will be exposed to the datashare. Must be either `auto` or `manual`.\n\n" + + " In `auto` mode, all tables, views, and UDFs will be exposed to the datashare, and Redshift will automatically expose new tables, views, and functions in the schema to the datashare (without requiring `terraform apply` to be run again).\n\n" + + " In `manual` mode, only the `tables` and `functions` explicitly declared in the `schema` block will be exposed to the datashare.", + StateFunc: func(val interface{}) string { + return strings.ToLower(val.(string)) + }, + ValidateFunc: validation.StringInSlice([]string{ + "auto", + "manual", + }, false), + }, + "tables": { + Type: schema.TypeSet, + Description: "Tables and views that are exposed to the datashare. You should configure this attribute explicitly when using `manual` mode. 
When using `auto` mode, this is treated as a computed attribute and you should not explicitly declare it.", + Optional: true, + Computed: true, + Set: schema.HashString, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "functions": { + Type: schema.TypeSet, + Description: "UDFs that are to exposed to the datashare. You should configure this attribute explicitly when using `manual` mode. When using `auto` mode, this is treated as a computed attribute and you should not explicitly declare it.", + Optional: true, + Computed: true, + Set: schema.HashString, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + } +} + +func resourceRedshiftDatashareSchemaHash(v interface{}) int { + schemaResource := v.(map[string]interface{}) + schemaName := schemaResource["name"].(string) + return schema.HashString(schemaName) +} + +func resourceRedshiftDatashareExists(db *DBConnection, d *schema.ResourceData) (bool, error) { + var name string + query := "SELECT share_name FROM svv_datashares WHERE share_type='OUTBOUND' AND share_id=$1" + log.Printf("[DEBUG] check if datashare exists: %s\n", query) + err := db.QueryRow(query, d.Id()).Scan(&name) + + switch { + case err == sql.ErrNoRows: + return false, nil + case err != nil: + return false, err + } + + return true, nil +} + +func resourceRedshiftDatashareCreate(db *DBConnection, d *schema.ResourceData) error { + tx, err := startTransaction(db.client, "") + if err != nil { + return err + } + defer deferredRollback(tx) + + shareName := d.Get("name").(string) + + log.Println("[DEBUG] Creating datashare") + query := fmt.Sprintf("CREATE DATASHARE %s SET PUBLICACCESSIBLE = %t", pq.QuoteIdentifier(shareName), d.Get("publicly_accessible").(bool)) + + if _, err := tx.Exec(query); err != nil { + return err + } + + var shareId string + query = "SELECT share_id FROM SVV_DATASHARES WHERE share_type = 'OUTBOUND' AND share_name = $1" + log.Println("[DEBUG] getting datashare id") + if err := tx.QueryRow(query, 
strings.ToLower(shareName)).Scan(&shareId); err != nil { + return err + } + + d.SetId(shareId) + + if owner, ownerIsSet := d.GetOk("owner"); ownerIsSet { + log.Println("[DEBUG] Setting datashare owner") + _, err = tx.Exec(fmt.Sprintf("ALTER DATASHARE %s OWNER TO %s", pq.QuoteIdentifier(strings.ToLower(shareName)), pq.QuoteIdentifier(strings.ToLower(owner.(string))))) + if err != nil { + return err + } + } + + for _, schema := range d.Get("schema").(*schema.Set).List() { + err = resourceRedshiftDatashareAddSchema(tx, d, schema.(map[string]interface{})) + if err != nil { + return err + } + err = resourceRedshiftDatashareAddTables(tx, d, schema.(map[string]interface{})) + if err != nil { + return err + } + err = resourceRedshiftDatashareAddFunctions(tx, d, schema.(map[string]interface{})) + if err != nil { + return err + } + } + + if err = tx.Commit(); err != nil { + return fmt.Errorf("could not commit transaction: %w", err) + } + + return resourceRedshiftDatashareRead(db, d) +} + +func resourceRedshiftDatashareAddSchema(tx *sql.Tx, d *schema.ResourceData, schema map[string]interface{}) error { + shareName := d.Get("name").(string) + schemaName := schema["name"].(string) + mode := schema["mode"].(string) + log.Println("[DEBUG] Adding schema to datashare") + _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s ADD SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName))) + if err != nil { + return err + } + if mode == "auto" { + _, err = tx.Exec(fmt.Sprintf("ALTER DATASHARE %s SET INCLUDENEW = TRUE FOR SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName))) + if err != nil { + return err + } + } + return nil +} + +func resourceRedshiftDatashareAddTables(tx *sql.Tx, d *schema.ResourceData, schemaConfig map[string]interface{}) error { + shareName := d.Get("name").(string) + schemaName := schemaConfig["name"].(string) + mode := schemaConfig["mode"].(string) + switch mode { + case "auto": + log.Println("[DEBUG] Adding all tables to 
datashare") + _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s ADD ALL TABLES IN SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName))) + if err != nil { + return err + } + case "manual": + log.Println("[DEBUG] Adding individual tables to datashare") + for _, table := range schemaConfig["tables"].(*schema.Set).List() { + _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s ADD TABLE %s.%s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName), pq.QuoteIdentifier(table.(string)))) + if err != nil { + return err + } + } + default: + return fmt.Errorf("Unsupported datashare schema mode: %s", mode) + } + return nil +} + +func resourceRedshiftDatashareAddFunctions(tx *sql.Tx, d *schema.ResourceData, schemaConfig map[string]interface{}) error { + shareName := d.Get("name").(string) + schemaName := schemaConfig["name"].(string) + mode := schemaConfig["mode"].(string) + switch mode { + case "auto": + log.Println("[DEBUG] Adding all functions to datashare") + _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s ADD ALL FUNCTIONS IN SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName))) + if err != nil { + return err + } + case "manual": + log.Println("[DEBUG] Adding individual functions to datashare") + for _, table := range schemaConfig["functions"].(*schema.Set).List() { + _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s ADD FUNCTION %s.%s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName), pq.QuoteIdentifier(table.(string)))) + if err != nil { + return err + } + } + default: + return fmt.Errorf("Unsupported datashare schema mode: %s", mode) + } + return nil +} + +func resourceRedshiftDatashareRead(db *DBConnection, d *schema.ResourceData) error { + var shareName, owner, producerAccount, producerNamespace, created string + var publicAccessible bool + + tx, err := startTransaction(db.client, "") + if err != nil { + return err + } + defer deferredRollback(tx) + + log.Println("[DEBUG] reading datashare") + err = 
tx.QueryRow(` + SELECT + trim(svv_datashares.share_name), + trim(pg_user.usename), + svv_datashares.is_publicaccessible, + TRIM(COALESCE(svv_datashares.producer_account, '')), + TRIM(COALESCE(svv_datashares.producer_namespace, '')), + REPLACE(TO_CHAR(svv_datashares.createdate, 'YYYY-MM-DD HH24:MI:SS'), ' ', 'T') || 'Z' + FROM svv_datashares + LEFT JOIN pg_user ON svv_datashares.share_owner = pg_user.usesysid + WHERE share_type = 'OUTBOUND' + AND share_id = $1`, d.Id()).Scan(&shareName, &owner, &publicAccessible, &producerAccount, &producerNamespace, &created) + if err != nil { + return err + } + + d.Set("name", shareName) + d.Set("owner", owner) + d.Set("publicly_accessible", publicAccessible) + d.Set("producer_account", producerAccount) + d.Set("producer_namespace", producerNamespace) + d.Set("created", created) + + // TODO read schemas + if err = readDatashareSchemas(tx, shareName, d); err != nil { + return err + } + + if err = tx.Commit(); err != nil { + return err + } + + return nil +} + +func readDatashareSchemas(tx *sql.Tx, shareName string, d *schema.ResourceData) error { + // Run a single query to fetch all datashare object info. + // Order doesn't matter here since + // a) we're storing the data in sets, and + // b) Redshift won't allow you to add schema objects to a datashare until after you've added the schema itself, + // so if we see a table/view/function/etc we can safely assume the schema is also added. 
+ rows, err := tx.Query(` + SELECT + object_name, + object_type, + COALESCE(include_new, FALSE) + FROM svv_datashare_objects + WHERE share_type = 'OUTBOUND' + AND share_name = $1 + `, shareName) + if err != nil { + return err + } + defer rows.Close() + schemasByName := make(map[string]map[string]interface{}) + for rows.Next() { + var objectName, objectType string + var includeNew bool + if err = rows.Scan(&objectName, &objectType, &includeNew); err != nil { + return err + } + + // resolve schema name + objectNameSlice, err := splitCsvAndTrim(objectName, '.') + if err != nil { + return fmt.Errorf("Unable to parse datashare object name, %w", err) + } + if len(objectNameSlice) < 1 || len(objectNameSlice) > 2 { + return fmt.Errorf("Unable to parse datashare object name") + } + schemaName := objectNameSlice[0] + objectName = objectNameSlice[len(objectNameSlice)-1] + + // get/create schema entry + schemaDef, ok := schemasByName[schemaName] + if !ok { + // schema entry doesn't exist so create it + schemaDef = make(map[string]interface{}) + schemaDef["name"] = schemaName + schemaDef["tables"] = schema.NewSet(schema.HashString, make([]interface{}, 0)) + schemaDef["functions"] = schema.NewSet(schema.HashString, make([]interface{}, 0)) + schemasByName[schemaName] = schemaDef + } + + // now finally we can populate the schema info + switch strings.ToLower(objectType) { + case "schema": + if includeNew { + schemaDef["mode"] = "auto" + } else { + schemaDef["mode"] = "manual" + } + case "table", "view", "late binding view", "materialized view": + schemaDef["tables"].(*schema.Set).Add(objectName) + case "function": + schemaDef["functions"].(*schema.Set).Add(objectName) + default: + log.Printf("[WARN] Ignoring datashare object %s.%s with type %s\n", schemaName, objectName, objectType) + } + } + + // convert map to set + schemaSlice := make([]interface{}, 0) + for _, schemaDef := range schemasByName { + schemaSlice = append(schemaSlice, schemaDef) + } + schemas := 
schema.NewSet(resourceRedshiftDatashareSchemaHash, schemaSlice) + d.Set("schema", schemas) + return nil +} + +func resourceRedshiftDatashareUpdate(db *DBConnection, d *schema.ResourceData) error { + // TODO implement + return nil +} + +func resourceRedshiftDatashareDelete(db *DBConnection, d *schema.ResourceData) error { + shareName := d.Get("name").(string) + log.Println("[DEBUG] deleting datashare") + query := fmt.Sprintf("DROP DATASHARE %s", pq.QuoteIdentifier(shareName)) + _, err := db.Exec(query) + return err +} diff --git a/redshift/resource_redshift_datashare_test.go b/redshift/resource_redshift_datashare_test.go new file mode 100644 index 0000000..ce0a0dc --- /dev/null +++ b/redshift/resource_redshift_datashare_test.go @@ -0,0 +1,175 @@ +package redshift + +import ( + "database/sql" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/lib/pq" +) + +func TestAccRedshiftDatashare_Basic(t *testing.T) { + shareName := strings.ReplaceAll(acctest.RandomWithPrefix("tf_acc_datashare_basic"), "-", "_") + config := fmt.Sprintf(` +resource "redshift_schema" "schema" { + name = %[1]q + cascade_on_delete = true +} + +resource "redshift_user" "user" { + name = %[1]q +} + +resource "redshift_datashare" "basic" { + name = %[1]q + owner = redshift_user.user.name + schema { + name = redshift_schema.schema.name + mode = "auto" + } +} +`, shareName) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckRedshiftDatashareDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testAccCheckRedshiftDatashareExists(shareName), + resource.TestCheckResourceAttr("redshift_datashare.basic", "name", shareName), + 
resource.TestCheckResourceAttr("redshift_datashare.basic", "owner", shareName), + resource.TestCheckResourceAttr("redshift_datashare.basic", "publicly_accessible", "false"), + resource.TestCheckResourceAttrSet("redshift_datashare.basic", "producer_account"), + resource.TestCheckResourceAttrSet("redshift_datashare.basic", "producer_namespace"), + resource.TestCheckResourceAttrSet("redshift_datashare.basic", "created"), + resource.TestCheckResourceAttr("redshift_datashare.basic", "schema.#", "1"), + resource.TestCheckResourceAttr("redshift_datashare.basic", "schema.0.name", shareName), + resource.TestCheckResourceAttr("redshift_datashare.basic", "schema.0.mode", "auto"), + resource.TestCheckResourceAttr("redshift_datashare.basic", "schema.0.tables.#", "0"), + resource.TestCheckResourceAttr("redshift_datashare.basic", "schema.0.functions.#", "0"), + ), + }, + { + // This test step creates some dummy objects and adds them to the datashare. + // This is done in raw SQL in the PreConfig function, for now, as the provider + // doesn't yet have resource definitions for tables/views/functions. 
+ PreConfig: testAccRedshiftDatashareCreateObjects(t, shareName), + Config: config, + Check: resource.ComposeTestCheckFunc( + testAccCheckRedshiftDatashareExists(shareName), + resource.TestCheckResourceAttr("redshift_datashare.basic", "name", shareName), + resource.TestCheckResourceAttr("redshift_datashare.basic", "owner", shareName), + resource.TestCheckResourceAttr("redshift_datashare.basic", "publicly_accessible", "false"), + resource.TestCheckResourceAttrSet("redshift_datashare.basic", "producer_account"), + resource.TestCheckResourceAttrSet("redshift_datashare.basic", "producer_namespace"), + resource.TestCheckResourceAttrSet("redshift_datashare.basic", "created"), + resource.TestCheckResourceAttr("redshift_datashare.basic", "schema.#", "1"), + resource.TestCheckResourceAttr("redshift_datashare.basic", "schema.0.name", shareName), + resource.TestCheckResourceAttr("redshift_datashare.basic", "schema.0.mode", "auto"), + resource.TestCheckResourceAttr("redshift_datashare.basic", "schema.0.tables.#", "4"), + resource.TestCheckResourceAttr("redshift_datashare.basic", "schema.0.functions.#", "1"), + ), + }, + { + ResourceName: "redshift_datashare.basic", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccRedshiftDatashareCreateObjects(t *testing.T, schemaName string) func() { + return func() { + client := testAccProvider.Meta().(*Client) + tx, err := startTransaction(client, "") + if err != nil { + t.Errorf("Unable to start transaction: %w", err) + } + defer deferredRollback(tx) + + query := fmt.Sprintf(` +CREATE TABLE %[1]s.test_table (message varchar(max)); +CREATE VIEW %[1]s.test_view AS (SELECT message FROM %[1]s.test_table); +CREATE VIEW %[1]s.test_late_binding_view AS (SELECT * FROM %[1]s.test_view) WITH NO SCHEMA BINDING; +CREATE MATERIALIZED VIEW %[1]s.test_materialized_view BACKUP NO AUTO REFRESH NO AS (SELECT message FROM %[1]s.test_table); +CREATE FUNCTION %[1]s.test_echo (varchar(max)) + RETURNS varchar(max) +STABLE +AS $$ + 
SELECT $1 +$$ LANGUAGE sql;`, pq.QuoteIdentifier(schemaName)) + + if _, err := tx.Exec(query); err != nil { + t.Errorf("Unable to populate datashare schema objects: %w", err) + } + + if err = tx.Commit(); err != nil { + t.Errorf("Unable to commit transaction: %w", err) + } + } +} + +func testAccCheckRedshiftDatashareExists(shareName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + client := testAccProvider.Meta().(*Client) + + exists, err := checkDatashareExists(client, shareName) + if err != nil { + return fmt.Errorf("Error checking datashare %s", err) + } + + if !exists { + return fmt.Errorf("Datashare not found") + } + + return nil + } +} + +func checkDatashareExists(client *Client, shareName string) (bool, error) { + db, err := client.Connect() + if err != nil { + return false, err + } + + var _rez int + err = db.QueryRow("SELECT 1 from svv_datashares WHERE share_type = 'OUTBOUND' AND share_name = $1", strings.ToLower(shareName)).Scan(&_rez) + + switch { + case err == sql.ErrNoRows: + return false, nil + case err != nil: + return false, fmt.Errorf("Error reading info about datashare: %w", err) + } + + return true, nil +} + +func testAccCheckRedshiftDatashareDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*Client) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "redshift_datashare" { + continue + } + + exists, err := checkDatashareExists(client, rs.Primary.Attributes["name"]) + + if err != nil { + return fmt.Errorf("Error checking datashare %w", err) + } + + if exists { + return fmt.Errorf("Datashare still exists after destroy") + } + } + + return nil +} diff --git a/redshift/resource_redshift_schema.go b/redshift/resource_redshift_schema.go index f9b5740..e8bcbe5 100644 --- a/redshift/resource_redshift_schema.go +++ b/redshift/resource_redshift_schema.go @@ -499,11 +499,11 @@ func resourceRedshiftSchemaReadExternal(db *DBConnection, d *schema.ResourceData switch { case sourceType == 
"data_catalog_source": sourceConfiguration["region"] = ®ion - sourceConfiguration["iam_role_arns"], err = splitCsvAndTrim(iamRole) + sourceConfiguration["iam_role_arns"], err = splitCsvAndTrim(iamRole, ',') if err != nil { return fmt.Errorf("Error parsing iam_role_arns: %v", err) } - sourceConfiguration["catalog_role_arns"], err = splitCsvAndTrim(catalogRole) + sourceConfiguration["catalog_role_arns"], err = splitCsvAndTrim(catalogRole, ',') if err != nil { return fmt.Errorf("Error parsing catalog_role_arns: %v", err) } @@ -516,7 +516,7 @@ func resourceRedshiftSchemaReadExternal(db *DBConnection, d *schema.ResourceData } sourceConfiguration["port"] = &portNum } - sourceConfiguration["iam_role_arns"], err = splitCsvAndTrim(iamRole) + sourceConfiguration["iam_role_arns"], err = splitCsvAndTrim(iamRole, ',') if err != nil { return fmt.Errorf("Error parsing iam_role_arns: %v", err) } @@ -532,7 +532,7 @@ func resourceRedshiftSchemaReadExternal(db *DBConnection, d *schema.ResourceData if sourceSchema != "" { sourceConfiguration["schema"] = &sourceSchema } - sourceConfiguration["iam_role_arns"], err = splitCsvAndTrim(iamRole) + sourceConfiguration["iam_role_arns"], err = splitCsvAndTrim(iamRole, ',') if err != nil { return fmt.Errorf("Error parsing iam_role_arns: %v", err) } @@ -546,7 +546,7 @@ func resourceRedshiftSchemaReadExternal(db *DBConnection, d *schema.ResourceData } sourceConfiguration["port"] = &portNum } - sourceConfiguration["iam_role_arns"], err = splitCsvAndTrim(iamRole) + sourceConfiguration["iam_role_arns"], err = splitCsvAndTrim(iamRole, ',') if err != nil { return fmt.Errorf("Error parsing iam_role_arns: %v", err) } diff --git a/redshift/resource_redshift_schema_test.go b/redshift/resource_redshift_schema_test.go index 17357de..b862580 100644 --- a/redshift/resource_redshift_schema_test.go +++ b/redshift/resource_redshift_schema_test.go @@ -156,7 +156,7 @@ resource "redshift_user" "schema_dl_user1" { func TestAccRedshiftSchema_ExternalDataCatalog(t 
*testing.T) { dbName := getEnvOrSkip("REDSHIFT_EXTERNAL_SCHEMA_DATA_CATALOG_DATABASE", t) iamRoleArnsRaw := getEnvOrSkip("REDSHIFT_EXTERNAL_SCHEMA_DATA_CATALOG_IAM_ROLE_ARNS", t) - iamRoleArns, err := splitCsvAndTrim(iamRoleArnsRaw) + iamRoleArns, err := splitCsvAndTrim(iamRoleArnsRaw, ',') if err != nil { t.Errorf("REDSHIFT_EXTERNAL_SCHEMA_DATA_CATALOG_IAM_ROLE_ARNS could not be parsed: %v", err) } @@ -216,7 +216,7 @@ func TestAccRedshiftSchema_ExternalHive(t *testing.T) { dbName := getEnvOrSkip("REDSHIFT_EXTERNAL_SCHEMA_HIVE_DATABASE", t) dbHostname := getEnvOrSkip("REDSHIFT_EXTERNAL_SCHEMA_HIVE_HOSTNAME", t) iamRoleArnsRaw := getEnvOrSkip("REDSHIFT_EXTERNAL_SCHEMA_HIVE_IAM_ROLE_ARNS", t) - iamRoleArns, err := splitCsvAndTrim(iamRoleArnsRaw) + iamRoleArns, err := splitCsvAndTrim(iamRoleArnsRaw, ',') if err != nil { t.Errorf("REDSHIFT_EXTERNAL_SCHEMA_DATA_CATALOG_IAM_ROLE_ARNS could not be parsed: %v", err) } @@ -286,7 +286,7 @@ func TestAccRedshiftSchema_ExternalRdsPostgres(t *testing.T) { dbName := getEnvOrSkip("REDSHIFT_EXTERNAL_SCHEMA_RDS_POSTGRES_DATABASE", t) dbHostname := getEnvOrSkip("REDSHIFT_EXTERNAL_SCHEMA_RDS_POSTGRES_HOSTNAME", t) iamRoleArnsRaw := getEnvOrSkip("REDSHIFT_EXTERNAL_SCHEMA_RDS_POSTGRES_IAM_ROLE_ARNS", t) - iamRoleArns, err := splitCsvAndTrim(iamRoleArnsRaw) + iamRoleArns, err := splitCsvAndTrim(iamRoleArnsRaw, ',') if err != nil { t.Errorf("REDSHIFT_EXTERNAL_SCHEMA_RDS_POSTGRES_IAM_ROLE_ARNS could not be parsed: %v", err) } @@ -364,7 +364,7 @@ func TestAccRedshiftSchema_ExternalRdsMysql(t *testing.T) { dbName := getEnvOrSkip("REDSHIFT_EXTERNAL_SCHEMA_RDS_MYSQL_DATABASE", t) dbHostname := getEnvOrSkip("REDSHIFT_EXTERNAL_SCHEMA_RDS_MYSQL_HOSTNAME", t) iamRoleArnsRaw := getEnvOrSkip("REDSHIFT_EXTERNAL_SCHEMA_RDS_MYSQL_IAM_ROLE_ARNS", t) - iamRoleArns, err := splitCsvAndTrim(iamRoleArnsRaw) + iamRoleArns, err := splitCsvAndTrim(iamRoleArnsRaw, ',') if err != nil { t.Errorf("REDSHIFT_EXTERNAL_SCHEMA_RDS_MYSQL_IAM_ROLE_ARNS could not be 
parsed: %v", err) } From 598db51f1d5e9b3ffe701b05be7cd0898b15aa7a Mon Sep 17 00:00:00 2001 From: Steve Swor Date: Wed, 11 Aug 2021 23:46:04 +1000 Subject: [PATCH 2/5] throwaway commit. Partial implementation of datashare modify. Add/remove entire schema with all assets works. Modify manually-managed assets in datashare schema still not implemented. --- redshift/resource_redshift_datashare.go | 374 ++++++++++++++++++++++-- 1 file changed, 344 insertions(+), 30 deletions(-) diff --git a/redshift/resource_redshift_datashare.go b/redshift/resource_redshift_datashare.go index e6c653e..bd8c380 100644 --- a/redshift/resource_redshift_datashare.go +++ b/redshift/resource_redshift_datashare.go @@ -1,9 +1,11 @@ package redshift import ( + "bytes" "database/sql" "fmt" "log" + "sort" "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -74,6 +76,7 @@ such as RA3. "schema": { Type: schema.TypeSet, Optional: true, + Computed: true, Description: "Defines which objects in the specified schema are exposed to the data share", Set: resourceRedshiftDatashareSchemaHash, Elem: &schema.Resource{ @@ -124,9 +127,38 @@ such as RA3. 
} func resourceRedshiftDatashareSchemaHash(v interface{}) int { - schemaResource := v.(map[string]interface{}) - schemaName := schemaResource["name"].(string) - return schema.HashString(schemaName) + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["mode"].(string))) + + // Sort the tables/functions sets to make the hash more deterministic + if v, ok := m["tables"]; ok { + vs := v.(*schema.Set).List() + s := make([]string, len(vs)) + for i, raw := range vs { + s[i] = raw.(string) + } + sort.Strings(s) + + for _, v := range s { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + } + + if v, ok := m["functions"]; ok { + vs := v.(*schema.Set).List() + s := make([]string, len(vs)) + for i, raw := range vs { + s[i] = raw.(string) + } + sort.Strings(s) + + for _, v := range s { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + } + return schema.HashString(buf.String()) } func resourceRedshiftDatashareExists(db *DBConnection, d *schema.ResourceData) (bool, error) { @@ -179,15 +211,7 @@ func resourceRedshiftDatashareCreate(db *DBConnection, d *schema.ResourceData) e } for _, schema := range d.Get("schema").(*schema.Set).List() { - err = resourceRedshiftDatashareAddSchema(tx, d, schema.(map[string]interface{})) - if err != nil { - return err - } - err = resourceRedshiftDatashareAddTables(tx, d, schema.(map[string]interface{})) - if err != nil { - return err - } - err = resourceRedshiftDatashareAddFunctions(tx, d, schema.(map[string]interface{})) + err = addSchemaToDatashare(tx, shareName, schema.(map[string]interface{})) if err != nil { return err } @@ -200,10 +224,22 @@ func resourceRedshiftDatashareCreate(db *DBConnection, d *schema.ResourceData) e return resourceRedshiftDatashareRead(db, d) } -func resourceRedshiftDatashareAddSchema(tx *sql.Tx, d *schema.ResourceData, schema map[string]interface{}) error { - shareName := d.Get("name").(string) - schemaName := 
schema["name"].(string) - mode := schema["mode"].(string) +func addSchemaToDatashare(tx *sql.Tx, shareName string, m map[string]interface{}) error { + err := resourceRedshiftDatashareAddSchema(tx, shareName, m) + if err != nil { + return err + } + err = resourceRedshiftDatashareAddTables(tx, shareName, m) + if err != nil { + return err + } + err = resourceRedshiftDatashareAddFunctions(tx, shareName, m) + return err +} + +func resourceRedshiftDatashareAddSchema(tx *sql.Tx, shareName string, m map[string]interface{}) error { + schemaName := m["name"].(string) + mode := m["mode"].(string) log.Println("[DEBUG] Adding schema to datashare") _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s ADD SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName))) if err != nil { @@ -218,10 +254,9 @@ func resourceRedshiftDatashareAddSchema(tx *sql.Tx, d *schema.ResourceData, sche return nil } -func resourceRedshiftDatashareAddTables(tx *sql.Tx, d *schema.ResourceData, schemaConfig map[string]interface{}) error { - shareName := d.Get("name").(string) - schemaName := schemaConfig["name"].(string) - mode := schemaConfig["mode"].(string) +func resourceRedshiftDatashareAddTables(tx *sql.Tx, shareName string, m map[string]interface{}) error { + schemaName := m["name"].(string) + mode := m["mode"].(string) switch mode { case "auto": log.Println("[DEBUG] Adding all tables to datashare") @@ -231,7 +266,7 @@ func resourceRedshiftDatashareAddTables(tx *sql.Tx, d *schema.ResourceData, sche } case "manual": log.Println("[DEBUG] Adding individual tables to datashare") - for _, table := range schemaConfig["tables"].(*schema.Set).List() { + for _, table := range m["tables"].(*schema.Set).List() { _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s ADD TABLE %s.%s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName), pq.QuoteIdentifier(table.(string)))) if err != nil { return err @@ -243,10 +278,9 @@ func resourceRedshiftDatashareAddTables(tx *sql.Tx, d *schema.ResourceData, 
sche return nil } -func resourceRedshiftDatashareAddFunctions(tx *sql.Tx, d *schema.ResourceData, schemaConfig map[string]interface{}) error { - shareName := d.Get("name").(string) - schemaName := schemaConfig["name"].(string) - mode := schemaConfig["mode"].(string) +func resourceRedshiftDatashareAddFunctions(tx *sql.Tx, shareName string, m map[string]interface{}) error { + schemaName := m["name"].(string) + mode := m["mode"].(string) switch mode { case "auto": log.Println("[DEBUG] Adding all functions to datashare") @@ -256,7 +290,7 @@ func resourceRedshiftDatashareAddFunctions(tx *sql.Tx, d *schema.ResourceData, s } case "manual": log.Println("[DEBUG] Adding individual functions to datashare") - for _, table := range schemaConfig["functions"].(*schema.Set).List() { + for _, table := range m["functions"].(*schema.Set).List() { _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s ADD FUNCTION %s.%s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName), pq.QuoteIdentifier(table.(string)))) if err != nil { return err @@ -268,6 +302,40 @@ func resourceRedshiftDatashareAddFunctions(tx *sql.Tx, d *schema.ResourceData, s return nil } +func removeSchemaFromDatashare(tx *sql.Tx, shareName string, m map[string]interface{}) error { + err := resourceRedshiftDatashareRemoveFunctions(tx, shareName, m) + if err != nil { + return err + } + err = resourceRedshiftDatashareRemoveTables(tx, shareName, m) + if err != nil { + return err + } + err = resourceRedshiftDatashareRemoveSchema(tx, shareName, m) + return err +} + +func resourceRedshiftDatashareRemoveFunctions(tx *sql.Tx, shareName string, m map[string]interface{}) error { + schemaName := m["name"].(string) + log.Println("[DEBUG] Removing all functions from datashare") + _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s REMOVE ALL FUNCTIONS IN SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName))) + return err +} + +func resourceRedshiftDatashareRemoveTables(tx *sql.Tx, shareName string, m 
map[string]interface{}) error { + schemaName := m["name"].(string) + log.Println("[DEBUG] Removing all tables from datashare") + _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s REMOVE ALL TABLES IN SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName))) + return err +} + +func resourceRedshiftDatashareRemoveSchema(tx *sql.Tx, shareName string, m map[string]interface{}) error { + schemaName := m["name"].(string) + log.Println("[DEBUG] Removing schema from datashare") + _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s REMOVE SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName))) + return err +} + func resourceRedshiftDatashareRead(db *DBConnection, d *schema.ResourceData) error { var shareName, owner, producerAccount, producerNamespace, created string var publicAccessible bool @@ -381,20 +449,266 @@ func readDatashareSchemas(tx *sql.Tx, shareName string, d *schema.ResourceData) } // convert map to set - schemaSlice := make([]interface{}, 0) + schemas := schema.NewSet(resourceRedshiftDatashareSchemaHash, nil) for _, schemaDef := range schemasByName { - schemaSlice = append(schemaSlice, schemaDef) + schemas.Add(schemaDef) } - schemas := schema.NewSet(resourceRedshiftDatashareSchemaHash, schemaSlice) d.Set("schema", schemas) return nil } func resourceRedshiftDatashareUpdate(db *DBConnection, d *schema.ResourceData) error { - // TODO implement + tx, err := startTransaction(db.client, "") + if err != nil { + return err + } + defer deferredRollback(tx) + + if err := setDatashareName(tx, d); err != nil { + return err + } + + if err := setDatashareOwner(tx, d); err != nil { + return err + } + + if err := setDatasharePubliclyAccessble(tx, d); err != nil { + return err + } + + if err := setDatashareSchemas(tx, d); err != nil { + return err + } + + if err = tx.Commit(); err != nil { + return fmt.Errorf("could not commit transaction: %w", err) + } + + return resourceRedshiftDatashareRead(db, d) +} + +func setDatashareName(tx *sql.Tx, d 
*schema.ResourceData) error { + if !d.HasChange("name") { + return nil + } + oldRaw, newRaw := d.GetChange("name") + oldValue := oldRaw.(string) + newValue := newRaw.(string) + if newValue == "" { + return fmt.Errorf("Error setting datashare name to an empty string") + } + query := fmt.Sprintf("ALTER DATASHARE %s RENAME TO %s", pq.QuoteIdentifier(oldValue), pq.QuoteIdentifier(newValue)) + if _, err := tx.Exec(query); err != nil { + return fmt.Errorf("Error updating datashare NAME :%w", err) + } return nil } +func setDatashareOwner(tx *sql.Tx, d *schema.ResourceData) error { + if !d.HasChange("owner") { + return nil + } + shareName := d.Get("name").(string) + _, newRaw := d.GetChange("owner") + newValue := newRaw.(string) + if newValue == "" { + newValue = "CURRENT_USER" + } else { + newValue = pq.QuoteIdentifier(newValue) + } + + query := fmt.Sprintf("ALTER DATASHARE %s OWNER TO %s", pq.QuoteIdentifier(shareName), newValue) + if _, err := tx.Exec(query); err != nil { + return fmt.Errorf("Error updating datashare OWNER :%w", err) + } + return nil +} + +func setDatasharePubliclyAccessble(tx *sql.Tx, d *schema.ResourceData) error { + if !d.HasChange("publicly_accessible") { + return nil + } + + shareName := d.Get("name").(string) + newValue := d.Get("publicly_accessible").(bool) + query := fmt.Sprintf("ALTER DATASHARE %s SET PUBLICACCESSIBLE %t", pq.QuoteIdentifier(shareName), newValue) + if _, err := tx.Exec(query); err != nil { + return fmt.Errorf("Error updating datashare PUBLICACCESSBILE :%w", err) + } + return nil +} + +func setDatashareSchemas(tx *sql.Tx, d *schema.ResourceData) error { + if !d.HasChange("schema") { + return nil + } + oldRaw, newRaw := d.GetChange("schema") + if oldRaw == nil { + oldRaw = schema.NewSet(resourceRedshiftDatashareSchemaHash, nil) + } + if newRaw == nil { + newRaw = schema.NewSet(resourceRedshiftDatashareSchemaHash, nil) + } + oldCollapsed, err := resourceRedshiftDatashareCollapseSchemas(oldRaw.(*schema.Set)) + if err != nil { + 
return err + } + newCollapsed, err := resourceRedshiftDatashareCollapseSchemas(newRaw.(*schema.Set)) + if err != nil { + return err + } + + add, remove, _ := computeDatashareSchemaChanges(oldCollapsed, newCollapsed) + shareName := d.Get("name").(string) + for _, s := range add.List() { + if err := addSchemaToDatashare(tx, shareName, s.(map[string]interface{})); err != nil { + return err + } + } + for _, s := range remove.List() { + if err := removeSchemaFromDatashare(tx, shareName, s.(map[string]interface{})); err != nil { + return err + } + } + + // For modifications, we need to see what's changed + //if err := updateDatashareSchemaObjects(tx, shareName, modify); err != nil { + // return err + //} + return nil +} + +// now we just need to deal with modifications to existing datashare schemas. + +/*oldExpanded := resourceRedshiftDatashareExpandSchemas(oldCollapsed) + log.Printf("[DEBUG] Old schemas: %#v\n", oldExpanded) + newExpanded := resourceRedshiftDatashareExpandSchemas(newCollapsed) + log.Printf("[DEBUG] New schemas: %#v\n", newExpanded) + + remove := oldExpanded.Difference(newExpanded).List() + log.Printf("[DEBUG] schemas to remove: %#v\n", remove) + for _, object := range remove { + log.Printf("[DEBUG] Remove %#v\n", object) + } + + add := newExpanded.Difference(oldExpanded).List() + log.Printf("[DEBUG] schemas to add: %#v\n", add) + for _, object := range add { + log.Printf("[DEBUG] Add %#v\n", object) + } + return nil +}*/ + +func computeDatashareSchemaChanges(old *schema.Set, new *schema.Set) (add *schema.Set, remove *schema.Set, modify *schema.Set) { + add = schema.NewSet(resourceRedshiftDatashareSchemaHash, nil) + remove = schema.NewSet(resourceRedshiftDatashareSchemaHash, nil) + modify = schema.NewSet(resourceRedshiftDatashareSchemaHash, nil) + + oldNames := schema.NewSet(schema.HashString, nil) + for _, s := range old.List() { + m := s.(map[string]interface{}) + oldNames.Add(m["name"]) + } + newNames := schema.NewSet(schema.HashString, nil) + for _, 
s := range new.List() { + m := s.(map[string]interface{}) + newNames.Add(m["name"]) + } + removeNames := oldNames.Difference(newNames) + addNames := newNames.Difference(oldNames) + + // populate remove result + for _, s := range old.List() { + m := s.(map[string]interface{}) + if removeNames.Contains(m["name"]) { + remove.Add(s) + } + } + + // populate add/modify result + for _, s := range new.List() { + m := s.(map[string]interface{}) + if addNames.Contains(m["name"]) { + add.Add(s) + } else { + modify.Add(s) + } + } + + return +} + +func resourceRedshiftDatashareExpandSchemas(schemas *schema.Set) *schema.Set { + keysToExpand := []string{"tables", "functions"} + normalized := schema.NewSet(resourceRedshiftDatashareSchemaHash, nil) + for _, rawObject := range schemas.List() { + m := rawObject.(map[string]interface{}) + for _, key := range keysToExpand { + item, exists := m[key] + if exists { + for _, v := range item.(*schema.Set).List() { + newV := schema.NewSet(schema.HashString, nil) + newV.Add(v) + newSchemaConfig := resourceRedshiftDatashareCopySchemaObject(m, key, newV) + normalized.Add(newSchemaConfig) + } + } + } + } + return normalized +} + +func resourceRedshiftDatashareCollapseSchemas(schemas *schema.Set) (*schema.Set, error) { + keysToCollapse := []string{"tables", "functions"} + schemasByName := make(map[string]map[string]interface{}) + for _, rawObject := range schemas.List() { + m := rawObject.(map[string]interface{}) + name := m["name"].(string) + current, found := schemasByName[name] + if !found { + schemasByName[name] = m + current = m + } else { + // Due to some weirdness with how schema.TypeSet hashing works, we can end up in a situation where we have + // multiple attribute blocks for the same datashare schema. + // We're fine as long as all of the blocks use the same mode. 
+ if current["mode"] != m["mode"] { + return nil, fmt.Errorf("Found multiple schema declarations for schema %s with different modes.", name) + } + } + for _, key := range keysToCollapse { + if currentObjects, found := current[key]; found { + if objects, ok := m[key]; ok { + current[key] = currentObjects.(*schema.Set).Union(objects.(*schema.Set)) + } + } else { + if objects, ok := m[key]; ok { + current[key] = objects + } + } + } + } + results := schema.NewSet(resourceRedshiftDatashareSchemaHash, nil) + for _, m := range schemasByName { + results.Add(m) + } + return results, nil +} + +func resourceRedshiftDatashareCopySchemaObject(src map[string]interface{}, k string, v interface{}) map[string]interface{} { + keysToCopy := []string{"name", "mode"} + dst := make(map[string]interface{}) + for _, key := range keysToCopy { + if val, ok := src[key]; ok { + dst[key] = val + } + } + if k != "" { + dst[k] = v + } + return dst +} + func resourceRedshiftDatashareDelete(db *DBConnection, d *schema.ResourceData) error { shareName := d.Get("name").(string) log.Println("[DEBUG] deleting datashare") From d993620a0549848b0665f482ce6275ecedaead8b Mon Sep 17 00:00:00 2001 From: Steve Swor Date: Thu, 12 Aug 2021 16:49:44 +1000 Subject: [PATCH 3/5] clean up data share examples --- docs/resources/datashare.md | 40 ++++++++++++++----- .../resources/redshift_datashare/resource.tf | 40 ++++++++++++++----- 2 files changed, 60 insertions(+), 20 deletions(-) diff --git a/docs/resources/datashare.md b/docs/resources/datashare.md index dde6dae..409294c 100644 --- a/docs/resources/datashare.md +++ b/docs/resources/datashare.md @@ -25,24 +25,24 @@ such as RA3. ## Example Usage ```terraform -# Example: data share which includes all tables/views and functions in the specified schema. -# New tables/views and functions are automatically added to the datashare. 
-resource "redshift_datashare" "datashare_auto" { - name = "my_automatic_datashare" # Required +# Example: Datashare that can only be consumed by a non-public Redshift cluster. +resource "redshift_datashare" "private_datashare" { + name = "my_private_datashare" # Required owner = "my_user" # Optional + # Example of adding a schema to a data share in "auto" mode. + # All tables/views and functions in the schema are added to the datashare, + # and redshift will automatically add newly-created tables/views and functions + # to the datashare without needing to re-run terraform. schema { name = "public" # Required mode = "auto" # Required } -} - -# Example: data share which explicitly specifies tables/views and functions -resource "redshift_datashare" "datashare_manual" { - name = "my_manual_datashare" # Required + # Example of ading a schema to a data share in "manual" mode. + # Only the specified tables/views and functions will be added to the data share. schema { - name = "public" # Required + name = "other" # Required mode = "manual" # Required tables = [ # Optional. If unspecified then no tables/views will be added. "my_table", @@ -61,10 +61,30 @@ resource "redshift_datashare" "publicly_accessible_datashare" { name = "my_public_datashare" # Required publicly_accessible = true # Optional. Default is `false` + # Example of adding a schema to a data share in "auto" mode. + # All tables/views and functions in the schema are added to the datashare, + # and redshift will automatically add newly-created tables/views and functions + # to the datashare without needing to re-run terraform. schema { name = "public" # Required mode = "auto" # Required } + + # Example of ading a schema to a data share in "manual" mode. + # Only the specified tables/views and functions will be added to the data share. + schema { + name = "other" # Required + mode = "manual" # Required + tables = [ # Optional. If unspecified then no tables/views will be added. 
+ "my_table", + "my_view", + "my_late_binding_view", + "my_materialized_view", + ] + functions = [ # Optional. If unspecified then no functions will be added. + "my_sql_udf", + ] + } } ``` diff --git a/examples/resources/redshift_datashare/resource.tf b/examples/resources/redshift_datashare/resource.tf index 8694cd7..1ea39a5 100644 --- a/examples/resources/redshift_datashare/resource.tf +++ b/examples/resources/redshift_datashare/resource.tf @@ -1,21 +1,21 @@ -# Example: data share which includes all tables/views and functions in the specified schema. -# New tables/views and functions are automatically added to the datashare. -resource "redshift_datashare" "datashare_auto" { - name = "my_automatic_datashare" # Required +# Example: Datashare that can only be consumed by a non-public Redshift cluster. +resource "redshift_datashare" "private_datashare" { + name = "my_private_datashare" # Required owner = "my_user" # Optional + # Example of adding a schema to a data share in "auto" mode. + # All tables/views and functions in the schema are added to the datashare, + # and redshift will automatically add newly-created tables/views and functions + # to the datashare without needing to re-run terraform. schema { name = "public" # Required mode = "auto" # Required } -} - -# Example: data share which explicitly specifies tables/views and functions -resource "redshift_datashare" "datashare_manual" { - name = "my_manual_datashare" # Required + # Example of ading a schema to a data share in "manual" mode. + # Only the specified tables/views and functions will be added to the data share. schema { - name = "public" # Required + name = "other" # Required mode = "manual" # Required tables = [ # Optional. If unspecified then no tables/views will be added. "my_table", @@ -34,8 +34,28 @@ resource "redshift_datashare" "publicly_accessible_datashare" { name = "my_public_datashare" # Required publicly_accessible = true # Optional. 
Default is `false` + # Example of adding a schema to a data share in "auto" mode. + # All tables/views and functions in the schema are added to the datashare, + # and redshift will automatically add newly-created tables/views and functions + # to the datashare without needing to re-run terraform. schema { name = "public" # Required mode = "auto" # Required } + + # Example of ading a schema to a data share in "manual" mode. + # Only the specified tables/views and functions will be added to the data share. + schema { + name = "other" # Required + mode = "manual" # Required + tables = [ # Optional. If unspecified then no tables/views will be added. + "my_table", + "my_view", + "my_late_binding_view", + "my_materialized_view", + ] + functions = [ # Optional. If unspecified then no functions will be added. + "my_sql_udf", + ] + } } From 207c4b7c64369f4e6c890dc69accdf08046e6c1a Mon Sep 17 00:00:00 2001 From: Steve Swor Date: Thu, 12 Aug 2021 17:14:29 +1000 Subject: [PATCH 4/5] Throwaway commit. 
Compute modifications but do not execute them --- redshift/helpers.go | 18 +++++++++++++++++ redshift/resource_redshift_datashare.go | 26 +++++++++++++++++-------- 2 files changed, 36 insertions(+), 8 deletions(-) diff --git a/redshift/helpers.go b/redshift/helpers.go index 985d132..8be463e 100644 --- a/redshift/helpers.go +++ b/redshift/helpers.go @@ -147,3 +147,21 @@ func splitCsvAndTrim(raw string, delimiter rune) ([]string, error) { } return result, nil } + +func setToMap(set *schema.Set, key string) map[string]map[string]interface{} { + result := make(map[string]map[string]interface{}) + for _, s := range set.List() { + m := s.(map[string]interface{}) + id := m[key].(string) + result[id] = m + } + return result +} + +func mapToSet(mapOfMaps map[string]map[string]interface{}, hashFunction schema.SchemaSetFunc) *schema.Set { + result := schema.NewSet(hashFunction, nil) + for _, m := range mapOfMaps { + result.Add(m) + } + return result +} diff --git a/redshift/resource_redshift_datashare.go b/redshift/resource_redshift_datashare.go index bd8c380..48d7a51 100644 --- a/redshift/resource_redshift_datashare.go +++ b/redshift/resource_redshift_datashare.go @@ -558,7 +558,7 @@ func setDatashareSchemas(tx *sql.Tx, d *schema.ResourceData) error { return err } - add, remove, _ := computeDatashareSchemaChanges(oldCollapsed, newCollapsed) + add, remove, modify := computeDatashareSchemaChanges(oldCollapsed, newCollapsed) shareName := d.Get("name").(string) for _, s := range add.List() { if err := addSchemaToDatashare(tx, shareName, s.(map[string]interface{})); err != nil { @@ -572,16 +572,26 @@ func setDatashareSchemas(tx *sql.Tx, d *schema.ResourceData) error { } // For modifications, we need to see what's changed - //if err := updateDatashareSchemaObjects(tx, shareName, modify); err != nil { - // return err - //} + oldCollapsedMap := setToMap(oldCollapsed, "name") + for _, s := range modify.List() { + after := s.(map[string]interface{}) + schemaName := 
after["name"].(string) + before := oldCollapsedMap[schemaName] + if err := updateDatashareSchemaObjects(tx, shareName, before, after); err != nil { + return err + } + } return nil } -// now we just need to deal with modifications to existing datashare schemas. - -/*oldExpanded := resourceRedshiftDatashareExpandSchemas(oldCollapsed) +func updateDatashareSchemaObjects(tx *sql.Tx, shareName string, before map[string]interface{}, after map[string]interface{}) error { + // now we just need to deal with modifications to existing datashare schemas. + oldCollapsed := schema.NewSet(resourceRedshiftDatashareSchemaHash, nil) + oldCollapsed.Add(before) + oldExpanded := resourceRedshiftDatashareExpandSchemas(oldCollapsed) log.Printf("[DEBUG] Old schemas: %#v\n", oldExpanded) + newCollapsed := schema.NewSet(resourceRedshiftDatashareSchemaHash, nil) + newCollapsed.Add(after) newExpanded := resourceRedshiftDatashareExpandSchemas(newCollapsed) log.Printf("[DEBUG] New schemas: %#v\n", newExpanded) @@ -597,7 +607,7 @@ func setDatashareSchemas(tx *sql.Tx, d *schema.ResourceData) error { log.Printf("[DEBUG] Add %#v\n", object) } return nil -}*/ +} func computeDatashareSchemaChanges(old *schema.Set, new *schema.Set) (add *schema.Set, remove *schema.Set, modify *schema.Set) { add = schema.NewSet(resourceRedshiftDatashareSchemaHash, nil) From 34987b951dbeb098ee040ff426b54ac2bc7edb7b Mon Sep 17 00:00:00 2001 From: Steve Swor Date: Fri, 13 Aug 2021 11:19:50 +1000 Subject: [PATCH 5/5] fix update from manual to auto --- redshift/resource_redshift_datashare.go | 160 +++++++++++++++++++----- 1 file changed, 127 insertions(+), 33 deletions(-) diff --git a/redshift/resource_redshift_datashare.go b/redshift/resource_redshift_datashare.go index 48d7a51..b715c48 100644 --- a/redshift/resource_redshift_datashare.go +++ b/redshift/resource_redshift_datashare.go @@ -240,13 +240,16 @@ func addSchemaToDatashare(tx *sql.Tx, shareName string, m map[string]interface{} func 
resourceRedshiftDatashareAddSchema(tx *sql.Tx, shareName string, m map[string]interface{}) error { schemaName := m["name"].(string) mode := m["mode"].(string) - log.Println("[DEBUG] Adding schema to datashare") - _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s ADD SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName))) + query := fmt.Sprintf("ALTER DATASHARE %s ADD SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName)) + log.Printf("[DEBUG] %s\n", query) + _, err := tx.Exec(query) if err != nil { return err } if mode == "auto" { - _, err = tx.Exec(fmt.Sprintf("ALTER DATASHARE %s SET INCLUDENEW = TRUE FOR SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName))) + query = fmt.Sprintf("ALTER DATASHARE %s SET INCLUDENEW = TRUE FOR SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName)) + log.Printf("[DEBUG] %s\n", query) + _, err = tx.Exec(query) if err != nil { return err } @@ -259,15 +262,11 @@ func resourceRedshiftDatashareAddTables(tx *sql.Tx, shareName string, m map[stri mode := m["mode"].(string) switch mode { case "auto": - log.Println("[DEBUG] Adding all tables to datashare") - _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s ADD ALL TABLES IN SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName))) - if err != nil { - return err - } + return resourceRedshiftDatashareAddAllTables(tx, shareName, schemaName) case "manual": log.Println("[DEBUG] Adding individual tables to datashare") - for _, table := range m["tables"].(*schema.Set).List() { - _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s ADD TABLE %s.%s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName), pq.QuoteIdentifier(table.(string)))) + for _, tableName := range m["tables"].(*schema.Set).List() { + err := resourceRedshiftDatashareAddTable(tx, shareName, schemaName, tableName.(string)) if err != nil { return err } @@ -283,15 +282,11 @@ func resourceRedshiftDatashareAddFunctions(tx *sql.Tx, 
shareName string, m map[s mode := m["mode"].(string) switch mode { case "auto": - log.Println("[DEBUG] Adding all functions to datashare") - _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s ADD ALL FUNCTIONS IN SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName))) - if err != nil { - return err - } + return resourceRedshiftDatashareAddAllFunctions(tx, shareName, schemaName) case "manual": log.Println("[DEBUG] Adding individual functions to datashare") - for _, table := range m["functions"].(*schema.Set).List() { - _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s ADD FUNCTION %s.%s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName), pq.QuoteIdentifier(table.(string)))) + for _, functionName := range m["functions"].(*schema.Set).List() { + err := resourceRedshiftDatashareAddFunction(tx, shareName, schemaName, functionName.(string)) if err != nil { return err } @@ -302,12 +297,40 @@ func resourceRedshiftDatashareAddFunctions(tx *sql.Tx, shareName string, m map[s return nil } +func resourceRedshiftDatashareAddAllFunctions(tx *sql.Tx, shareName string, schemaName string) error { + query := fmt.Sprintf("ALTER DATASHARE %s ADD ALL FUNCTIONS IN SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName)) + log.Printf("[DEBUG] %s", query) + _, err := tx.Exec(query) + return err +} + +func resourceRedshiftDatashareAddFunction(tx *sql.Tx, shareName string, schemaName string, functionName string) error { + query := fmt.Sprintf("ALTER DATASHARE %s ADD FUNCTION %s.%s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName), pq.QuoteIdentifier(functionName)) + log.Printf("[DEBUG] %s\n", query) + _, err := tx.Exec(query) + return err +} + +func resourceRedshiftDatashareAddAllTables(tx *sql.Tx, shareName string, schemaName string) error { + query := fmt.Sprintf("ALTER DATASHARE %s ADD ALL TABLES IN SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName)) + log.Printf("[DEBUG] %s\n", query) + _, err := 
tx.Exec(query) + return err +} + +func resourceRedshiftDatashareAddTable(tx *sql.Tx, shareName string, schemaName string, tableName string) error { + query := fmt.Sprintf("ALTER DATASHARE %s ADD TABLE %s.%s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName), pq.QuoteIdentifier(tableName)) + log.Printf("[DEBUG] %s\n", query) + _, err := tx.Exec(query) + return err +} + func removeSchemaFromDatashare(tx *sql.Tx, shareName string, m map[string]interface{}) error { - err := resourceRedshiftDatashareRemoveFunctions(tx, shareName, m) + err := resourceRedshiftDatashareRemoveAllFunctions(tx, shareName, m) if err != nil { return err } - err = resourceRedshiftDatashareRemoveTables(tx, shareName, m) + err = resourceRedshiftDatashareRemoveAllTables(tx, shareName, m) if err != nil { return err } @@ -315,24 +338,41 @@ func removeSchemaFromDatashare(tx *sql.Tx, shareName string, m map[string]interf return err } -func resourceRedshiftDatashareRemoveFunctions(tx *sql.Tx, shareName string, m map[string]interface{}) error { +func resourceRedshiftDatashareRemoveAllFunctions(tx *sql.Tx, shareName string, m map[string]interface{}) error { schemaName := m["name"].(string) - log.Println("[DEBUG] Removing all functions from datashare") - _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s REMOVE ALL FUNCTIONS IN SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName))) + query := fmt.Sprintf("ALTER DATASHARE %s REMOVE ALL FUNCTIONS IN SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName)) + log.Printf("[DEBUG] %s\n", query) + _, err := tx.Exec(query) return err } -func resourceRedshiftDatashareRemoveTables(tx *sql.Tx, shareName string, m map[string]interface{}) error { +func resourceRedshiftDatashareRemoveFunction(tx *sql.Tx, shareName string, schemaName string, functionName string) error { + query := fmt.Sprintf("ALTER DATASHARE %s REMOVE FUNCTION %s.%s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName), 
pq.QuoteIdentifier(functionName)) + log.Printf("[DEBUG] %s\n", query) + _, err := tx.Exec(query) + return err +} + +func resourceRedshiftDatashareRemoveAllTables(tx *sql.Tx, shareName string, m map[string]interface{}) error { schemaName := m["name"].(string) - log.Println("[DEBUG] Removing all tables from datashare") - _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s REMOVE ALL TABLES IN SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName))) + query := fmt.Sprintf("ALTER DATASHARE %s REMOVE ALL TABLES IN SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName)) + log.Printf("[DEBUG] %s\n", query) + _, err := tx.Exec(query) + return err +} + +func resourceRedshiftDatashareRemoveTable(tx *sql.Tx, shareName string, schemaName string, tableName string) error { + query := fmt.Sprintf("ALTER DATASHARE %s REMOVE TABLE %s.%s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName), pq.QuoteIdentifier(tableName)) + log.Printf("[DEBUG] %s\n", query) + _, err := tx.Exec(query) return err } func resourceRedshiftDatashareRemoveSchema(tx *sql.Tx, shareName string, m map[string]interface{}) error { schemaName := m["name"].(string) - log.Println("[DEBUG] Removing schema from datashare") - _, err := tx.Exec(fmt.Sprintf("ALTER DATASHARE %s REMOVE SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName))) + query := fmt.Sprintf("ALTER DATASHARE %s REMOVE SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName)) + log.Printf("[DEBUG] %s\n", query) + _, err := tx.Exec(query) return err } @@ -586,25 +626,79 @@ func setDatashareSchemas(tx *sql.Tx, d *schema.ResourceData) error { func updateDatashareSchemaObjects(tx *sql.Tx, shareName string, before map[string]interface{}, after map[string]interface{}) error { // now we just need to deal with modifications to existing datashare schemas. 
+ schemaName := after["name"].(string) + if strings.ToLower(after["mode"].(string)) == "auto" { + log.Printf("[INFO] Changing schema %s in datashare %s from manual mode to auto mode\n", schemaName, shareName) + // short-circuit the complicated logic below because we're adding all tables/functions. + query := fmt.Sprintf("ALTER DATASHARE %s SET INCLUDENEW = TRUE FOR SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName)) + log.Printf("[DEBUG] %s\n", query) + if _, err := tx.Exec(query); err != nil { + return err + } + err := resourceRedshiftDatashareAddAllTables(tx, shareName, schemaName) + if err != nil { + return err + } + return resourceRedshiftDatashareAddAllFunctions(tx, shareName, schemaName) + } + // manual mode. Process individual table/view changes. + if strings.ToLower(before["mode"].(string)) == "auto" { + log.Printf("[INFO] Changing schema %s in datashare %s from auto mode to manual mode\n", schemaName, shareName) + query := fmt.Sprintf("ALTER DATASHARE %s SET INCLUDENEW = FALSE FOR SCHEMA %s", pq.QuoteIdentifier(shareName), pq.QuoteIdentifier(schemaName)) + log.Printf("[DEBUG] %s\n", query) + if _, err := tx.Exec(query); err != nil { + return err + } + } oldCollapsed := schema.NewSet(resourceRedshiftDatashareSchemaHash, nil) oldCollapsed.Add(before) oldExpanded := resourceRedshiftDatashareExpandSchemas(oldCollapsed) - log.Printf("[DEBUG] Old schemas: %#v\n", oldExpanded) newCollapsed := schema.NewSet(resourceRedshiftDatashareSchemaHash, nil) newCollapsed.Add(after) newExpanded := resourceRedshiftDatashareExpandSchemas(newCollapsed) - log.Printf("[DEBUG] New schemas: %#v\n", newExpanded) remove := oldExpanded.Difference(newExpanded).List() - log.Printf("[DEBUG] schemas to remove: %#v\n", remove) for _, object := range remove { - log.Printf("[DEBUG] Remove %#v\n", object) + m := object.(map[string]interface{}) + schemaName := m["name"].(string) + if tables, ok := m["tables"]; ok { + for _, tableName := range tables.(*schema.Set).List() 
{ + err := resourceRedshiftDatashareRemoveTable(tx, shareName, schemaName, tableName.(string)) + if err != nil { + return err + } + } + } + if functions, ok := m["functions"]; ok { + for _, functionName := range functions.(*schema.Set).List() { + err := resourceRedshiftDatashareRemoveFunction(tx, shareName, schemaName, functionName.(string)) + if err != nil { + return err + } + } + } } add := newExpanded.Difference(oldExpanded).List() - log.Printf("[DEBUG] schemas to add: %#v\n", add) for _, object := range add { - log.Printf("[DEBUG] Add %#v\n", object) + m := object.(map[string]interface{}) + schemaName := m["name"].(string) + if tables, ok := m["tables"]; ok { + for _, tableName := range tables.(*schema.Set).List() { + err := resourceRedshiftDatashareAddTable(tx, shareName, schemaName, tableName.(string)) + if err != nil { + return err + } + } + } + if functions, ok := m["functions"]; ok { + for _, functionName := range functions.(*schema.Set).List() { + err := resourceRedshiftDatashareAddFunction(tx, shareName, schemaName, functionName.(string)) + if err != nil { + return err + } + } + } } return nil }