diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go
index 0975f7ce8b7a..9d85334c167b 100644
--- a/azurerm/helpers/azure/hdinsight.go
+++ b/azurerm/helpers/azure/hdinsight.go
@@ -153,7 +153,7 @@ func FlattenHDInsightsConfigurations(input map[string]*string) []interface{} {
 func SchemaHDInsightsStorageAccounts() *schema.Schema {
 	return &schema.Schema{
 		Type:     schema.TypeList,
-		Required: true,
+		Optional: true,
 		Elem: &schema.Resource{
 			Schema: map[string]*schema.Schema{
 				"storage_account_key": {
@@ -179,20 +179,60 @@ func SchemaHDInsightsStorageAccounts() *schema.Schema {
 	}
 }
 
-func ExpandHDInsightsStorageAccounts(input []interface{}) (*[]hdinsight.StorageAccount, error) {
+func SchemaHDInsightsGen2StorageAccounts() *schema.Schema {
+	return &schema.Schema{
+		Type:     schema.TypeList,
+		Optional: true,
+		// HDInsight doesn't seem to allow adding more than one gen2 storage account right now.
+		MaxItems: 1,
+		Elem: &schema.Resource{
+			Schema: map[string]*schema.Schema{
+				"storage_resource_id": {
+					Type:         schema.TypeString,
+					Required:     true,
+					ForceNew:     true,
+					ValidateFunc: ValidateResourceID,
+				},
+				"filesystem_id": {
+					Type:         schema.TypeString,
+					Required:     true,
+					ForceNew:     true,
+					ValidateFunc: validate.NoEmptyStrings,
+				},
+				"managed_identity_resource_id": {
+					Type:         schema.TypeString,
+					Required:     true,
+					ForceNew:     true,
+					ValidateFunc: ValidateResourceID,
+				},
+				"is_default": {
+					Type:     schema.TypeBool,
+					Required: true,
+					ForceNew: true,
+				},
+			},
+		},
+	}
+}
+
+// ExpandHDInsightsStorageAccounts returns an array of StorageAccount structs, as well as a ClusterIdentity
+// populated with any managed identities required for accessing Data Lake Gen2 storage.
+func ExpandHDInsightsStorageAccounts(storageAccounts []interface{}, gen2storageAccounts []interface{}) (*[]hdinsight.StorageAccount, *hdinsight.ClusterIdentity, error) {
 	results := make([]hdinsight.StorageAccount, 0)
 
-	for _, vs := range input {
+	var clusterIdentity *hdinsight.ClusterIdentity
+
+	for _, vs := range storageAccounts {
 		v := vs.(map[string]interface{})
 
 		storageAccountKey := v["storage_account_key"].(string)
-		storageContainerId := v["storage_container_id"].(string)
+		storageContainerID := v["storage_container_id"].(string)
 		isDefault := v["is_default"].(bool)
 
-		// https://foo.blob.core.windows.net/example
-		uri, err := url.Parse(storageContainerId)
+		uri, err := url.Parse(storageContainerID)
+
 		if err != nil {
-			return nil, fmt.Errorf("Error parsing %q: %s", storageContainerId, err)
+			return nil, nil, fmt.Errorf("Error parsing %q: %s", storageContainerID, err)
 		}
 
 		result := hdinsight.StorageAccount{
@@ -204,7 +244,42 @@ func ExpandHDInsightsStorageAccounts(input []interface{}) (*[]hdinsight.StorageA
 		results = append(results, result)
 	}
 
-	return &results, nil
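+	// Gen2 accounts are addressed by their ABFS endpoint; the account host and the filesystem name are both derived from the filesystem_id URI below.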
+	for _, vs := range gen2storageAccounts {
+		v := vs.(map[string]interface{})
+
+		fileSystemID := v["filesystem_id"].(string)
+		storageResourceID := v["storage_resource_id"].(string)
+		managedIdentityResourceID := v["managed_identity_resource_id"].(string)
+
+		isDefault := v["is_default"].(bool)
+
+		uri, err := url.Parse(fileSystemID)
+		if err != nil {
+			return nil, nil, fmt.Errorf("Error parsing %q: %s", fileSystemID, err)
+		}
+
+		if clusterIdentity == nil {
+			clusterIdentity = &hdinsight.ClusterIdentity{
+				Type:                   hdinsight.UserAssigned,
+				UserAssignedIdentities: make(map[string]*hdinsight.ClusterIdentityUserAssignedIdentitiesValue),
+			}
+		}
+
+		// ... API doesn't seem to require client_id or principal_id, so pass in an empty ClusterIdentityUserAssignedIdentitiesValue
+		clusterIdentity.UserAssignedIdentities[managedIdentityResourceID] = &hdinsight.ClusterIdentityUserAssignedIdentitiesValue{}
+
+		result := hdinsight.StorageAccount{
+			Name:          utils.String(uri.Host), // https://storageaccountname.dfs.core.windows.net/filesystemname -> storageaccountname.dfs.core.windows.net
+			ResourceID:    utils.String(storageResourceID),
+			FileSystem:    utils.String(uri.Path[1:]), // https://storageaccountname.dfs.core.windows.net/filesystemname -> filesystemname
+			MsiResourceID: utils.String(managedIdentityResourceID),
+			IsDefault:     utils.Bool(isDefault),
+		}
+		results = append(results, result)
+	}
+
+	return &results, clusterIdentity, nil
 }
 
 type HDInsightNodeDefinition struct {
diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go
index aa749d3f163f..7057ae67d269 100644
--- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go
+++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go
@@ -93,6 +93,8 @@ func resourceArmHDInsightHadoopCluster() *schema.Resource {
 
 			"storage_account": azure.SchemaHDInsightsStorageAccounts(),
 
+			"storage_account_gen2": azure.SchemaHDInsightsGen2StorageAccounts(),
+
 			"roles": {
 				Type:     schema.TypeList,
 				Required: true,
@@ -184,7 +186,9 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf
 	gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw)
 
 	storageAccountsRaw := d.Get("storage_account").([]interface{})
-	storageAccounts, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw)
+	storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{})
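+	// identity is only non-nil when at least one Gen2 storage account is configured; it is attached to the cluster definition below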
"roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + }, + }, + }, + }) +} + +func TestAccAzureRMHDInsightHadoopCluster_gen2AndBlobStorage(t *testing.T) { + resourceName := "azurerm_hdinsight_hadoop_cluster.test" + ri := tf.AccRandTimeInt() + rs := strings.ToLower(acctest.RandString(11)) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy("azurerm_hdinsight_hadoop_cluster"), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightHadoopCluster_gen2AndBlobStorage(ri, rs, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + }, + }, + }, + }) +} + func testAccAzureRMHDInsightHadoopCluster_basic(rInt int, rString string, location string) string { template := testAccAzureRMHDInsightHadoopCluster_template(rInt, rString, location) return fmt.Sprintf(` @@ -766,6 +840,122 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { `, template, rInt, numEdgeNodes, instanceType) } +func testAccAzureRMHDInsightHadoopCluster_gen2storage(rInt int, rString string, location string) string { + template := testAccAzureRMHDInsightHadoopCluster_gen2template(rInt, rString, location) + return fmt.Sprintf(` +%s +resource "azurerm_hdinsight_hadoop_cluster" "test" { + depends_on = [azurerm_role_assignment.test] + + name = "acctesthdi-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + cluster_version = "3.6" + tier = "Standard" + component_version { + hadoop = "2.7" + } + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + storage_account_gen2 { + storage_resource_id = azurerm_storage_account.gen2test.id + filesystem_id = azurerm_storage_data_lake_gen2_filesystem.gen2test.id + managed_identity_resource_id = azurerm_user_assigned_identity.test.id + is_default = true + } + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" 
+func testAccAzureRMHDInsightHadoopCluster_gen2AndBlobStorage(rInt int, rString string, location string) string {
+	template := testAccAzureRMHDInsightHadoopCluster_gen2template(rInt, rString, location)
+
+	return fmt.Sprintf(`
+%s
+resource "azurerm_storage_account" "test" {
+  name                     = "acctestsa%s"
+  resource_group_name      = "${azurerm_resource_group.test.name}"
+  location                 = "${azurerm_resource_group.test.location}"
+  account_tier             = "Standard"
+  account_replication_type = "LRS"
+}
+
+resource "azurerm_storage_container" "test" {
+  name                  = "acctest"
+  storage_account_name  = "${azurerm_storage_account.test.name}"
+  container_access_type = "private"
+}
+
+resource "azurerm_hdinsight_hadoop_cluster" "test" {
+  depends_on = [azurerm_role_assignment.test]
+
+  name                = "acctesthdi-%d"
+  resource_group_name = "${azurerm_resource_group.test.name}"
+  location            = "${azurerm_resource_group.test.location}"
+  cluster_version     = "3.6"
+  tier                = "Standard"
+  component_version {
+    hadoop = "2.7"
+  }
+  gateway {
+    enabled  = true
+    username = "acctestusrgw"
+    password = "TerrAform123!"
+  }
+  storage_account_gen2 {
+    storage_resource_id          = azurerm_storage_account.gen2test.id
+    filesystem_id                = azurerm_storage_data_lake_gen2_filesystem.gen2test.id
+    managed_identity_resource_id = azurerm_user_assigned_identity.test.id
+    is_default                   = true
+  }
+  storage_account {
+    storage_container_id = "${azurerm_storage_container.test.id}"
+    storage_account_key  = "${azurerm_storage_account.test.primary_access_key}"
+    is_default           = false
+  }
+  roles {
+    head_node {
+      vm_size  = "Standard_D3_v2"
+      username = "acctestusrvm"
+      password = "AccTestvdSC4daf986!"
+    }
+    worker_node {
+      vm_size               = "Standard_D4_V2"
+      username              = "acctestusrvm"
+      password              = "AccTestvdSC4daf986!"
+      target_instance_count = 2
+    }
+    zookeeper_node {
+      vm_size  = "Standard_D3_v2"
+      username = "acctestusrvm"
+      password = "AccTestvdSC4daf986!"
+    }
+  }
+}
+`, template, rString, rInt)
+}
+
 func testAccAzureRMHDInsightHadoopCluster_template(rInt int, rString string, location string) string {
 	return fmt.Sprintf(`
 resource "azurerm_resource_group" "test" {
@@ -783,9 +973,50 @@ resource "azurerm_storage_account" "test" {
 
 resource "azurerm_storage_container" "test" {
   name                  = "acctest"
-  resource_group_name   = "${azurerm_resource_group.test.name}"
   storage_account_name  = "${azurerm_storage_account.test.name}"
   container_access_type = "private"
 }
+
+`, rInt, location, rString)
+}
+
+func testAccAzureRMHDInsightHadoopCluster_gen2template(rInt int, rString string, location string) string {
+	return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+  name     = "acctestRG-%d"
+  location = "%s"
+}
+
+resource "azurerm_storage_account" "gen2test" {
+  name                     = "accgen2test%s"
+  resource_group_name      = azurerm_resource_group.test.name
+  location                 = azurerm_resource_group.test.location
+  account_kind             = "StorageV2"
+  account_tier             = "Standard"
+  account_replication_type = "LRS"
+  is_hns_enabled           = true
+}
+
+resource "azurerm_storage_data_lake_gen2_filesystem" "gen2test" {
+  name               = "acctest"
+  storage_account_id = azurerm_storage_account.gen2test.id
+}
+
+resource "azurerm_user_assigned_identity" "test" {
+  resource_group_name = "${azurerm_resource_group.test.name}"
+  location            = "${azurerm_resource_group.test.location}"
+
+  name = "test-identity"
+}
+
+data "azurerm_subscription" "primary" {}
+
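+# The cluster reaches the Gen2 filesystem through the user-assigned identity, so the identity needs a data-plane role on the storage account; subscription scope is used here purely to keep the test simple.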
+resource "azurerm_role_assignment" "test" {
+  scope                = "${data.azurerm_subscription.primary.id}"
+  role_definition_name = "Storage Blob Data Owner"
+  principal_id         = "${azurerm_user_assigned_identity.test.principal_id}"
+}
+
 `, rInt, location, rString)
 }
diff --git a/azurerm/resource_arm_hdinsight_hbase_cluster.go b/azurerm/resource_arm_hdinsight_hbase_cluster.go
index 2c0a90f2b4d2..67fbf0e7dbd5 100644
--- a/azurerm/resource_arm_hdinsight_hbase_cluster.go
+++ b/azurerm/resource_arm_hdinsight_hbase_cluster.go
@@ -87,6 +87,8 @@ func resourceArmHDInsightHBaseCluster() *schema.Resource {
 
 			"storage_account": azure.SchemaHDInsightsStorageAccounts(),
 
+			"storage_account_gen2": azure.SchemaHDInsightsGen2StorageAccounts(),
+
 			"roles": {
 				Type:     schema.TypeList,
 				Required: true,
@@ -136,7 +138,8 @@ func resourceArmHDInsightHBaseClusterCreate(d *schema.ResourceData, meta interfa
 	gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw)
 
 	storageAccountsRaw := d.Get("storage_account").([]interface{})
-	storageAccounts, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw)
+	storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{})
+	storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw)
 	if err != nil {
 		return fmt.Errorf("Error expanding `storage_account`: %s", err)
 	}
@@ -183,7 +186,8 @@ func resourceArmHDInsightHBaseClusterCreate(d *schema.ResourceData, meta interfa
 				Roles: roles,
 			},
 		},
-		Tags: tags.Expand(t),
+		Tags:     tags.Expand(t),
+		Identity: identity,
 	}
 
 	future, err := client.Create(ctx, resourceGroup, name, params)
 	if err != nil {
diff --git a/azurerm/resource_arm_hdinsight_hbase_cluster_test.go b/azurerm/resource_arm_hdinsight_hbase_cluster_test.go
index 93bc42b0d06c..e528c3bdb274 100644
--- a/azurerm/resource_arm_hdinsight_hbase_cluster_test.go
+++ b/azurerm/resource_arm_hdinsight_hbase_cluster_test.go
@@ -48,6 +48,43 @@ func TestAccAzureRMHDInsightHBaseCluster_basic(t *testing.T) {
 	})
 }
 
+func TestAccAzureRMHDInsightHBaseCluster_gen2basic(t *testing.T) {
+	resourceName := "azurerm_hdinsight_hbase_cluster.test"
"azurerm_hdinsight_hbase_cluster.test" + ri := tf.AccRandTimeInt() + rs := strings.ToLower(acctest.RandString(11)) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy("azurerm_hdinsight_hbase_cluster"), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightHBaseCluster_gen2basic(ri, rs, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + }, + }, + }, + }) +} + func TestAccAzureRMHDInsightHBaseCluster_requiresImport(t *testing.T) { if !features.ShouldResourcesBeImported() { t.Skip("Skipping since resources aren't required to be imported") @@ -302,6 +339,59 @@ resource "azurerm_hdinsight_hbase_cluster" "test" { `, template, rInt) } +func testAccAzureRMHDInsightHBaseCluster_gen2basic(rInt int, rString string, location string) string { + template := testAccAzureRMHDInsightHBaseCluster_gen2template(rInt, rString, location) + return fmt.Sprintf(` +%s + +resource "azurerm_hdinsight_hbase_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + cluster_version = "3.6" + tier = "Standard" + + component_version { + hbase = "1.1" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account_gen2 { + storage_resource_id = azurerm_storage_account.gen2test.id + filesystem_id = azurerm_storage_data_lake_gen2_filesystem.gen2test.id + managed_identity_resource_id = azurerm_user_assigned_identity.test.id + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D3_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D3_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + + zookeeper_node { + vm_size = "Standard_D3_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" 
+    }
+  }
+}
+`, template, rInt)
+}
+
 func testAccAzureRMHDInsightHBaseCluster_requiresImport(rInt int, rString string, location string) string {
 	template := testAccAzureRMHDInsightHBaseCluster_basic(rInt, rString, location)
 	return fmt.Sprintf(`
@@ -604,3 +694,44 @@ resource "azurerm_storage_container" "test" {
 }
 `, rInt, location, rString)
 }
+
+func testAccAzureRMHDInsightHBaseCluster_gen2template(rInt int, rString string, location string) string {
+	return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+  name     = "acctestRG-%d"
+  location = "%s"
+}
+
+resource "azurerm_storage_account" "gen2test" {
+  depends_on = [azurerm_role_assignment.test]
+
+  name                     = "accgen2test%s"
+  resource_group_name      = azurerm_resource_group.test.name
+  location                 = azurerm_resource_group.test.location
+  account_kind             = "StorageV2"
+  account_tier             = "Standard"
+  account_replication_type = "LRS"
+  is_hns_enabled           = true
+}
+
+resource "azurerm_storage_data_lake_gen2_filesystem" "gen2test" {
+  name               = "acctest"
+  storage_account_id = azurerm_storage_account.gen2test.id
+}
+
+resource "azurerm_user_assigned_identity" "test" {
+  resource_group_name = "${azurerm_resource_group.test.name}"
+  location            = "${azurerm_resource_group.test.location}"
+
+  name = "test-identity"
+}
+
+data "azurerm_subscription" "primary" {}
+
+resource "azurerm_role_assignment" "test" {
+  scope                = "${data.azurerm_subscription.primary.id}"
+  role_definition_name = "Storage Blob Data Owner"
+  principal_id         = "${azurerm_user_assigned_identity.test.principal_id}"
+}
+`, rInt, location, rString)
+}
diff --git a/azurerm/resource_arm_hdinsight_interactive_query_cluster.go b/azurerm/resource_arm_hdinsight_interactive_query_cluster.go
index 28a975993cb7..1177e7780c07 100644
--- a/azurerm/resource_arm_hdinsight_interactive_query_cluster.go
+++ b/azurerm/resource_arm_hdinsight_interactive_query_cluster.go
@@ -87,6 +87,8 @@ func resourceArmHDInsightInteractiveQueryCluster() *schema.Resource {
 
 			"storage_account": azure.SchemaHDInsightsStorageAccounts(),
 
+			"storage_account_gen2": azure.SchemaHDInsightsGen2StorageAccounts(),
+
 			"roles": {
 				Type:     schema.TypeList,
 				Required: true,
@@ -136,7 +138,8 @@ func resourceArmHDInsightInteractiveQueryClusterCreate(d *schema.ResourceData, m
 	gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw)
 
 	storageAccountsRaw := d.Get("storage_account").([]interface{})
-	storageAccounts, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw)
+	storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{})
+	storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw)
 	if err != nil {
 		return fmt.Errorf("Error expanding `storage_account`: %s", err)
 	}
@@ -183,7 +186,8 @@ func resourceArmHDInsightInteractiveQueryClusterCreate(d *schema.ResourceData, m
 				Roles: roles,
 			},
 		},
-		Tags: tags.Expand(t),
+		Tags:     tags.Expand(t),
+		Identity: identity,
 	}
 
 	future, err := client.Create(ctx, resourceGroup, name, params)
 	if err != nil {
diff --git a/azurerm/resource_arm_hdinsight_interactive_query_cluster_test.go b/azurerm/resource_arm_hdinsight_interactive_query_cluster_test.go
index 2d0d1b3bd80d..54841b63434c 100644
--- a/azurerm/resource_arm_hdinsight_interactive_query_cluster_test.go
+++ b/azurerm/resource_arm_hdinsight_interactive_query_cluster_test.go
@@ -48,6 +48,43 @@ func TestAccAzureRMHDInsightInteractiveQueryCluster_basic(t *testing.T) {
 	})
 }
 
+func TestAccAzureRMHDInsightInteractiveQueryCluster_gen2basic(t *testing.T) {
+	resourceName := "azurerm_hdinsight_interactive_query_cluster.test"
"azurerm_hdinsight_interactive_query_cluster.test" + ri := tf.AccRandTimeInt() + rs := strings.ToLower(acctest.RandString(11)) + location := testLocation() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy("azurerm_hdinsight_interactive_query_cluster"), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightInteractiveQueryCluster_gen2basic(ri, rs, location), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + }, + }, + }, + }) +} + func TestAccAzureRMHDInsightInteractiveQueryCluster_requiresImport(t *testing.T) { if !features.ShouldResourcesBeImported() { t.Skip("Skipping since resources aren't required to be imported") @@ -302,6 +339,61 @@ resource "azurerm_hdinsight_interactive_query_cluster" "test" { `, template, rInt) } +func testAccAzureRMHDInsightInteractiveQueryCluster_gen2basic(rInt int, rString string, location string) string { + template := testAccAzureRMHDInsightInteractiveQueryCluster_gen2template(rInt, rString, location) + return fmt.Sprintf(` +%s + +resource "azurerm_hdinsight_interactive_query_cluster" "test" { + depends_on = [azurerm_role_assignment.test] + + name = "acctesthdi-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + cluster_version = "3.6" + tier = "Standard" + + component_version { + interactive_hive = "2.1" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account_gen2 { + storage_resource_id = azurerm_storage_account.gen2test.id + filesystem_id = azurerm_storage_data_lake_gen2_filesystem.gen2test.id + managed_identity_resource_id = azurerm_user_assigned_identity.test.id + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D13_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D14_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + + zookeeper_node { + vm_size = "Standard_A4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" 
+    }
+  }
+}
+`, template, rInt)
+}
+
 func testAccAzureRMHDInsightInteractiveQueryCluster_requiresImport(rInt int, rString string, location string) string {
 	template := testAccAzureRMHDInsightInteractiveQueryCluster_basic(rInt, rString, location)
 	return fmt.Sprintf(`
@@ -604,3 +696,42 @@ resource "azurerm_storage_container" "test" {
 }
 `, rInt, location, rString)
 }
+
+func testAccAzureRMHDInsightInteractiveQueryCluster_gen2template(rInt int, rString string, location string) string {
+	return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+  name     = "acctestRG-%d"
+  location = "%s"
+}
+
+resource "azurerm_storage_account" "gen2test" {
+  name                     = "accgen2test%s"
+  resource_group_name      = azurerm_resource_group.test.name
+  location                 = azurerm_resource_group.test.location
+  account_kind             = "StorageV2"
+  account_tier             = "Standard"
+  account_replication_type = "LRS"
+  is_hns_enabled           = true
+}
+
+resource "azurerm_storage_data_lake_gen2_filesystem" "gen2test" {
+  name               = "acctest"
+  storage_account_id = azurerm_storage_account.gen2test.id
+}
+
+resource "azurerm_user_assigned_identity" "test" {
+  resource_group_name = "${azurerm_resource_group.test.name}"
+  location            = "${azurerm_resource_group.test.location}"
+
+  name = "test-identity"
+}
+
+data "azurerm_subscription" "primary" {}
+
+resource "azurerm_role_assignment" "test" {
+  scope                = "${data.azurerm_subscription.primary.id}"
+  role_definition_name = "Storage Blob Data Owner"
+  principal_id         = "${azurerm_user_assigned_identity.test.principal_id}"
+}
+`, rInt, location, rString)
+}
diff --git a/azurerm/resource_arm_hdinsight_kafka_cluster.go b/azurerm/resource_arm_hdinsight_kafka_cluster.go
index d5a7f69baca0..a620196084aa 100644
--- a/azurerm/resource_arm_hdinsight_kafka_cluster.go
+++ b/azurerm/resource_arm_hdinsight_kafka_cluster.go
@@ -88,6 +88,8 @@ func resourceArmHDInsightKafkaCluster() *schema.Resource {
 
 			"storage_account": azure.SchemaHDInsightsStorageAccounts(),
 
+			"storage_account_gen2": azure.SchemaHDInsightsGen2StorageAccounts(),
+
 			"roles": {
 				Type:     schema.TypeList,
 				Required: true,
@@ -137,7 +139,8 @@ func resourceArmHDInsightKafkaClusterCreate(d *schema.ResourceData, meta interfa
 	gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw)
 
 	storageAccountsRaw := d.Get("storage_account").([]interface{})
-	storageAccounts, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw)
+	storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{})
+	storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw)
 	if err != nil {
 		return fmt.Errorf("Error expanding `storage_account`: %s", err)
 	}
@@ -184,7 +187,8 @@ func resourceArmHDInsightKafkaClusterCreate(d *schema.ResourceData, meta interfa
 				Roles: roles,
 			},
 		},
-		Tags: tags.Expand(t),
+		Tags:     tags.Expand(t),
+		Identity: identity,
 	}
 
 	future, err := client.Create(ctx, resourceGroup, name, params)
 	if err != nil {
diff --git a/azurerm/resource_arm_hdinsight_kafka_cluster_test.go b/azurerm/resource_arm_hdinsight_kafka_cluster_test.go
index 866efb9ccc6b..56af36c49311 100644
--- a/azurerm/resource_arm_hdinsight_kafka_cluster_test.go
+++ b/azurerm/resource_arm_hdinsight_kafka_cluster_test.go
@@ -48,6 +48,43 @@ func TestAccAzureRMHDInsightKafkaCluster_basic(t *testing.T) {
 	})
 }
 
+func TestAccAzureRMHDInsightKafkaCluster_gen2storage(t *testing.T) {
+	resourceName := "azurerm_hdinsight_kafka_cluster.test"
+	ri := tf.AccRandTimeInt()
+	rs := strings.ToLower(acctest.RandString(11))
+	location := testLocation()
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testCheckAzureRMHDInsightClusterDestroy("azurerm_hdinsight_kafka_cluster"),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAzureRMHDInsightKafkaCluster_gen2storage(ri, rs, location),
+				Check: resource.ComposeTestCheckFunc(
+					testCheckAzureRMHDInsightClusterExists(resourceName),
+					resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"),
+					resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"),
+				),
+			},
+			{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+				ImportStateVerifyIgnore: []string{
+					"roles.0.head_node.0.password",
+					"roles.0.head_node.0.vm_size",
+					"roles.0.worker_node.0.password",
+					"roles.0.worker_node.0.vm_size",
+					"roles.0.zookeeper_node.0.password",
+					"roles.0.zookeeper_node.0.vm_size",
+					"storage_account",
+				},
+			},
+		},
+	})
+}
+
 func TestAccAzureRMHDInsightKafkaCluster_requiresImport(t *testing.T) {
 	if !features.ShouldResourcesBeImported() {
 		t.Skip("Skipping since resources aren't required to be imported")
@@ -303,6 +340,63 @@ resource "azurerm_hdinsight_kafka_cluster" "test" {
 `, template, rInt)
 }
 
+func testAccAzureRMHDInsightKafkaCluster_gen2storage(rInt int, rString string, location string) string {
+	template := testAccAzureRMHDInsightKafkaCluster_gen2template(rInt, rString, location)
+	return fmt.Sprintf(`
+%s
+
+resource "azurerm_hdinsight_kafka_cluster" "test" {
+  depends_on = [azurerm_role_assignment.test]
+
+  name                = "acctesthdi-%d"
+  resource_group_name = "${azurerm_resource_group.test.name}"
+  location            = "${azurerm_resource_group.test.location}"
+  cluster_version     = "3.6"
+  tier                = "Standard"
+
+  component_version {
+    kafka = "1.1"
+  }
+
+  gateway {
+    enabled  = true
+    username = "acctestusrgw"
+    password = "TerrAform123!"
+  }
+
+  storage_account_gen2 {
+    storage_resource_id          = azurerm_storage_account.gen2test.id
+    filesystem_id                = azurerm_storage_data_lake_gen2_filesystem.gen2test.id
+    managed_identity_resource_id = azurerm_user_assigned_identity.test.id
+    is_default                   = true
+  }
+
+  roles {
+    head_node {
+      vm_size  = "Standard_D3_V2"
+      username = "acctestusrvm"
+      password = "AccTestvdSC4daf986!"
+    }
+
+    worker_node {
+      vm_size                  = "Standard_D3_V2"
+      username                 = "acctestusrvm"
+      password                 = "AccTestvdSC4daf986!"
+      target_instance_count    = 3
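+      # number_of_disks_per_node is mandatory for Kafka worker nodes.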
+      number_of_disks_per_node = 2
+    }
+
+    zookeeper_node {
+      vm_size  = "Standard_D3_V2"
+      username = "acctestusrvm"
+      password = "AccTestvdSC4daf986!"
+    }
+  }
+}
+`, template, rInt)
+}
+
 func testAccAzureRMHDInsightKafkaCluster_requiresImport(rInt int, rString string, location string) string {
 	template := testAccAzureRMHDInsightKafkaCluster_basic(rInt, rString, location)
 	return fmt.Sprintf(`
@@ -609,3 +702,42 @@ resource "azurerm_storage_container" "test" {
 }
 `, rInt, location, rString)
 }
+
+func testAccAzureRMHDInsightKafkaCluster_gen2template(rInt int, rString string, location string) string {
+	return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+  name     = "acctestRG-%d"
+  location = "%s"
+}
+
+resource "azurerm_storage_account" "gen2test" {
+  name                     = "accgen2test%s"
+  resource_group_name      = azurerm_resource_group.test.name
+  location                 = azurerm_resource_group.test.location
+  account_kind             = "StorageV2"
+  account_tier             = "Standard"
+  account_replication_type = "LRS"
+  is_hns_enabled           = true
+}
+
+resource "azurerm_storage_data_lake_gen2_filesystem" "gen2test" {
+  name               = "acctest"
+  storage_account_id = azurerm_storage_account.gen2test.id
+}
+
+resource "azurerm_user_assigned_identity" "test" {
+  resource_group_name = "${azurerm_resource_group.test.name}"
+  location            = "${azurerm_resource_group.test.location}"
+
+  name = "test-identity"
+}
+
+data "azurerm_subscription" "primary" {}
+
+resource "azurerm_role_assignment" "test" {
+  scope                = "${data.azurerm_subscription.primary.id}"
+  role_definition_name = "Storage Blob Data Owner"
+  principal_id         = "${azurerm_user_assigned_identity.test.principal_id}"
+}
+`, rInt, location, rString)
+}
diff --git a/azurerm/resource_arm_hdinsight_ml_services_cluster.go b/azurerm/resource_arm_hdinsight_ml_services_cluster.go
index 2f9ba98a4044..ca469e26b7a9 100644
--- a/azurerm/resource_arm_hdinsight_ml_services_cluster.go
+++ b/azurerm/resource_arm_hdinsight_ml_services_cluster.go
@@ -153,7 +153,8 @@ func resourceArmHDInsightMLServicesClusterCreate(d *schema.ResourceData, meta in
 	gateway := expandHDInsightsMLServicesConfigurations(gatewayRaw, rStudio)
 
 	storageAccountsRaw := d.Get("storage_account").([]interface{})
-	storageAccounts, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw)
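+	// This cluster type doesn't expose a storage_account_gen2 block, so nil is passed for the Gen2 accounts and the returned identity stays nil.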
+	storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, nil)
 	if err != nil {
 		return fmt.Errorf("Error expanding `storage_account`: %s", err)
 	}
@@ -200,7 +200,8 @@ func resourceArmHDInsightMLServicesClusterCreate(d *schema.ResourceData, meta in
 				Roles: roles,
 			},
 		},
-		Tags: tags.Expand(t),
+		Tags:     tags.Expand(t),
+		Identity: identity,
 	}
 
 	future, err := client.Create(ctx, resourceGroup, name, params)
 	if err != nil {
diff --git a/azurerm/resource_arm_hdinsight_rserver_cluster.go b/azurerm/resource_arm_hdinsight_rserver_cluster.go
index 8f67f44c5d7d..4a7db4ffbcd4 100644
--- a/azurerm/resource_arm_hdinsight_rserver_cluster.go
+++ b/azurerm/resource_arm_hdinsight_rserver_cluster.go
@@ -153,7 +153,7 @@ func resourceArmHDInsightRServerClusterCreate(d *schema.ResourceData, meta inter
 	gateway := expandHDInsightsRServerConfigurations(gatewayRaw, rStudio)
 
 	storageAccountsRaw := d.Get("storage_account").([]interface{})
-	storageAccounts, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw)
+	storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, nil)
 	if err != nil {
 		return fmt.Errorf("Error expanding `storage_account`: %s", err)
 	}
@@ -200,7 +200,8 @@ func resourceArmHDInsightRServerClusterCreate(d *schema.ResourceData, meta inter
 				Roles: roles,
 			},
 		},
-		Tags: tags.Expand(t),
+		Tags:     tags.Expand(t),
+		Identity: identity,
 	}
 
 	future, err := client.Create(ctx, resourceGroup, name, params)
 	if err != nil {
diff --git a/azurerm/resource_arm_hdinsight_spark_cluster.go b/azurerm/resource_arm_hdinsight_spark_cluster.go
index 5d44a2fca2ab..8056b4fb2869 100644
--- a/azurerm/resource_arm_hdinsight_spark_cluster.go
+++ b/azurerm/resource_arm_hdinsight_spark_cluster.go
@@ -87,6 +87,8 @@ func resourceArmHDInsightSparkCluster() *schema.Resource {
 
 			"storage_account": azure.SchemaHDInsightsStorageAccounts(),
 
+			"storage_account_gen2": azure.SchemaHDInsightsGen2StorageAccounts(),
+
 			"roles": {
 				Type:     schema.TypeList,
 				Required: true,
@@ -136,7 +138,8 @@ func resourceArmHDInsightSparkClusterCreate(d *schema.ResourceData, meta interfa
 	gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw)
 
 	storageAccountsRaw := d.Get("storage_account").([]interface{})
-	storageAccounts, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw)
+	storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{})
+	storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw)
 	if err != nil {
 		return fmt.Errorf("Error expanding `storage_account`: %s", err)
 	}
@@ -183,7 +186,8 @@ func resourceArmHDInsightSparkClusterCreate(d *schema.ResourceData, meta interfa
 				Roles: roles,
 			},
 		},
-		Tags: tags.Expand(t),
+		Tags:     tags.Expand(t),
+		Identity: identity,
 	}
 
 	future, err := client.Create(ctx, resourceGroup, name, params)
 	if err != nil {
diff --git a/azurerm/resource_arm_hdinsight_spark_cluster_test.go b/azurerm/resource_arm_hdinsight_spark_cluster_test.go
index e8d74dfe6d93..aad2892d06b8 100644
--- a/azurerm/resource_arm_hdinsight_spark_cluster_test.go
+++ b/azurerm/resource_arm_hdinsight_spark_cluster_test.go
@@ -48,6 +48,43 @@ func TestAccAzureRMHDInsightSparkCluster_basic(t *testing.T) {
 	})
 }
 
+func TestAccAzureRMHDInsightSparkCluster_gen2basic(t *testing.T) {
+	resourceName := "azurerm_hdinsight_spark_cluster.test"
+	ri := tf.AccRandTimeInt()
+	rs := strings.ToLower(acctest.RandString(11))
+	location := testLocation()
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testCheckAzureRMHDInsightClusterDestroy("azurerm_hdinsight_spark_cluster"),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAzureRMHDInsightSparkCluster_gen2basic(ri, rs, location),
+				Check: resource.ComposeTestCheckFunc(
+					testCheckAzureRMHDInsightClusterExists(resourceName),
+					resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"),
+					resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"),
+				),
+			},
+			{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+				ImportStateVerifyIgnore: []string{
+					"roles.0.head_node.0.password",
+					"roles.0.head_node.0.vm_size",
+					"roles.0.worker_node.0.password",
+					"roles.0.worker_node.0.vm_size",
+					"roles.0.zookeeper_node.0.password",
+					"roles.0.zookeeper_node.0.vm_size",
+					"storage_account",
+				},
+			},
+		},
+	})
+}
+
 func TestAccAzureRMHDInsightSparkCluster_requiresImport(t *testing.T) {
 	if !features.ShouldResourcesBeImported() {
 		t.Skip("Skipping since resources aren't required to be imported")
@@ -302,6 +339,61 @@ resource "azurerm_hdinsight_spark_cluster" "test" {
 `, template, rInt)
 }
 
+func testAccAzureRMHDInsightSparkCluster_gen2basic(rInt int, rString string, location string) string {
+	template := testAccAzureRMHDInsightSparkCluster_gen2template(rInt, rString, location)
+	return fmt.Sprintf(`
+%s
+
+resource "azurerm_hdinsight_spark_cluster" "test" {
+  depends_on = [azurerm_role_assignment.test]
+
+  name                = "acctesthdi-%d"
+  resource_group_name = "${azurerm_resource_group.test.name}"
+  location            = "${azurerm_resource_group.test.location}"
+  cluster_version     = "3.6"
+  tier                = "Standard"
+
+  component_version {
+    spark = "2.3"
+  }
+
+  gateway {
+    enabled  = true
+    username = "acctestusrgw"
+    password = "TerrAform123!"
+  }
+
+  storage_account_gen2 {
+    storage_resource_id          = azurerm_storage_account.gen2test.id
+    filesystem_id                = azurerm_storage_data_lake_gen2_filesystem.gen2test.id
+    managed_identity_resource_id = azurerm_user_assigned_identity.test.id
+    is_default                   = true
+  }
+
+  roles {
+    head_node {
+      vm_size  = "Standard_A4_V2"
+      username = "acctestusrvm"
+      password = "AccTestvdSC4daf986!"
+    }
+
+    worker_node {
+      vm_size               = "Standard_A4_V2"
+      username              = "acctestusrvm"
+      password              = "AccTestvdSC4daf986!"
+      target_instance_count = 3
+    }
+
+    zookeeper_node {
+      vm_size  = "Medium"
+      username = "acctestusrvm"
+      password = "AccTestvdSC4daf986!"
+    }
+  }
+}
+`, template, rInt)
+}
+
 func testAccAzureRMHDInsightSparkCluster_requiresImport(rInt int, rString string, location string) string {
 	template := testAccAzureRMHDInsightSparkCluster_basic(rInt, rString, location)
 	return fmt.Sprintf(`
@@ -604,3 +696,42 @@ resource "azurerm_storage_container" "test" {
 }
 `, rInt, location, rString)
 }
+
+func testAccAzureRMHDInsightSparkCluster_gen2template(rInt int, rString string, location string) string {
+	return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+  name     = "acctestRG-%d"
+  location = "%s"
+}
+
+resource "azurerm_storage_account" "gen2test" {
+  name                     = "accgen2test%s"
+  resource_group_name      = azurerm_resource_group.test.name
+  location                 = azurerm_resource_group.test.location
+  account_kind             = "StorageV2"
+  account_tier             = "Standard"
+  account_replication_type = "LRS"
+  is_hns_enabled           = true
+}
+
+resource "azurerm_storage_data_lake_gen2_filesystem" "gen2test" {
+  name               = "acctest"
+  storage_account_id = azurerm_storage_account.gen2test.id
+}
+
+resource "azurerm_user_assigned_identity" "test" {
+  resource_group_name = "${azurerm_resource_group.test.name}"
+  location            = "${azurerm_resource_group.test.location}"
+
+  name = "test-identity"
+}
+
+data "azurerm_subscription" "primary" {}
+
+resource "azurerm_role_assignment" "test" {
+  scope                = "${data.azurerm_subscription.primary.id}"
+  role_definition_name = "Storage Blob Data Owner"
+  principal_id         = "${azurerm_user_assigned_identity.test.principal_id}"
+}
+`, rInt, location, rString)
+}
diff --git a/azurerm/resource_arm_hdinsight_storm_cluster.go b/azurerm/resource_arm_hdinsight_storm_cluster.go
index 922fe3f9f237..bc9123aa188b 100644
--- a/azurerm/resource_arm_hdinsight_storm_cluster.go
+++ b/azurerm/resource_arm_hdinsight_storm_cluster.go
@@ -137,7 +137,7 @@ func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interfa
 	gateway := azure.ExpandHDInsightsConfigurations(gatewayRaw)
 
 	storageAccountsRaw := d.Get("storage_account").([]interface{})
-	storageAccounts, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw)
+	storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, nil)
 	if err != nil {
 		return fmt.Errorf("Error expanding `storage_account`: %s", err)
 	}
@@ -184,7 +184,8 @@ func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interfa
 				Roles: roles,
 			},
 		},
-		Tags: tags.Expand(t),
+		Tags:     tags.Expand(t),
+		Identity: identity,
 	}
 
 	future, err := client.Create(ctx, resourceGroup, name, params)
 	if err != nil {
diff --git a/website/docs/r/hdinsight_hadoop_cluster.html.markdown b/website/docs/r/hdinsight_hadoop_cluster.html.markdown
index 81febbfb75da..05ccb7ccd802 100644
--- a/website/docs/r/hdinsight_hadoop_cluster.html.markdown
+++ b/website/docs/r/hdinsight_hadoop_cluster.html.markdown
@@ -100,6 +100,8 @@ The following arguments are supported:
 
 * `storage_account` - (Required) One or more `storage_account` block as defined below.
 
+* `storage_account_gen2` - (Optional) A `storage_account_gen2` block as defined below.
+
 * `tier` - (Required) Specifies the Tier which should be used for this HDInsight Hadoop Cluster. Possible values are `Standard` or `Premium`. Changing this forces a new resource to be created.
 
 ---
@@ -162,7 +164,7 @@ A `storage_account` block supports the following:
 
 * `is_default` - (Required) Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created.
 
--> **NOTE:** One of the `storage_account` blocks must be marked as the default.
+-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
 
 * `storage_account_key` - (Required) The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
 
@@ -172,6 +174,33 @@ A `storage_account` block supports the following:
 
 ---
 
+A `storage_account_gen2` block supports the following:
+
+* `is_default` - (Required) Is this the Default Storage Account for the HDInsight Hadoop Cluster? Changing this forces a new resource to be created.
+
+-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
+
+* `storage_resource_id` - (Required) The ID of the Storage Account. Changing this forces a new resource to be created.
+
+* `filesystem_id` - (Required) The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
+
+* `managed_identity_resource_id` - (Required) The ID of the Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created.
+
+-> **NOTE:** This can be obtained from the `id` of the `azurerm_user_assigned_identity` resource.
+
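+A minimal `storage_account_gen2` block might look like the following (the referenced resources are assumed to be defined elsewhere in the configuration):
+
+```hcl
+storage_account_gen2 {
+  storage_resource_id          = azurerm_storage_account.example.id
+  filesystem_id                = azurerm_storage_data_lake_gen2_filesystem.example.id
+  managed_identity_resource_id = azurerm_user_assigned_identity.example.id
+  is_default                   = true
+}
+```
+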
+---
+
 A `worker_node` block supports the following:
 
 * `username` - (Required) The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
diff --git a/website/docs/r/hdinsight_hbase_cluster.html.markdown b/website/docs/r/hdinsight_hbase_cluster.html.markdown
index fff3c5dccc43..a8704189fe5a 100644
--- a/website/docs/r/hdinsight_hbase_cluster.html.markdown
+++ b/website/docs/r/hdinsight_hbase_cluster.html.markdown
@@ -100,6 +100,8 @@ The following arguments are supported:
 
 * `storage_account` - (Required) One or more `storage_account` block as defined below.
 
+* `storage_account_gen2` - (Optional) A `storage_account_gen2` block as defined below.
+
 * `tier` - (Required) Specifies the Tier which should be used for this HDInsight HBase Cluster. Possible values are `Standard` or `Premium`. Changing this forces a new resource to be created.
 
 ---
@@ -158,9 +160,9 @@ A `roles` block supports the following:
 
 A `storage_account` block supports the following:
 
 * `is_default` - (Required) Is this the Default Storage Account for the HDInsight HBase Cluster? Changing this forces a new resource to be created.
 
--> **NOTE:** One of the `storage_account` blocks must be marked as the default.
+-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
 
 * `storage_account_key` - (Required) The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
 
@@ -170,6 +172,22 @@ A `storage_account` block supports the following:
 
 ---
 
+A `storage_account_gen2` block supports the following:
+
+* `is_default` - (Required) Is this the Default Storage Account for the HDInsight HBase Cluster? Changing this forces a new resource to be created.
+
+-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
+
+* `storage_resource_id` - (Required) The ID of the Storage Account. Changing this forces a new resource to be created.
+
+* `filesystem_id` - (Required) The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
+
+* `managed_identity_resource_id` - (Required) The ID of the Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created.
+
+-> **NOTE:** This can be obtained from the `id` of the `azurerm_user_assigned_identity` resource.
+
+---
+
 A `worker_node` block supports the following:
 
 * `username` - (Required) The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
diff --git a/website/docs/r/hdinsight_interactive_query_cluster.html.markdown b/website/docs/r/hdinsight_interactive_query_cluster.html.markdown
index 956778b827e8..3faf0aec08f2 100644
--- a/website/docs/r/hdinsight_interactive_query_cluster.html.markdown
+++ b/website/docs/r/hdinsight_interactive_query_cluster.html.markdown
@@ -100,6 +100,8 @@ The following arguments are supported:
 
 * `storage_account` - (Required) One or more `storage_account` block as defined below.
 
+* `storage_account_gen2` - (Optional) A `storage_account_gen2` block as defined below.
+
 * `tier` - (Required) Specifies the Tier which should be used for this HDInsight Interactive Query Cluster. Possible values are `Standard` or `Premium`. Changing this forces a new resource to be created.
 
 ---
@@ -160,9 +162,9 @@ A `roles` block supports the following:
 
 A `storage_account` block supports the following:
 
 * `is_default` - (Required) Is this the Default Storage Account for the HDInsight Interactive Query Cluster? Changing this forces a new resource to be created.
 
--> **NOTE:** One of the `storage_account` blocks must be marked as the default.
+-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
 
 * `storage_account_key` - (Required) The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
 
@@ -172,6 +174,22 @@ A `storage_account` block supports the following:
 
 ---
 
+A `storage_account_gen2` block supports the following:
+
+* `is_default` - (Required) Is this the Default Storage Account for the HDInsight Interactive Query Cluster? Changing this forces a new resource to be created.
+
+-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
+
+* `storage_resource_id` - (Required) The ID of the Storage Account. Changing this forces a new resource to be created.
+
+* `filesystem_id` - (Required) The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
+
+* `managed_identity_resource_id` - (Required) The ID of the Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created.
+
+-> **NOTE:** This can be obtained from the `id` of the `azurerm_user_assigned_identity` resource.
+
+---
+
 A `worker_node` block supports the following:
 
 * `username` - (Required) The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.
diff --git a/website/docs/r/hdinsight_kafka_cluster.html.markdown b/website/docs/r/hdinsight_kafka_cluster.html.markdown
index 247cee802ecc..83a3a5e3f4d6 100644
--- a/website/docs/r/hdinsight_kafka_cluster.html.markdown
+++ b/website/docs/r/hdinsight_kafka_cluster.html.markdown
@@ -101,6 +101,8 @@ The following arguments are supported:
 
 * `storage_account` - (Required) One or more `storage_account` block as defined below.
 
+* `storage_account_gen2` - (Optional) A `storage_account_gen2` block as defined below.
+
 * `tier` - (Required) Specifies the Tier which should be used for this HDInsight Kafka Cluster. Possible values are `Standard` or `Premium`. Changing this forces a new resource to be created.
 
 ---
@@ -159,9 +161,9 @@ A `roles` block supports the following:
 
 A `storage_account` block supports the following:
 
 * `is_default` - (Required) Is this the Default Storage Account for the HDInsight Kafka Cluster? Changing this forces a new resource to be created.
 
--> **NOTE:** One of the `storage_account` blocks must be marked as the default.
+-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
 
 * `storage_account_key` - (Required) The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
 
@@ -171,6 +173,22 @@ A `storage_account` block supports the following:
 
 ---
 
+A `storage_account_gen2` block supports the following:
+
+* `is_default` - (Required) Is this the Default Storage Account for the HDInsight Kafka Cluster? Changing this forces a new resource to be created.
+
+-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
+
+* `storage_resource_id` - (Required) The ID of the Storage Account. Changing this forces a new resource to be created.
+
+* `filesystem_id` - (Required) The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
+
+* `managed_identity_resource_id` - (Required) The ID of the Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created.
+
+-> **NOTE:** This can be obtained from the `id` of the `azurerm_user_assigned_identity` resource.
+
+---
+
 A `worker_node` block supports the following:
 
 * `number_of_disks_per_node` - (Required) The number of Data Disks which should be assigned to each Worker Node, which can be between 1 and 8. Changing this forces a new resource to be created.
diff --git a/website/docs/r/hdinsight_spark_cluster.html.markdown b/website/docs/r/hdinsight_spark_cluster.html.markdown
index 0adbc0258106..e796b2e0c146 100644
--- a/website/docs/r/hdinsight_spark_cluster.html.markdown
+++ b/website/docs/r/hdinsight_spark_cluster.html.markdown
@@ -100,6 +100,8 @@ The following arguments are supported:
 
 * `storage_account` - (Required) One or more `storage_account` block as defined below.
 
+* `storage_account_gen2` - (Optional) A `storage_account_gen2` block as defined below.
+
 * `tier` - (Required) Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are `Standard` or `Premium`. Changing this forces a new resource to be created.
 
 ---
@@ -158,9 +160,9 @@ A `roles` block supports the following:
 
 A `storage_account` block supports the following:
 
 * `is_default` - (Required) Is this the Default Storage Account for the HDInsight Spark Cluster? Changing this forces a new resource to be created.
 
--> **NOTE:** One of the `storage_account` blocks must be marked as the default.
+-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
 
 * `storage_account_key` - (Required) The Access Key which should be used to connect to the Storage Account. Changing this forces a new resource to be created.
 
@@ -170,6 +172,22 @@ A `storage_account` block supports the following:
 
 ---
 
+A `storage_account_gen2` block supports the following:
+
+* `is_default` - (Required) Is this the Default Storage Account for the HDInsight Spark Cluster? Changing this forces a new resource to be created.
+
+-> **NOTE:** One of the `storage_account` or `storage_account_gen2` blocks must be marked as the default.
+
+* `storage_resource_id` - (Required) The ID of the Storage Account. Changing this forces a new resource to be created.
+
+* `filesystem_id` - (Required) The ID of the Gen2 Filesystem. Changing this forces a new resource to be created.
+
+* `managed_identity_resource_id` - (Required) The ID of the Managed Identity to use for accessing the Gen2 filesystem. Changing this forces a new resource to be created.
+
+-> **NOTE:** This can be obtained from the `id` of the `azurerm_user_assigned_identity` resource.
+
+---
+
 A `worker_node` block supports the following:
 
 * `username` - (Required) The Username of the local administrator for the Worker Nodes. Changing this forces a new resource to be created.