diff --git a/azurerm/common_hdinsight.go b/azurerm/common_hdinsight.go
index 64f9e2b41370..f1db69841f44 100644
--- a/azurerm/common_hdinsight.go
+++ b/azurerm/common_hdinsight.go
@@ -1,10 +1,13 @@
 package azurerm
 
 import (
+  "context"
   "fmt"
   "log"
+  "time"
 
   "github.com/Azure/azure-sdk-for-go/services/preview/hdinsight/mgmt/2018-06-01-preview/hdinsight"
+  "github.com/hashicorp/terraform-plugin-sdk/helper/resource"
   "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
   "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure"
   "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags"
@@ -36,13 +39,13 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema
     }
   }
 
-  if d.HasChange("roles") {
+  if d.HasChange("roles.0.worker_node") {
     log.Printf("[DEBUG] Resizing the HDInsight %q Cluster", clusterKind)
     rolesRaw := d.Get("roles").([]interface{})
     roles := rolesRaw[0].(map[string]interface{})
-    headNodes := roles["worker_node"].([]interface{})
-    headNode := headNodes[0].(map[string]interface{})
-    targetInstanceCount := headNode["target_instance_count"].(int)
+    workerNodes := roles["worker_node"].([]interface{})
+    workerNode := workerNodes[0].(map[string]interface{})
+    targetInstanceCount := workerNode["target_instance_count"].(int)
     params := hdinsight.ClusterResizeParameters{
       TargetInstanceCount: utils.Int32(int32(targetInstanceCount)),
     }
@@ -57,6 +60,50 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema
     }
   }
 
+  // The API can add an edge node but can't remove one without forcing a new resource. We check for additions here
+  // and can revisit removal if that functionality gets added. https://feedback.azure.com/forums/217335-hdinsight/suggestions/5663773-start-stop-cluster-hdinsight?page=3&per_page=20
+  if clusterKind == "Hadoop" {
+    if d.HasChange("roles.0.edge_node") {
+      log.Printf("[DEBUG] Detected change in edge nodes")
+      edgeNodeRaw := d.Get("roles.0.edge_node").([]interface{})
+      edgeNodeConfig := edgeNodeRaw[0].(map[string]interface{})
+      applicationsClient := meta.(*ArmClient).HDInsight.ApplicationsClient
+
+      oldEdgeNodeCount, newEdgeNodeCount := d.GetChange("roles.0.edge_node.0.target_instance_count")
+      oldEdgeNodeInt := oldEdgeNodeCount.(int)
+      newEdgeNodeInt := newEdgeNodeCount.(int)
+
+      // Note: the API currently doesn't support updating the number of edge nodes.
+      // If anything in the edge nodes changes, delete the edge nodes and then recreate them.
+      if oldEdgeNodeInt != 0 {
+        err := deleteHDInsightEdgeNodes(ctx, applicationsClient, resourceGroup, name)
+        if err != nil {
+          return err
+        }
+      }
+
+      if newEdgeNodeInt != 0 {
+        err := createHDInsightEdgeNodes(ctx, applicationsClient, resourceGroup, name, edgeNodeConfig)
+        if err != nil {
+          return err
+        }
+      }
+
+      // We can't rely on the Future here: the edge node operation reports completion while the cluster is still applying those changes.
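+      // Instead, poll the cluster state below until it settles back to "Running".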
+      log.Printf("[DEBUG] Waiting for Hadoop Cluster %q (Resource Group %q) to finish applying the edge node", name, resourceGroup)
+      stateConf := &resource.StateChangeConf{
+        Pending:    []string{"AzureVMConfiguration", "Accepted", "HdInsightConfiguration"},
+        Target:     []string{"Running"},
+        Refresh:    hdInsightWaitForReadyRefreshFunc(ctx, client, resourceGroup, name),
+        Timeout:    60 * time.Minute,
+        MinTimeout: 15 * time.Second,
+      }
+      if _, err := stateConf.WaitForState(); err != nil {
+        return fmt.Errorf("Error waiting for HDInsight Cluster %q (Resource Group %q) to be running: %s", name, resourceGroup, err)
+      }
+    }
+  }
+
   return readFunc(d, meta)
  }
 }
@@ -179,3 +226,47 @@ func flattenHDInsightRoles(d *schema.ResourceData, input *hdinsight.ComputeProfi
     result,
   }
 }
+
+func createHDInsightEdgeNodes(ctx context.Context, client *hdinsight.ApplicationsClient, resourceGroup string, name string, input map[string]interface{}) error {
+  installScriptActions := expandHDInsightApplicationEdgeNodeInstallScriptActions(input["install_script_action"].([]interface{}))
+
+  application := hdinsight.Application{
+    Properties: &hdinsight.ApplicationProperties{
+      ComputeProfile: &hdinsight.ComputeProfile{
+        Roles: &[]hdinsight.Role{{
+          Name: utils.String("edgenode"),
+          HardwareProfile: &hdinsight.HardwareProfile{
+            VMSize: utils.String(input["vm_size"].(string)),
+          },
+          TargetInstanceCount: utils.Int32(int32(input["target_instance_count"].(int))),
+        }},
+      },
+      InstallScriptActions: installScriptActions,
+      ApplicationType:      utils.String("CustomApplication"),
+    },
+  }
+  future, err := client.Create(ctx, resourceGroup, name, name, application)
+  if err != nil {
+    return fmt.Errorf("Error creating edge nodes for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
+  }
+
+  if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+    return fmt.Errorf("Error waiting for creation of edge node for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
+  }
+
+  return nil
+}
+
+func deleteHDInsightEdgeNodes(ctx context.Context, client *hdinsight.ApplicationsClient, resourceGroup string, name string) error {
+  future, err := client.Delete(ctx, resourceGroup, name, name)
+  if err != nil {
+    return fmt.Errorf("Error deleting edge nodes for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
+  }
+
+  if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+    return fmt.Errorf("Error waiting for deletion of edge nodes for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
+  }
+
+  return nil
+}
diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go
index 03aebe9e8357..0975f7ce8b7a 100644
--- a/azurerm/helpers/azure/hdinsight.go
+++ b/azurerm/helpers/azure/hdinsight.go
@@ -9,6 +9,7 @@ import (
   "github.com/hashicorp/go-getter/helper/url"
   "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
   "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
+  "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress"
   "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate"
   "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils"
 )
@@ -216,97 +217,102 @@ type HDInsightNodeDefinition struct {
   FixedTargetInstanceCount *int32
 }
 
+func ValidateSchemaHDInsightNodeDefinitionVMSize() schema.SchemaValidateFunc {
+  return validation.StringInSlice([]string{
+    // short of deploying every VM SKU for every node type for every HDInsight Cluster,
+    // this is the list I've (@tombuildsstuff) found for valid SKUs from an endpoint in the Portal
+    // using another SKU causes a bad request from the API - as such this is a best-effort UX
+    "ExtraSmall",
+    "Small",
+    "Medium",
+    "Large",
+    "ExtraLarge",
+    "A5",
+    "A6",
+    "A7",
+    "A8",
+    "A9",
+    "A10",
+    "A11",
+    "Standard_A1_V2",
+    "Standard_A2_V2",
+    "Standard_A2m_V2",
+    "Standard_A3",
+    "Standard_A4_V2",
+    "Standard_A4m_V2",
+    "Standard_A8_V2",
+    "Standard_A8m_V2",
+    "Standard_D1",
+    "Standard_D2",
+    "Standard_D3",
+    "Standard_D4",
+    "Standard_D11",
+    "Standard_D12",
+    "Standard_D13",
+    "Standard_D14",
+    "Standard_D1_V2",
+    "Standard_D2_V2",
+    "Standard_D3_V2",
+    "Standard_D4_V2",
+    "Standard_D5_V2",
+    "Standard_D11_V2",
+    "Standard_D12_V2",
+    "Standard_D13_V2",
+    "Standard_D14_V2",
+    "Standard_DS1_V2",
+    "Standard_DS2_V2",
+    "Standard_DS3_V2",
+    "Standard_DS4_V2",
+    "Standard_DS5_V2",
+    "Standard_DS11_V2",
+    "Standard_DS12_V2",
+    "Standard_DS13_V2",
+    "Standard_DS14_V2",
+    "Standard_E2_V3",
+    "Standard_E4_V3",
+    "Standard_E8_V3",
+    "Standard_E16_V3",
+    "Standard_E20_V3",
+    "Standard_E32_V3",
+    "Standard_E64_V3",
+    "Standard_E64i_V3",
+    "Standard_E2s_V3",
+    "Standard_E4s_V3",
+    "Standard_E8s_V3",
+    "Standard_E16s_V3",
+    "Standard_E20s_V3",
+    "Standard_E32s_V3",
+    "Standard_E64s_V3",
+    "Standard_E64is_V3",
+    "Standard_G1",
+    "Standard_G2",
+    "Standard_G3",
+    "Standard_G4",
+    "Standard_G5",
+    "Standard_F2s_V2",
+    "Standard_F4s_V2",
+    "Standard_F8s_V2",
+    "Standard_F16s_V2",
+    "Standard_F32s_V2",
+    "Standard_F64s_V2",
+    "Standard_F72s_V2",
+    "Standard_GS1",
+    "Standard_GS2",
+    "Standard_GS3",
+    "Standard_GS4",
+    "Standard_GS5",
+    "Standard_NC24",
+  }, true)
+}
+
 func SchemaHDInsightNodeDefinition(schemaLocation string, definition HDInsightNodeDefinition) *schema.Schema {
   result := map[string]*schema.Schema{
     "vm_size": {
-      Type:     schema.TypeString,
-      Required: true,
-      ForceNew: true,
-      ValidateFunc: validation.StringInSlice([]string{
-        // short of deploying every VM Sku for every node type for every HDInsight Cluster
-        // this is the list I've (@tombuildsstuff) found for valid SKU's from an endpoint in the Portal
-        // using another SKU causes a bad request from the API - as such this is a best effort UX
-        "ExtraSmall",
-        "Small",
-        "Medium",
-        "Large",
-        "ExtraLarge",
-        "A5",
-        "A6",
-        "A7",
-        "A8",
-        "A9",
-        "A10",
-        "A11",
-        "Standard_A1_V2",
-        "Standard_A2_V2",
-        "Standard_A2m_V2",
-        "Standard_A3",
-        "Standard_A4_V2",
-        "Standard_A4m_V2",
-        "Standard_A8_V2",
-        "Standard_A8m_V2",
-        "Standard_D1",
-        "Standard_D2",
-        "Standard_D3",
-        "Standard_D4",
-        "Standard_D11",
-        "Standard_D12",
-        "Standard_D13",
-        "Standard_D14",
-        "Standard_D1_V2",
-        "Standard_D2_V2",
-        "Standard_D3_V2",
-        "Standard_D4_V2",
-        "Standard_D5_V2",
-        "Standard_D11_V2",
-        "Standard_D12_V2",
-        "Standard_D13_V2",
-        "Standard_D14_V2",
-        "Standard_DS1_V2",
-        "Standard_DS2_V2",
-        "Standard_DS3_V2",
-        "Standard_DS4_V2",
-        "Standard_DS5_V2",
-        "Standard_DS11_V2",
-        "Standard_DS12_V2",
-        "Standard_DS13_V2",
-        "Standard_DS14_V2",
-        "Standard_E2_V3",
-        "Standard_E4_V3",
-        "Standard_E8_V3",
-        "Standard_E16_V3",
-        "Standard_E20_V3",
-        "Standard_E32_V3",
-        "Standard_E64_V3",
-        "Standard_E64i_V3",
-        "Standard_E2s_V3",
-        "Standard_E4s_V3",
-        "Standard_E8s_V3",
-        "Standard_E16s_V3",
-        "Standard_E20s_V3",
-        "Standard_E32s_V3",
-        "Standard_E64s_V3",
-        "Standard_E64is_V3",
-        "Standard_G1",
-        "Standard_G2",
-        "Standard_G3",
-        "Standard_G4",
-        "Standard_G5",
-        "Standard_F2s_V2",
-        "Standard_F4s_V2",
"Standard_F8s_V2", - "Standard_F16s_V2", - "Standard_F32s_V2", - "Standard_F64s_V2", - "Standard_F72s_V2", - "Standard_GS1", - "Standard_GS2", - "Standard_GS3", - "Standard_GS4", - "Standard_GS5", - "Standard_NC24", - }, true), + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: suppress.CaseDifference, + ValidateFunc: ValidateSchemaHDInsightNodeDefinitionVMSize(), }, "username": { Type: schema.TypeString, diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster.go b/azurerm/resource_arm_hdinsight_hadoop_cluster.go index 453d1fcad6d0..aa749d3f163f 100644 --- a/azurerm/resource_arm_hdinsight_hadoop_cluster.go +++ b/azurerm/resource_arm_hdinsight_hadoop_cluster.go @@ -1,14 +1,19 @@ package azurerm import ( + "context" "fmt" "log" "time" "github.com/Azure/azure-sdk-for-go/services/preview/hdinsight/mgmt/2018-06-01-preview/hdinsight" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" @@ -51,7 +56,6 @@ func resourceArmHDInsightHadoopCluster() *schema.Resource { Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(60 * time.Minute), Read: schema.DefaultTimeout(5 * time.Minute), @@ -100,6 +104,48 @@ func resourceArmHDInsightHadoopCluster() *schema.Resource { "worker_node": azure.SchemaHDInsightNodeDefinition("roles.0.worker_node", hdInsightHadoopClusterWorkerNodeDefinition), "zookeeper_node": azure.SchemaHDInsightNodeDefinition("roles.0.zookeeper_node", hdInsightHadoopClusterZookeeperNodeDefinition), + + "edge_node": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target_instance_count": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 25), + }, + + "vm_size": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: suppress.CaseDifference, + ValidateFunc: azure.ValidateSchemaHDInsightNodeDefinitionVMSize(), + }, + + "install_script_action": { + Type: schema.TypeList, + Required: true, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.NoEmptyStrings, + }, + "uri": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.NoEmptyStrings, + }, + }, + }, + }, + }, + }, + }, }, }, }, @@ -207,6 +253,31 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf d.SetId(*read.ID) + // We can only add an edge node after creation + if v, ok := d.GetOk("roles.0.edge_node"); ok { + edgeNodeRaw := v.([]interface{}) + applicationsClient := meta.(*ArmClient).HDInsight.ApplicationsClient + edgeNodeConfig := edgeNodeRaw[0].(map[string]interface{}) + + err := createHDInsightEdgeNodes(ctx, applicationsClient, 
+  if err != nil {
+   return err
+  }
+
+  // We can't rely on the Future here: the edge node operation reports completion while the cluster is still applying those changes.
+  log.Printf("[DEBUG] Waiting for Hadoop Cluster %q (Resource Group %q) to finish applying the edge node", name, resourceGroup)
+  stateConf := &resource.StateChangeConf{
+   Pending:    []string{"AzureVMConfiguration", "Accepted", "HdInsightConfiguration"},
+   Target:     []string{"Running"},
+   Refresh:    hdInsightWaitForReadyRefreshFunc(ctx, client, resourceGroup, name),
+   Timeout:    60 * time.Minute,
+   MinTimeout: 15 * time.Second,
+  }
+  if _, err := stateConf.WaitForState(); err != nil {
+   return fmt.Errorf("Error waiting for HDInsight Cluster %q (Resource Group %q) to be running: %s", name, resourceGroup, err)
+  }
+ }
+
  return resourceArmHDInsightHadoopClusterRead(d, meta)
 }
 
@@ -267,6 +338,20 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac
    ZookeeperNodeDef: hdInsightHadoopClusterZookeeperNodeDefinition,
   }
   flattenedRoles := flattenHDInsightRoles(d, props.ComputeProfile, hadoopRoles)
+
+  applicationsClient := meta.(*ArmClient).HDInsight.ApplicationsClient
+
+  edgeNode, err := applicationsClient.Get(ctx, resourceGroup, name, name)
+  if err != nil {
+   if !utils.ResponseWasNotFound(edgeNode.Response) {
+    return fmt.Errorf("Error reading edge node for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
+   }
+  }
+
+  if edgeNodeProps := edgeNode.Properties; edgeNodeProps != nil {
+   flattenedRoles = flattenHDInsightEdgeNode(flattenedRoles, edgeNodeProps)
+  }
+
   if err := d.Set("roles", flattenedRoles); err != nil {
    return fmt.Errorf("Error flattening `roles`: %+v", err)
   }
@@ -280,6 +365,42 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac
  return tags.FlattenAndSet(d, resp.Tags)
 }
 
+func flattenHDInsightEdgeNode(roles []interface{}, props *hdinsight.ApplicationProperties) []interface{} {
+ if len(roles) == 0 || props == nil {
+  return roles
+ }
+
+ role := roles[0].(map[string]interface{})
+
+ edgeNode := make(map[string]interface{})
+ if computeProfile := props.ComputeProfile; computeProfile != nil {
+  if roles := computeProfile.Roles; roles != nil {
+   for _, role := range *roles {
+    if targetInstanceCount := role.TargetInstanceCount; targetInstanceCount != nil {
+     edgeNode["target_instance_count"] = targetInstanceCount
+    }
+    if hardwareProfile := role.HardwareProfile; hardwareProfile != nil {
+     edgeNode["vm_size"] = hardwareProfile.VMSize
+    }
+   }
+  }
+ }
+
+ actions := make(map[string]interface{})
+ if installScriptActions := props.InstallScriptActions; installScriptActions != nil {
+  for _, action := range *installScriptActions {
+   actions["name"] = action.Name
+   actions["uri"] = action.URI
+  }
+ }
+
+ edgeNode["install_script_action"] = []interface{}{actions}
+
+ role["edge_node"] = []interface{}{edgeNode}
+
+ return []interface{}{role}
+}
+
 func expandHDInsightHadoopComponentVersion(input []interface{}) map[string]*string {
  vs := input[0].(map[string]interface{})
  return map[string]*string{
@@ -300,3 +421,41 @@ func flattenHDInsightHadoopComponentVersion(input map[string]*string) []interfac
   },
  }
 }
+
+func expandHDInsightApplicationEdgeNodeInstallScriptActions(input []interface{}) *[]hdinsight.RuntimeScriptAction {
+ actions := make([]hdinsight.RuntimeScriptAction, 0)
+
+ for _, v := range input {
+  val := v.(map[string]interface{})
+
+  name := val["name"].(string)
+  uri := val["uri"].(string)
+
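+  // Each install_script_action entry maps to a single RuntimeScriptAction on the edge node application.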
+  action := hdinsight.RuntimeScriptAction{
+   Name: utils.String(name),
+   URI:  utils.String(uri),
+   // The only role available for edge nodes is edgenode
+   Roles: &[]string{"edgenode"},
+  }
+
+  actions = append(actions, action)
+ }
+
+ return &actions
+}
+
+func hdInsightWaitForReadyRefreshFunc(ctx context.Context, client *hdinsight.ClustersClient, resourceGroupName string, name string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+  res, err := client.Get(ctx, resourceGroupName, name)
+  if err != nil {
+   return nil, "Error", fmt.Errorf("Error issuing read request in hdInsightWaitForReadyRefreshFunc to Hadoop Cluster %q (Resource Group %q): %s", name, resourceGroupName, err)
+  }
+
+  if props := res.Properties; props != nil {
+   if state := props.ClusterState; state != nil {
+    return res, *state, nil
+   }
+  }
+
+  return res, "Pending", nil
+ }
+}
diff --git a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go
index 40b7d8796956..d6e4eb416185 100644
--- a/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go
+++ b/azurerm/resource_arm_hdinsight_hadoop_cluster_test.go
@@ -250,6 +250,130 @@ func TestAccAzureRMHDInsightHadoopCluster_complete(t *testing.T) {
  })
 }
 
+func TestAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(t *testing.T) {
+ resourceName := "azurerm_hdinsight_hadoop_cluster.test"
+ ri := tf.AccRandTimeInt()
+ rs := strings.ToLower(acctest.RandString(11))
+ location := testLocation()
+
+ resource.ParallelTest(t, resource.TestCase{
+  PreCheck:     func() { testAccPreCheck(t) },
+  Providers:    testAccProviders,
+  CheckDestroy: testCheckAzureRMHDInsightClusterDestroy("azurerm_hdinsight_hadoop_cluster"),
+  Steps: []resource.TestStep{
+   {
+    Config: testAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(ri, rs, location, 2, "Standard_D3_V2"),
+    Check: resource.ComposeTestCheckFunc(
+     testCheckAzureRMHDInsightClusterExists(resourceName),
+     resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"),
+     resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"),
+    ),
+   },
+   {
+    ResourceName:      resourceName,
+    ImportState:       true,
+    ImportStateVerify: true,
+    ImportStateVerifyIgnore: []string{
+     "roles.0.head_node.0.password",
+     "roles.0.head_node.0.vm_size",
+     "roles.0.worker_node.0.password",
+     "roles.0.worker_node.0.vm_size",
+     "roles.0.zookeeper_node.0.password",
+     "roles.0.zookeeper_node.0.vm_size",
+     "roles.0.edge_node.0.password",
+     "roles.0.edge_node.0.vm_size",
+     "storage_account",
+    },
+   },
+  },
+ })
+}
+
+func TestAccAzureRMHDInsightHadoopCluster_addEdgeNodeBasic(t *testing.T) {
+ resourceName := "azurerm_hdinsight_hadoop_cluster.test"
+ ri := tf.AccRandTimeInt()
+ rs := strings.ToLower(acctest.RandString(11))
+ location := testLocation()
+
+ resource.ParallelTest(t, resource.TestCase{
+  PreCheck:     func() { testAccPreCheck(t) },
+  Providers:    testAccProviders,
+  CheckDestroy: testCheckAzureRMHDInsightClusterDestroy("azurerm_hdinsight_hadoop_cluster"),
+  Steps: []resource.TestStep{
+   {
+    Config: testAccAzureRMHDInsightHadoopCluster_basic(ri, rs, location),
+    Check: resource.ComposeTestCheckFunc(
+     testCheckAzureRMHDInsightClusterExists(resourceName),
+     resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"),
+     resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"),
+    ),
+   },
+   {
+    ResourceName:      resourceName,
+    ImportState:       true,
+    ImportStateVerify: true,
+    ImportStateVerifyIgnore: []string{
+     "roles.0.head_node.0.password",
+     "roles.0.head_node.0.vm_size",
"roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account", + }, + }, + { + Config: testAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(ri, rs, location, 1, "Standard_D3_V2"), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "roles.0.edge_node.0.password", + "roles.0.edge_node.0.vm_size", + "storage_account", + }, + }, + { + Config: testAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(ri, rs, location, 3, "Standard_D4_V2"), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(resourceName, "ssh_endpoint"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "roles.0.edge_node.0.password", + "roles.0.edge_node.0.vm_size", + "storage_account", + }, + }, + }, + }) +} + func testAccAzureRMHDInsightHadoopCluster_basic(rInt int, rString string, location string) string { template := testAccAzureRMHDInsightHadoopCluster_template(rInt, rString, location) return fmt.Sprintf(` @@ -581,6 +705,67 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { `, template, rInt, rInt, rInt) } +func testAccAzureRMHDInsightHadoopCluster_edgeNodeBasic(rInt int, rString string, location string, numEdgeNodes int, instanceType string) string { + template := testAccAzureRMHDInsightHadoopCluster_template(rInt, rString, location) + return fmt.Sprintf(` +%s + +resource "azurerm_hdinsight_hadoop_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "${azurerm_resource_group.test.location}" + cluster_version = "3.6" + tier = "Standard" + + component_version { + hadoop = "2.7" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account { + storage_container_id = "${azurerm_storage_container.test.id}" + storage_account_key = "${azurerm_storage_account.test.primary_access_key}" + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" 
+    }
+
+    edge_node {
+      target_instance_count = %d
+      vm_size               = "%s"
+      install_script_action {
+        name = "script1"
+        uri  = "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-hdinsight-linux-with-edge-node/scripts/EmptyNodeSetup.sh"
+      }
+    }
+  }
+}
+`, template, rInt, numEdgeNodes, instanceType)
+}
+
 func testAccAzureRMHDInsightHadoopCluster_template(rInt int, rString string, location string) string {
  return fmt.Sprintf(`
 resource "azurerm_resource_group" "test" {
diff --git a/azurerm/testdata/hadoop_cluster_empty_node.sh b/azurerm/testdata/hadoop_cluster_empty_node.sh
new file mode 100644
index 000000000000..3215f0b3f34f
--- /dev/null
+++ b/azurerm/testdata/hadoop_cluster_empty_node.sh
@@ -0,0 +1,2 @@
+#! /bin/bash
+echo "Empty node setup"
\ No newline at end of file
diff --git a/website/docs/r/hdinsight_hadoop_cluster.html.markdown b/website/docs/r/hdinsight_hadoop_cluster.html.markdown
index 51b9eb38c234..119c3f5065ea 100644
--- a/website/docs/r/hdinsight_hadoop_cluster.html.markdown
+++ b/website/docs/r/hdinsight_hadoop_cluster.html.markdown
@@ -154,6 +154,8 @@ A `roles` block supports the following:
 
 * `zookeeper_node` - (Required) A `zookeeper_node` block as defined below.
 
+* `edge_node` - (Optional) An `edge_node` block as defined below.
+
 ---
 
 A `storage_account` block supports the following:
@@ -212,6 +214,24 @@ A `zookeeper_node` block supports the following:
 
 * `virtual_network_id` - (Optional) The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created.
 
+---
+
+An `edge_node` block supports the following:
+
+* `target_instance_count` - (Required) The number of instances which should be run for the Edge Nodes.
+
+* `vm_size` - (Required) The Size of the Virtual Machine which should be used as the Edge Nodes.
+
+* `install_script_action` - (Required) An `install_script_action` block as defined below.
+
+---
+
+An `install_script_action` block supports the following:
+
+* `name` - (Required) The name of the install script action.
+
+* `uri` - (Required) The URI pointing to the script to run during the installation of the edge node.
+
 ## Attributes Reference
 
 The following attributes are exported:
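For reference, a minimal `edge_node` configuration for the `roles` block documented above might look like the following. This is a sketch adapted from the acceptance-test configuration in this PR; the script name and URI are illustrative values, not requirements:

```hcl
roles {
  # ... head_node, worker_node and zookeeper_node blocks ...

  edge_node {
    # between 1 and 25 edge node instances
    target_instance_count = 1
    vm_size               = "Standard_D3_V2"

    # runs on each edge node during installation
    install_script_action {
      name = "script1"
      uri  = "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-hdinsight-linux-with-edge-node/scripts/EmptyNodeSetup.sh"
    }
  }
}
```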