From 26eaca0ace48378d64aa402f123f05f052467c37 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 11 Jul 2017 17:52:28 +0300 Subject: [PATCH] examples: Migrating examples from hashicorp/terraform repository (#163) --- examples/2-vms-loadbalancer-lbrules/README.md | 22 + examples/2-vms-loadbalancer-lbrules/main.tf | 145 +++ .../2-vms-loadbalancer-lbrules/outputs.tf | 11 + .../2-vms-loadbalancer-lbrules/variables.tf | 79 ++ examples/cdn-with-storage-account/README.md | 30 + examples/cdn-with-storage-account/main.tf | 39 + examples/cdn-with-storage-account/outputs.tf | 3 + .../cdn-with-storage-account/variables.tf | 18 + examples/encrypt-running-linux-vm/README.md | 44 + examples/encrypt-running-linux-vm/main.tf | 223 +++++ examples/encrypt-running-linux-vm/outputs.tf | 8 + .../encrypt-running-linux-vm/variables.tf | 125 +++ examples/openshift-origin/README.md | 114 +++ examples/openshift-origin/main.tf | 826 ++++++++++++++++++ examples/openshift-origin/outputs.tf | 23 + examples/openshift-origin/variables.tf | 206 +++++ examples/search-create/README.md | 28 + examples/search-create/main.tf | 20 + examples/search-create/outputs.tf | 3 + examples/search-create/variables.tf | 32 + .../README.md | 22 + .../main.tf | 37 + .../outputs.tf | 7 + .../variables.tf | 12 + .../spark-and-cassandra-on-centos/README.md | 67 ++ .../spark-and-cassandra-on-centos/main.tf | 405 +++++++++ .../spark-and-cassandra-on-centos/outputs.tf | 15 + .../variables.tf | 234 +++++ examples/sql-database/README.md | 22 + examples/sql-database/main.tf | 39 + examples/sql-database/outputs.tf | 7 + examples/sql-database/variables.tf | 16 + .../traffic-manager-lb-scale-set/Readme.md | 27 + examples/traffic-manager-lb-scale-set/main.tf | 102 +++ .../terraform.tfvars.example | 7 + .../tf_modules/location.tf | 165 ++++ .../tf_modules/outputs.tf | 5 + .../traffic-manager-lb-scale-set/variables.tf | 98 +++ examples/traffic-manager-vm/README.md | 29 + examples/traffic-manager-vm/main.tf | 125 +++ 
examples/traffic-manager-vm/outputs.tf | 3 + examples/traffic-manager-vm/variables.tf | 71 ++ .../README.md | 23 + .../main.tf | 165 ++++ .../outputs.tf | 15 + .../variables.tf | 97 ++ examples/vm-from-user-image/README.md | 26 + examples/vm-from-user-image/main.tf | 73 ++ examples/vm-from-user-image/outputs.tf | 11 + examples/vm-from-user-image/variables.tf | 55 ++ .../vm-simple-linux-managed-disk/README.md | 22 + examples/vm-simple-linux-managed-disk/main.tf | 108 +++ .../vm-simple-linux-managed-disk/outputs.tf | 11 + .../vm-simple-linux-managed-disk/variables.tf | 75 ++ .../README.md | 35 + .../vm-specialized-vhd-existing-vnet/main.tf | 71 ++ .../outputs.tf | 11 + .../variables.tf | 90 ++ examples/vmss-ubuntu/README.md | 22 + examples/vmss-ubuntu/main.tf | 127 +++ examples/vmss-ubuntu/outputs.tf | 3 + examples/vmss-ubuntu/variables.tf | 59 ++ examples/vnet-to-vnet-peering/README.md | 24 + examples/vnet-to-vnet-peering/main.tf | 56 ++ examples/vnet-to-vnet-peering/variables.tf | 9 + examples/vnet-two-subnets/README.md | 20 + examples/vnet-two-subnets/main.tf | 32 + examples/vnet-two-subnets/variables.tf | 8 + .../wordpress-mysql-replication/README.md | 41 + examples/wordpress-mysql-replication/main.tf | 244 ++++++ .../wordpress-mysql-replication/outputs.tf | 23 + .../wordpress-mysql-replication/variables.tf | 210 +++++ .../wordpress-mysql-replication/website.tf | 140 +++ 73 files changed, 5420 insertions(+) create mode 100644 examples/2-vms-loadbalancer-lbrules/README.md create mode 100644 examples/2-vms-loadbalancer-lbrules/main.tf create mode 100644 examples/2-vms-loadbalancer-lbrules/outputs.tf create mode 100644 examples/2-vms-loadbalancer-lbrules/variables.tf create mode 100644 examples/cdn-with-storage-account/README.md create mode 100644 examples/cdn-with-storage-account/main.tf create mode 100644 examples/cdn-with-storage-account/outputs.tf create mode 100644 examples/cdn-with-storage-account/variables.tf create mode 100644 
examples/encrypt-running-linux-vm/README.md create mode 100644 examples/encrypt-running-linux-vm/main.tf create mode 100644 examples/encrypt-running-linux-vm/outputs.tf create mode 100644 examples/encrypt-running-linux-vm/variables.tf create mode 100644 examples/openshift-origin/README.md create mode 100644 examples/openshift-origin/main.tf create mode 100644 examples/openshift-origin/outputs.tf create mode 100644 examples/openshift-origin/variables.tf create mode 100644 examples/search-create/README.md create mode 100644 examples/search-create/main.tf create mode 100644 examples/search-create/outputs.tf create mode 100644 examples/search-create/variables.tf create mode 100644 examples/servicebus-create-topic-and-subscription/README.md create mode 100644 examples/servicebus-create-topic-and-subscription/main.tf create mode 100644 examples/servicebus-create-topic-and-subscription/outputs.tf create mode 100644 examples/servicebus-create-topic-and-subscription/variables.tf create mode 100644 examples/spark-and-cassandra-on-centos/README.md create mode 100644 examples/spark-and-cassandra-on-centos/main.tf create mode 100644 examples/spark-and-cassandra-on-centos/outputs.tf create mode 100644 examples/spark-and-cassandra-on-centos/variables.tf create mode 100644 examples/sql-database/README.md create mode 100644 examples/sql-database/main.tf create mode 100644 examples/sql-database/outputs.tf create mode 100644 examples/sql-database/variables.tf create mode 100644 examples/traffic-manager-lb-scale-set/Readme.md create mode 100644 examples/traffic-manager-lb-scale-set/main.tf create mode 100644 examples/traffic-manager-lb-scale-set/terraform.tfvars.example create mode 100644 examples/traffic-manager-lb-scale-set/tf_modules/location.tf create mode 100644 examples/traffic-manager-lb-scale-set/tf_modules/outputs.tf create mode 100644 examples/traffic-manager-lb-scale-set/variables.tf create mode 100644 examples/traffic-manager-vm/README.md create mode 100644 
examples/traffic-manager-vm/main.tf create mode 100644 examples/traffic-manager-vm/outputs.tf create mode 100644 examples/traffic-manager-vm/variables.tf create mode 100644 examples/vm-custom-image-new-storage-account/README.md create mode 100644 examples/vm-custom-image-new-storage-account/main.tf create mode 100644 examples/vm-custom-image-new-storage-account/outputs.tf create mode 100644 examples/vm-custom-image-new-storage-account/variables.tf create mode 100644 examples/vm-from-user-image/README.md create mode 100644 examples/vm-from-user-image/main.tf create mode 100644 examples/vm-from-user-image/outputs.tf create mode 100644 examples/vm-from-user-image/variables.tf create mode 100644 examples/vm-simple-linux-managed-disk/README.md create mode 100644 examples/vm-simple-linux-managed-disk/main.tf create mode 100644 examples/vm-simple-linux-managed-disk/outputs.tf create mode 100644 examples/vm-simple-linux-managed-disk/variables.tf create mode 100644 examples/vm-specialized-vhd-existing-vnet/README.md create mode 100644 examples/vm-specialized-vhd-existing-vnet/main.tf create mode 100644 examples/vm-specialized-vhd-existing-vnet/outputs.tf create mode 100644 examples/vm-specialized-vhd-existing-vnet/variables.tf create mode 100644 examples/vmss-ubuntu/README.md create mode 100644 examples/vmss-ubuntu/main.tf create mode 100644 examples/vmss-ubuntu/outputs.tf create mode 100644 examples/vmss-ubuntu/variables.tf create mode 100644 examples/vnet-to-vnet-peering/README.md create mode 100644 examples/vnet-to-vnet-peering/main.tf create mode 100644 examples/vnet-to-vnet-peering/variables.tf create mode 100644 examples/vnet-two-subnets/README.md create mode 100644 examples/vnet-two-subnets/main.tf create mode 100644 examples/vnet-two-subnets/variables.tf create mode 100644 examples/wordpress-mysql-replication/README.md create mode 100644 examples/wordpress-mysql-replication/main.tf create mode 100644 examples/wordpress-mysql-replication/outputs.tf create mode 100644 
examples/wordpress-mysql-replication/variables.tf create mode 100644 examples/wordpress-mysql-replication/website.tf diff --git a/examples/2-vms-loadbalancer-lbrules/README.md b/examples/2-vms-loadbalancer-lbrules/README.md new file mode 100644 index 000000000000..5730f2ad951e --- /dev/null +++ b/examples/2-vms-loadbalancer-lbrules/README.md @@ -0,0 +1,22 @@ +# Create 2 Virtual Machines under a Load balancer and configures Load Balancing rules for the VMs + +This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/master/201-2-vms-loadbalancer-lbrules) Azure Quickstart Template. Changes to the ARM template may have occured since the creation of this example may not be reflected here. + +This template allows you to create 2 Virtual Machines under a Load balancer and configure a load balancing rule on Port 80. This template also deploys a Storage Account, Virtual Network, Public IP address, Availability Set, and Network Interfaces. + +## main.tf +The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables. + +## outputs.tf +This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command. + +## provider.tf +Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file. + +## terraform.tfvars +If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. 
We don't recommend saving usernames and password to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it. + +If you are committing this template to source control, please insure that you add this file to your .gitignore file. + +## variables.tf +The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template. diff --git a/examples/2-vms-loadbalancer-lbrules/main.tf b/examples/2-vms-loadbalancer-lbrules/main.tf new file mode 100644 index 000000000000..495a5565f2df --- /dev/null +++ b/examples/2-vms-loadbalancer-lbrules/main.tf @@ -0,0 +1,145 @@ +# provider "azurerm" { +# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID" +# client_id = "REPLACE-WITH-YOUR-CLIENT-ID" +# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET" +# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID" +# } + +resource "azurerm_resource_group" "rg" { + name = "${var.resource_group}" + location = "${var.location}" +} + +resource "azurerm_storage_account" "stor" { + name = "${var.dns_name}stor" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + account_type = "${var.storage_account_type}" +} + +resource "azurerm_availability_set" "avset" { + name = "${var.dns_name}avset" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + platform_fault_domain_count = 2 + platform_update_domain_count = 2 + managed = true +} + +resource "azurerm_public_ip" "lbpip" { + name = "${var.rg_prefix}-ip" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + public_ip_address_allocation = "dynamic" + domain_name_label = "${var.lb_ip_dns_name}" +} + +resource "azurerm_virtual_network" "vnet" { + name = "${var.virtual_network_name}" + location = "${var.location}" + address_space = ["${var.address_space}"] + resource_group_name = "${azurerm_resource_group.rg.name}" +} + 
+resource "azurerm_subnet" "subnet" { + name = "${var.rg_prefix}subnet" + virtual_network_name = "${azurerm_virtual_network.vnet.name}" + resource_group_name = "${azurerm_resource_group.rg.name}" + address_prefix = "${var.subnet_prefix}" +} + +resource "azurerm_lb" "lb" { + resource_group_name = "${azurerm_resource_group.rg.name}" + name = "${var.rg_prefix}lb" + location = "${var.location}" + + frontend_ip_configuration { + name = "LoadBalancerFrontEnd" + public_ip_address_id = "${azurerm_public_ip.lbpip.id}" + } +} + +resource "azurerm_lb_backend_address_pool" "backend_pool" { + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.lb.id}" + name = "BackendPool1" +} + +resource "azurerm_lb_nat_rule" "tcp" { + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.lb.id}" + name = "RDP-VM-${count.index}" + protocol = "tcp" + frontend_port = "5000${count.index + 1}" + backend_port = 3389 + frontend_ip_configuration_name = "LoadBalancerFrontEnd" + count = 2 +} + +resource "azurerm_lb_rule" "lb_rule" { + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.lb.id}" + name = "LBRule" + protocol = "tcp" + frontend_port = 80 + backend_port = 80 + frontend_ip_configuration_name = "LoadBalancerFrontEnd" + enable_floating_ip = false + backend_address_pool_id = "${azurerm_lb_backend_address_pool.backend_pool.id}" + idle_timeout_in_minutes = 5 + probe_id = "${azurerm_lb_probe.lb_probe.id}" + depends_on = ["azurerm_lb_probe.lb_probe"] +} + +resource "azurerm_lb_probe" "lb_probe" { + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.lb.id}" + name = "tcpProbe" + protocol = "tcp" + port = 80 + interval_in_seconds = 5 + number_of_probes = 2 +} + +resource "azurerm_network_interface" "nic" { + name = "nic${count.index}" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + count = 2 + + 
ip_configuration { + name = "ipconfig${count.index}" + subnet_id = "${azurerm_subnet.subnet.id}" + private_ip_address_allocation = "Dynamic" + load_balancer_backend_address_pools_ids = ["${azurerm_lb_backend_address_pool.backend_pool.id}"] + load_balancer_inbound_nat_rules_ids = ["${element(azurerm_lb_nat_rule.tcp.*.id, count.index)}"] + } +} + +resource "azurerm_virtual_machine" "vm" { + name = "vm${count.index}" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + availability_set_id = "${azurerm_availability_set.avset.id}" + vm_size = "${var.vm_size}" + network_interface_ids = ["${element(azurerm_network_interface.nic.*.id, count.index)}"] + count = 2 + + storage_image_reference { + publisher = "${var.image_publisher}" + offer = "${var.image_offer}" + sku = "${var.image_sku}" + version = "${var.image_version}" + } + + storage_os_disk { + name = "osdisk${count.index}" + create_option = "FromImage" + } + + os_profile { + computer_name = "${var.hostname}" + admin_username = "${var.admin_username}" + admin_password = "${var.admin_password}" + } +} diff --git a/examples/2-vms-loadbalancer-lbrules/outputs.tf b/examples/2-vms-loadbalancer-lbrules/outputs.tf new file mode 100644 index 000000000000..19757fa523aa --- /dev/null +++ b/examples/2-vms-loadbalancer-lbrules/outputs.tf @@ -0,0 +1,11 @@ +output "hostname" { + value = "${var.hostname}" +} + +output "vm_fqdn" { + value = "${azurerm_public_ip.lbpip.fqdn}" +} + +output "ssh_command" { + value = "ssh ${var.admin_username}@${azurerm_public_ip.lbpip.fqdn}" +} diff --git a/examples/2-vms-loadbalancer-lbrules/variables.tf b/examples/2-vms-loadbalancer-lbrules/variables.tf new file mode 100644 index 000000000000..0e652606aeb2 --- /dev/null +++ b/examples/2-vms-loadbalancer-lbrules/variables.tf @@ -0,0 +1,79 @@ +variable "resource_group" { + description = "The name of the resource group in which to create the virtual network." 
+} + +variable "rg_prefix" { + description = "The shortened abbreviation to represent your resource group that will go on the front of some resources." + default = "rg" +} + +variable "hostname" { + description = "VM name referenced also in storage-related names." +} + +variable "dns_name" { + description = " Label for the Domain Name. Will be used to make up the FQDN. If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS system." +} + +variable "lb_ip_dns_name" { + description = "DNS for Load Balancer IP" +} + +variable "location" { + description = "The location/region where the virtual network is created. Changing this forces a new resource to be created." + default = "southcentralus" +} + +variable "virtual_network_name" { + description = "The name for the virtual network." + default = "vnet" +} + +variable "address_space" { + description = "The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created." + default = "10.0.0.0/16" +} + +variable "subnet_prefix" { + description = "The address prefix to use for the subnet." + default = "10.0.10.0/24" +} + +variable "storage_account_type" { + description = "Defines the type of storage account to be created. Valid options are Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS. Changing this is sometimes valid - see the Azure documentation for more information on which types of accounts can be converted into other types." + default = "Standard_LRS" +} + +variable "vm_size" { + description = "Specifies the size of the virtual machine." 
+ default = "Standard_D1" +} + +variable "image_publisher" { + description = "name of the publisher of the image (az vm image list)" + default = "MicrosoftWindowsServer" +} + +variable "image_offer" { + description = "the name of the offer (az vm image list)" + default = "WindowsServer" +} + +variable "image_sku" { + description = "image sku to apply (az vm image list)" + default = "2012-R2-Datacenter" +} + +variable "image_version" { + description = "version of the image to apply (az vm image list)" + default = "latest" +} + +variable "admin_username" { + description = "administrator user name" + default = "vmadmin" +} + +variable "admin_password" { + description = "administrator password (recommended to disable password auth)" +} diff --git a/examples/cdn-with-storage-account/README.md b/examples/cdn-with-storage-account/README.md new file mode 100644 index 000000000000..047ca5151395 --- /dev/null +++ b/examples/cdn-with-storage-account/README.md @@ -0,0 +1,30 @@ +# Create a CDN Profile, a CDN Endpoint with a Storage Account as origin + +This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/master/201-cdn-with-storage-account) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected in this Terraform template. + +This template creates a [CDN Profile](https://docs.microsoft.com/en-us/azure/cdn/cdn-overview) and a CDN Endpoint with the origin as a Storage Account. Note that the user needs to create a public container in the Storage Account in order for CDN Endpoint to serve content from the Storage Account. + +# Important + +The endpoint will not immediately be available for use, as it takes time for the registration to propagate through the CDN. For Azure CDN from Akamai profiles, propagation will usually complete within one minute. 
For Azure CDN from Verizon profiles, propagation will usually complete within 90 minutes, but in some cases can take longer. + +Users who try to use the CDN domain name before the endpoint configuration has propagated to the POPs will receive HTTP 404 response codes. If it has been several hours since you created your endpoint and you're still receiving 404 responses, please see [Troubleshooting CDN endpoints returning 404 statuses](https://docs.microsoft.com/en-us/azure/cdn/cdn-troubleshoot-endpoint). + +## main.tf +The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables. + +## outputs.tf +This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command. + +## provider.tf +Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file. + +## terraform.tfvars +If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it. + +If you are committing this template to source control, please ensure that you add this file to your `.gitignore` file. + +## variables.tf +The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template. 
+ +![graph](/examples/azure-cdn-with-storage-account/graph.png) diff --git a/examples/cdn-with-storage-account/main.tf b/examples/cdn-with-storage-account/main.tf new file mode 100644 index 000000000000..0f50bb5eb996 --- /dev/null +++ b/examples/cdn-with-storage-account/main.tf @@ -0,0 +1,39 @@ +# provider "azurerm" { +# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID" +# client_id = "REPLACE-WITH-YOUR-CLIENT-ID" +# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET" +# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID" +# } + +resource "azurerm_resource_group" "rg" { + name = "${var.resource_group}" + location = "${var.location}" +} + +resource "azurerm_storage_account" "stor" { + name = "${var.resource_group}stor" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + account_type = "${var.storage_account_type}" +} + +resource "azurerm_cdn_profile" "cdn" { + name = "${var.resource_group}CdnProfile1" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + sku = "Standard_Akamai" +} + +resource "azurerm_cdn_endpoint" "cdnendpt" { + name = "${var.resource_group}CdnEndpoint1" + profile_name = "${azurerm_cdn_profile.cdn.name}" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + + origin { + name = "${var.resource_group}Origin1" + host_name = "${var.host_name}" + http_port = 80 + https_port = 443 + } +} \ No newline at end of file diff --git a/examples/cdn-with-storage-account/outputs.tf b/examples/cdn-with-storage-account/outputs.tf new file mode 100644 index 000000000000..8f7c1e5c8ad1 --- /dev/null +++ b/examples/cdn-with-storage-account/outputs.tf @@ -0,0 +1,3 @@ +output "CDN Endpoint ID" { + value = "${azurerm_cdn_endpoint.cdnendpt.name}.azureedge.net" +} diff --git a/examples/cdn-with-storage-account/variables.tf b/examples/cdn-with-storage-account/variables.tf new file mode 100644 index 000000000000..d9bf51015d93 --- /dev/null +++ 
b/examples/cdn-with-storage-account/variables.tf @@ -0,0 +1,18 @@ +variable "resource_group" { + description = "The name of the resource group in which to create the virtual network." +} + +variable "location" { + description = "The location/region where the virtual network is created. Changing this forces a new resource to be created." + default = "southcentralus" +} + +variable "storage_account_type" { + description = "Specifies the type of the storage account" + default = "Standard_LRS" +} + +variable "host_name" { + description = "A string that determines the hostname/IP address of the origin server. This string could be a domain name, IPv4 address or IPv6 address." + default = "www.hostnameoforiginserver.com" +} \ No newline at end of file diff --git a/examples/encrypt-running-linux-vm/README.md b/examples/encrypt-running-linux-vm/README.md new file mode 100644 index 000000000000..85ee3e0f752f --- /dev/null +++ b/examples/encrypt-running-linux-vm/README.md @@ -0,0 +1,44 @@ +# Enable encryption on a running Linux VM. + +This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/master/201-encrypt-running-linux-vm) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected in this Terraform template. + +This template enables encryption on a running linux vm using AAD client secret. This template assumes that the VM is located in the same region as the resource group. If not, please edit the template to pass appropriate location for the VM sub-resources. + +## Prerequisites: +Azure Disk Encryption securely stores the encryption secrets in a specified Azure Key Vault. + +Create the Key Vault and assign appropriate access policies. 
You may use this script to ensure that your vault is properly configured: [AzureDiskEncryptionPreRequisiteSetup.ps1](https://github.com/Azure/azure-powershell/blob/10fc37e9141af3fde6f6f79b9d46339b73cf847d/src/ResourceManager/Compute/Commands.Compute/Extension/AzureDiskEncryption/Scripts/AzureDiskEncryptionPreRequisiteSetup.ps1) + +Use the below PS cmdlet for getting the `key_vault_secret_url` and `key_vault_resource_id`. + +``` + Get-AzureRmKeyVault -VaultName $KeyVaultName -ResourceGroupName $rgname +``` + +References: + +- [White paper](https://azure.microsoft.com/en-us/documentation/articles/azure-security-disk-encryption/) +- [Explore Azure Disk Encryption with Azure Powershell](https://blogs.msdn.microsoft.com/azuresecurity/2015/11/16/explore-azure-disk-encryption-with-azure-powershell/) +- [Explore Azure Disk Encryption with Azure PowerShell – Part 2](http://blogs.msdn.com/b/azuresecurity/archive/2015/11/21/explore-azure-disk-encryption-with-azure-powershell-part-2.aspx) + + +## main.tf +The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables. + +## outputs.tf +This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command. + +## provider.tf +You may leave the provider block in the `main.tf`, as it is in this template, or you can create a file called `provider.tf` and add it to your `.gitignore` file. + +Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file. 
+ +## terraform.tfvars +If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and password to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it. + +If you are committing this template to source control, please insure that you add this file to your .gitignore file. + +## variables.tf +The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template. + +![graph](/examples/azure-encrypt-running-linux-vm/graph.png) diff --git a/examples/encrypt-running-linux-vm/main.tf b/examples/encrypt-running-linux-vm/main.tf new file mode 100644 index 000000000000..fcd9736aa493 --- /dev/null +++ b/examples/encrypt-running-linux-vm/main.tf @@ -0,0 +1,223 @@ +# provider "azurerm" { +# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID" +# client_id = "REPLACE-WITH-YOUR-CLIENT-ID" +# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET" +# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID" +# } + +resource "azurerm_resource_group" "rg" { + name = "${var.resource_group}" + location = "${var.location}" +} + +resource "azurerm_virtual_network" "vnet" { + name = "${var.hostname}vnet" + location = "${var.location}" + address_space = ["${var.address_space}"] + resource_group_name = "${azurerm_resource_group.rg.name}" +} + +resource "azurerm_subnet" "subnet" { + name = "${var.hostname}subnet" + virtual_network_name = "${azurerm_virtual_network.vnet.name}" + resource_group_name = "${azurerm_resource_group.rg.name}" + address_prefix = "${var.subnet_prefix}" +} + +resource "azurerm_network_interface" "nic" { + name = "nic" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + + ip_configuration { + name = "ipconfig" + subnet_id = "${azurerm_subnet.subnet.id}" + private_ip_address_allocation = 
"Dynamic" + } +} + +resource "azurerm_storage_account" "stor" { + name = "${var.hostname}stor" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" + account_type = "${var.storage_account_type}" +} + +resource "azurerm_virtual_machine" "vm" { + name = "${var.hostname}" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + vm_size = "${var.vm_size}" + network_interface_ids = ["${azurerm_network_interface.nic.id}"] + + storage_image_reference { + publisher = "${var.image_publisher}" + offer = "${var.image_offer}" + sku = "${var.image_sku}" + version = "${var.image_version}" + } + + storage_os_disk { + name = "${var.hostname}osdisk" + create_option = "FromImage" + disk_size_gb = "30" + } + + os_profile { + computer_name = "${var.hostname}" + admin_username = "${var.admin_username}" + admin_password = "${var.admin_password}" + } + + os_profile_linux_config { + disable_password_authentication = false + } +} + +resource "azurerm_template_deployment" "linux_vm" { + name = "encrypt" + resource_group_name = "${azurerm_resource_group.rg.name}" + deployment_mode = "Incremental" + depends_on = ["azurerm_virtual_machine.vm"] + + template_body = <**Master subnet:** 10.0.0.0/24
**Node subnet:** 10.0.1.0/24 | +|Load Balancer |2 probes and two rules for TCP 80 and TCP 443 | +|Public IP Addresses|OpenShift Master public IP
OpenShift Router public IP attached to Load Balancer | +|Storage Accounts |2 Storage Accounts | +|Virtual Machines |Single master
User-defined number of nodes
All VMs include a single attached data disk for Docker thin pool logical volume| + +If you have a Red Hat subscription and would like to deploy an OpenShift Container Platform (formerly OpenShift Enterprise) cluster, please visit: https://github.com/Microsoft/openshift-container-platform + +### Generate SSH Keys + +You'll need to generate an SSH key pair in order to provision this template. Ensure that you do not include a passcode with the private key.
+If you are using a Windows computer, you can download `puttygen.exe`. You will need to export to OpenSSH (from Conversions menu) to get a valid Private Key for use in the Template.
+From a Linux or Mac, you can just use the `ssh-keygen` command. Once you are finished deploying the cluster, you can always generate a new key pair that uses a passphrase and replaces the original one used during initial deployment. + +### Create Key Vault to store SSH Private Key + +You will need to create a Key Vault to store your SSH Private Key that will then be used as part of the deployment. + +1. **Create Key Vault using Powershell**
+ a. Create new resource group: New-AzureRMResourceGroup -Name 'ResourceGroupName' -Location 'West US'
+ b. Create key vault: New-AzureRmKeyVault -VaultName 'KeyVaultName' -ResourceGroup 'ResourceGroupName' -Location 'West US'
+ c. Create variable with sshPrivateKey: $securesecret = ConvertTo-SecureString -String '[copy ssh Private Key here - including line feeds]' -AsPlainText -Force
+ d. Create Secret: Set-AzureKeyVaultSecret -Name 'SecretName' -SecretValue $securesecret -VaultName 'KeyVaultName'
+ e. Enable the Key Vault for Template Deployments: Set-AzureRmKeyVaultAccessPolicy -VaultName 'KeyVaultName' -ResourceGroupName 'ResourceGroupName' -EnabledForTemplateDeployment + +2. **Create Key Vault using Azure CLI 1.0**
+ a. Create new Resource Group: azure group create <resource-group-name> <location>
+ Ex: `azure group create ResourceGroupName 'East US'`
+ b. Create Key Vault: azure keyvault create -u <key-vault-name> -g <resource-group-name> -l <location>
+ Ex: `azure keyvault create -u KeyVaultName -g ResourceGroupName -l 'East US'`
+ c. Create Secret: azure keyvault secret set -u <key-vault-name> -s <secret-name> --file <private-key-file>
+ Ex: `azure keyvault secret set -u KeyVaultName -s SecretName --file ~/.ssh/id_rsa`
+ d. Enable the Key Vault for Template Deployment: azure keyvault set-policy -u <key-vault-name> --enabled-for-template-deployment true
+ Ex: `azure keyvault set-policy -u KeyVaultName --enabled-for-template-deployment true`
+ +3. **Create Key Vault using Azure CLI 2.0**
+ a. Create new Resource Group: az group create -n <resource-group-name> -l <location>
+ Ex: `az group create -n ResourceGroupName -l 'East US'`
+ b. Create Key Vault: az keyvault create -n <key-vault-name> -g <resource-group-name> -l <location> --enabled-for-template-deployment true
+ Ex: `az keyvault create -n KeyVaultName -g ResourceGroupName -l 'East US' --enabled-for-template-deployment true`
+ c. Create Secret: az keyvault secret set --vault-name <key-vault-name> -n <secret-name> --file <private-key-file>
+ Ex: `az keyvault secret set --vault-name KeyVaultName -n SecretName --file ~/.ssh/id_rsa`
+4. **Clone the OpenShift repository [here](https://github.com/Microsoft/openshift-origin)**
+ a. Note the local script path, this will be needed for remote-execs on the remote machines.
+ +## Deploy Template + +Once you have collected all of the prerequisites for the template, you can deploy the template via Terraform. + +Monitor deployment via Terraform and get the console URL from outputs of successful deployment which will look something like (if using sample parameters file and "West US 2" location): + +`https://me-master1.westus2.cloudapp.azure.com:8443/console` + +The cluster will use self-signed certificates. Accept the warning and proceed to the login page. + +### NOTE + +Ensure that the combination of the openshiftMasterPublicIpDnsLabelPrefix and nodeLbPublicIpDnsLabelPrefix parameters, combined with the deployment location, gives you a globally unique URL for the cluster, or deployment will fail at the step of allocating public IPs with fully-qualified-domain-names as above. + +### NOTE + +This template deploys a bastion host, merely for the connection provisioner and allowing remote-exec to run commands on machines without public IPs; notice the specific dependencies on the order in which VMs are created for this to work properly. + +### NOTE + +The OpenShift Ansible playbook does take a while to run when using VMs backed by Standard Storage. VMs backed by Premium Storage are faster. If you want Premium Storage, select a DS or GS series VM. +
+Be sure to follow the OpenShift instructions to create the necessary DNS entry for the OpenShift Router for access to applications. + +## Post-Deployment Operations + +This template creates an OpenShift user but does not make it a full cluster administrator. To do that, please perform the following. + +1. SSH in to the master node +2. Execute the following command: + + ```sh + sudo oadm policy add-cluster-role-to-user cluster-admin <user> + ``` +### Additional OpenShift Configuration Options + +You can configure additional settings per the official [OpenShift Origin Documentation](https://docs.openshift.org/latest/welcome/index.html). + +A few options you have: + +1. Deployment Output + + a. openshiftConsoleUrl the openshift console url
+ b. openshiftMasterSsh ssh command for master node
+ c. openshiftNodeLoadBalancerFQDN node load balancer
+ + get the deployment output data + + a. portal.azure.com -> choose 'Resource groups' select your group select 'Deployments' and there the deployment 'Microsoft.Template'. As output from the deployment it contains information about the openshift console url, ssh command and load balancer url.
+ b. With the Azure CLI : azure group deployment list <resource group name> + +2. add additional users. you can find much detail about this in the openshift.org documentation under 'Cluster Administration' and 'Managing Users'. This installation uses htpasswd as the identity provider. To add more user ssh in to master node and execute following command: + + ```sh + sudo htpasswd /etc/origin/master/htpasswd user1 + ``` + Now this user can login with the 'oc' CLI tool or the openshift console url. diff --git a/examples/openshift-origin/main.tf b/examples/openshift-origin/main.tf new file mode 100644 index 000000000000..7237b84642d1 --- /dev/null +++ b/examples/openshift-origin/main.tf @@ -0,0 +1,826 @@ +provider "azurerm" { + subscription_id = "${var.subscription_id}" + client_id = "${var.aad_client_id}" + client_secret = "${var.aad_client_secret}" + tenant_id = "${var.tenant_id}" +} + +resource "azurerm_resource_group" "rg" { + name = "${var.resource_group_name}" + location = "${var.resource_group_location}" +} + +# ******* NETWORK SECURITY GROUPS *********** + +resource "azurerm_network_security_group" "master_nsg" { + name = "${var.openshift_cluster_prefix}-master-nsg" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + + security_rule { + name = "allow_SSH_in_all" + description = "Allow SSH in from all locations" + priority = 100 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "22" + source_address_prefix = "*" + destination_address_prefix = "*" + } + + security_rule { + name = "allow_HTTPS_all" + description = "Allow HTTPS connections from all locations" + priority = 200 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = "*" + destination_address_prefix = "*" + } + + security_rule { + name = "allow_OpenShift_console_in_all" + description = 
"Allow OpenShift Console connections from all locations" + priority = 300 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "8443" + source_address_prefix = "*" + destination_address_prefix = "*" + } +} + +resource "azurerm_network_security_group" "infra_nsg" { + name = "${var.openshift_cluster_prefix}-infra-nsg" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + + security_rule { + name = "allow_SSH_in_all" + description = "Allow SSH in from all locations" + priority = 100 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "22" + source_address_prefix = "*" + destination_address_prefix = "*" + } + + security_rule { + name = "allow_HTTPS_all" + description = "Allow HTTPS connections from all locations" + priority = 200 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = "*" + destination_address_prefix = "*" + } + + security_rule { + name = "allow_HTTP_in_all" + description = "Allow HTTP connections from all locations" + priority = 300 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "80" + source_address_prefix = "*" + destination_address_prefix = "*" + } +} + +resource "azurerm_network_security_group" "node_nsg" { + name = "${var.openshift_cluster_prefix}-node-nsg" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + + security_rule { + name = "allow_SSH_in_all" + description = "Allow SSH in from all locations" + priority = 100 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "22" + source_address_prefix = "*" + destination_address_prefix = "*" + } + + security_rule { + name = 
"allow_HTTPS_all" + description = "Allow HTTPS connections from all locations" + priority = 200 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = "*" + destination_address_prefix = "*" + } + + security_rule { + name = "allow_HTTP_in_all" + description = "Allow HTTP connections from all locations" + priority = 300 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "80" + source_address_prefix = "*" + destination_address_prefix = "*" + } +} + +# ******* STORAGE ACCOUNTS *********** + +resource "azurerm_storage_account" "bastion_storage_account" { + name = "${var.openshift_cluster_prefix}bsa" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" + account_type = "${var.storage_account_type_map["${var.bastion_vm_size}"]}" +} + +resource "azurerm_storage_account" "master_storage_account" { + name = "${var.openshift_cluster_prefix}msa" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" + account_type = "${var.storage_account_type_map["${var.master_vm_size}"]}" +} + +resource "azurerm_storage_account" "infra_storage_account" { + name = "${var.openshift_cluster_prefix}infrasa" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" + account_type = "${var.storage_account_type_map["${var.infra_vm_size}"]}" +} + +resource "azurerm_storage_account" "nodeos_storage_account" { + name = "${var.openshift_cluster_prefix}nodeossa" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" + account_type = "${var.storage_account_type_map["${var.node_vm_size}"]}" +} + +resource "azurerm_storage_account" "nodedata_storage_account" { + name = "${var.openshift_cluster_prefix}nodedatasa" + 
resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" + account_type = "${var.storage_account_type_map["${var.node_vm_size}"]}" +} + +resource "azurerm_storage_account" "registry_storage_account" { + name = "${var.openshift_cluster_prefix}regsa" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" + account_type = "Standard_LRS" +} + +resource "azurerm_storage_account" "persistent_volume_storage_account" { + name = "${var.openshift_cluster_prefix}pvsa" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" + account_type = "Standard_LRS" +} + +# ******* AVAILABILITY SETS *********** + +resource "azurerm_availability_set" "master" { + name = "masteravailabilityset" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" +} + +resource "azurerm_availability_set" "infra" { + name = "infraavailabilityset" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" +} + +resource "azurerm_availability_set" "node" { + name = "nodeavailabilityset" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" +} + +# ******* IP ADDRESSES *********** + +resource "azurerm_public_ip" "bastion_pip" { + name = "bastionpip" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" + public_ip_address_allocation = "Static" + domain_name_label = "${var.openshift_cluster_prefix}-bastion" +} + +resource "azurerm_public_ip" "openshift_master_pip" { + name = "masterpip" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" + public_ip_address_allocation = "Static" + domain_name_label = "${var.openshift_cluster_prefix}" +} + +resource 
"azurerm_public_ip" "infra_lb_pip" { + name = "infraip" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" + public_ip_address_allocation = "Static" + domain_name_label = "${var.openshift_cluster_prefix}infrapip" +} + +# ******* VNETS / SUBNETS *********** + +resource "azurerm_virtual_network" "vnet" { + name = "openshiftvnet" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + address_space = ["10.0.0.0/8"] + depends_on = ["azurerm_virtual_network.vnet"] +} + +resource "azurerm_subnet" "master_subnet" { + name = "mastersubnet" + virtual_network_name = "${azurerm_virtual_network.vnet.name}" + resource_group_name = "${azurerm_resource_group.rg.name}" + address_prefix = "10.1.0.0/16" + depends_on = ["azurerm_virtual_network.vnet"] +} + +resource "azurerm_subnet" "node_subnet" { + name = "nodesubnet" + virtual_network_name = "${azurerm_virtual_network.vnet.name}" + resource_group_name = "${azurerm_resource_group.rg.name}" + address_prefix = "10.2.0.0/16" +} + +# ******* MASTER LOAD BALANCER *********** + +resource "azurerm_lb" "master_lb" { + name = "masterloadbalancer" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" + depends_on = ["azurerm_public_ip.openshift_master_pip"] + + frontend_ip_configuration { + name = "LoadBalancerFrontEnd" + public_ip_address_id = "${azurerm_public_ip.openshift_master_pip.id}" + } +} + +resource "azurerm_lb_backend_address_pool" "master_lb" { + resource_group_name = "${azurerm_resource_group.rg.name}" + name = "loadBalancerBackEnd" + loadbalancer_id = "${azurerm_lb.master_lb.id}" + depends_on = ["azurerm_lb.master_lb"] +} + +resource "azurerm_lb_probe" "master_lb" { + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.master_lb.id}" + name = "8443Probe" + port = 8443 + interval_in_seconds = 5 + number_of_probes 
= 2 + protocol = "Tcp" + depends_on = ["azurerm_lb.master_lb"] +} + +resource "azurerm_lb_rule" "master_lb" { + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.master_lb.id}" + name = "OpenShiftAdminConsole" + protocol = "Tcp" + frontend_port = 8443 + backend_port = 8443 + frontend_ip_configuration_name = "LoadBalancerFrontEnd" + backend_address_pool_id = "${azurerm_lb_backend_address_pool.master_lb.id}" + load_distribution = "SourceIP" + idle_timeout_in_minutes = 30 + probe_id = "${azurerm_lb_probe.master_lb.id}" + enable_floating_ip = false + depends_on = ["azurerm_lb_probe.master_lb", "azurerm_lb.master_lb", "azurerm_lb_backend_address_pool.master_lb"] +} + +resource "azurerm_lb_nat_rule" "master_lb" { + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.master_lb.id}" + name = "${azurerm_lb.master_lb.name}-SSH-${count.index}" + protocol = "Tcp" + frontend_port = "${count.index + 2200}" + backend_port = 22 + frontend_ip_configuration_name = "LoadBalancerFrontEnd" + count = "${var.master_instance_count}" + depends_on = ["azurerm_lb.master_lb"] +} + +# ******* INFRA LOAD BALANCER *********** + +resource "azurerm_lb" "infra_lb" { + name = "infraloadbalancer" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" + depends_on = ["azurerm_public_ip.infra_lb_pip"] + + frontend_ip_configuration { + name = "LoadBalancerFrontEnd" + public_ip_address_id = "${azurerm_public_ip.infra_lb_pip.id}" + } +} + +resource "azurerm_lb_backend_address_pool" "infra_lb" { + resource_group_name = "${azurerm_resource_group.rg.name}" + name = "loadBalancerBackEnd" + loadbalancer_id = "${azurerm_lb.infra_lb.id}" + depends_on = ["azurerm_lb.infra_lb"] +} + +resource "azurerm_lb_probe" "infra_lb_http_probe" { + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.infra_lb.id}" + name = "httpProbe" + port = 80 + 
interval_in_seconds = 5 + number_of_probes = 2 + protocol = "Tcp" + depends_on = ["azurerm_lb.infra_lb"] +} + +resource "azurerm_lb_probe" "infra_lb_https_probe" { + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.infra_lb.id}" + name = "httpsProbe" + port = 443 + interval_in_seconds = 5 + number_of_probes = 2 + protocol = "Tcp" +} + +resource "azurerm_lb_rule" "infra_lb_http" { + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.infra_lb.id}" + name = "OpenShiftRouterHTTP" + protocol = "Tcp" + frontend_port = 80 + backend_port = 80 + frontend_ip_configuration_name = "LoadBalancerFrontEnd" + backend_address_pool_id = "${azurerm_lb_backend_address_pool.infra_lb.id}" + probe_id = "${azurerm_lb_probe.infra_lb_http_probe.id}" + depends_on = ["azurerm_lb_probe.infra_lb_http_probe", "azurerm_lb.infra_lb", "azurerm_lb_backend_address_pool.infra_lb"] +} + +resource "azurerm_lb_rule" "infra_lb_https" { + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.infra_lb.id}" + name = "OpenShiftRouterHTTPS" + protocol = "Tcp" + frontend_port = 443 + backend_port = 443 + frontend_ip_configuration_name = "LoadBalancerFrontEnd" + backend_address_pool_id = "${azurerm_lb_backend_address_pool.infra_lb.id}" + probe_id = "${azurerm_lb_probe.infra_lb_https_probe.id}" + depends_on = ["azurerm_lb_probe.infra_lb_https_probe", "azurerm_lb_backend_address_pool.infra_lb"] +} + +# ******* NETWORK INTERFACES *********** + +resource "azurerm_network_interface" "bastion_nic" { + name = "bastionnic${count.index}" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + network_security_group_id = "${azurerm_network_security_group.master_nsg.id}" + + ip_configuration { + name = "bastionip${count.index}" + subnet_id = "${azurerm_subnet.master_subnet.id}" + private_ip_address_allocation = "Dynamic" + public_ip_address_id = 
"${azurerm_public_ip.bastion_pip.id}" + } +} + +resource "azurerm_network_interface" "master_nic" { + name = "masternic${count.index}" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + network_security_group_id = "${azurerm_network_security_group.master_nsg.id}" + count = "${var.master_instance_count}" + + ip_configuration { + name = "masterip${count.index}" + subnet_id = "${azurerm_subnet.master_subnet.id}" + private_ip_address_allocation = "Dynamic" + load_balancer_backend_address_pools_ids = ["${azurerm_lb_backend_address_pool.master_lb.id}"] + load_balancer_inbound_nat_rules_ids = ["${element(azurerm_lb_nat_rule.master_lb.*.id, count.index)}"] + } +} + +resource "azurerm_network_interface" "infra_nic" { + name = "infra_nic${count.index}" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + network_security_group_id = "${azurerm_network_security_group.infra_nsg.id}" + count = "${var.infra_instance_count}" + + ip_configuration { + name = "infraip${count.index}" + subnet_id = "${azurerm_subnet.master_subnet.id}" + private_ip_address_allocation = "Dynamic" + load_balancer_backend_address_pools_ids = ["${azurerm_lb_backend_address_pool.infra_lb.id}"] + } +} + +resource "azurerm_network_interface" "node_nic" { + name = "node_nic${count.index}" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + network_security_group_id = "${azurerm_network_security_group.node_nsg.id}" + count = "${var.node_instance_count}" + + ip_configuration { + name = "nodeip${count.index}" + subnet_id = "${azurerm_subnet.node_subnet.id}" + private_ip_address_allocation = "Dynamic" + } +} + +# ******* Bastion Host ******* + +resource "azurerm_virtual_machine" "bastion" { + name = "${var.openshift_cluster_prefix}-bastion-1" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = 
"${azurerm_resource_group.rg.name}" + network_interface_ids = ["${azurerm_network_interface.bastion_nic.id}"] + vm_size = "${var.bastion_vm_size}" + delete_os_disk_on_termination = true + delete_data_disks_on_termination = true + + tags { + displayName = "${var.openshift_cluster_prefix}-bastion VM Creation" + } + + os_profile { + computer_name = "${var.openshift_cluster_prefix}-bastion-${count.index}" + admin_username = "${var.admin_username}" + admin_password = "${var.openshift_password}" + } + + os_profile_linux_config { + disable_password_authentication = true + + ssh_keys { + path = "/home/${var.admin_username}/.ssh/authorized_keys" + key_data = "${var.ssh_public_key}" + } + } + + storage_image_reference { + publisher = "${lookup(var.os_image_map, join("_publisher", list(var.os_image, "")))}" + offer = "${lookup(var.os_image_map, join("_offer", list(var.os_image, "")))}" + sku = "${lookup(var.os_image_map, join("_sku", list(var.os_image, "")))}" + version = "${lookup(var.os_image_map, join("_version", list(var.os_image, "")))}" + } + + storage_os_disk { + name = "${var.openshift_cluster_prefix}-master-osdisk${count.index}" + vhd_uri = "${azurerm_storage_account.bastion_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-bastion-osdisk.vhd" + caching = "ReadWrite" + create_option = "FromImage" + disk_size_gb = 60 + } +} + +# ******* Master VMs ******* + +resource "azurerm_virtual_machine" "master" { + name = "${var.openshift_cluster_prefix}-master-${count.index}" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + availability_set_id = "${azurerm_availability_set.master.id}" + network_interface_ids = ["${element(azurerm_network_interface.master_nic.*.id, count.index)}"] + vm_size = "${var.master_vm_size}" + delete_os_disk_on_termination = true + delete_data_disks_on_termination = true + count = "${var.master_instance_count}" + depends_on = ["azurerm_virtual_machine.infra", 
"azurerm_virtual_machine.node"] + + tags { + displayName = "${var.openshift_cluster_prefix}-master VM Creation" + } + + connection { + host = "${azurerm_public_ip.openshift_master_pip.fqdn}" + user = "${var.admin_username}" + port = 2200 + private_key = "${file(var.connection_private_ssh_key_path)}" + } + + provisioner "file" { + source = "${var.openshift_script_path}/masterPrep.sh" + destination = "masterPrep.sh" + } + + provisioner "file" { + source = "${var.openshift_script_path}/deployOpenShift.sh" + destination = "deployOpenShift.sh" + } + + provisioner "remote-exec" { + inline = [ + "chmod +x masterPrep.sh", + "chmod +x deployOpenShift.sh", + "sudo bash masterPrep.sh \"${azurerm_storage_account.persistent_volume_storage_account.name}\" \"${var.admin_username}\" && sudo bash deployOpenShift.sh \"${var.admin_username}\" \"${var.openshift_password}\" \"${var.key_vault_secret}\" \"${var.openshift_cluster_prefix}-master\" \"${azurerm_public_ip.openshift_master_pip.fqdn}\" \"${azurerm_public_ip.openshift_master_pip.ip_address}\" \"${var.openshift_cluster_prefix}-infra\" \"${var.openshift_cluster_prefix}-node\" \"${var.node_instance_count}\" \"${var.infra_instance_count}\" \"${var.master_instance_count}\" \"${var.default_sub_domain_type}\" \"${azurerm_storage_account.registry_storage_account.name}\" \"${azurerm_storage_account.registry_storage_account.primary_access_key}\" \"${var.tenant_id}\" \"${var.subscription_id}\" \"${var.aad_client_id}\" \"${var.aad_client_secret}\" \"${azurerm_resource_group.rg.name}\" \"${azurerm_resource_group.rg.location}\" \"${var.key_vault_name}\"" + ] + } + + os_profile { + computer_name = "${var.openshift_cluster_prefix}-master-${count.index}" + admin_username = "${var.admin_username}" + admin_password = "${var.openshift_password}" + } + + os_profile_linux_config { + disable_password_authentication = true + + ssh_keys { + path = "/home/${var.admin_username}/.ssh/authorized_keys" + key_data = "${var.ssh_public_key}" + } + } + + 
storage_image_reference { + publisher = "${lookup(var.os_image_map, join("_publisher", list(var.os_image, "")))}" + offer = "${lookup(var.os_image_map, join("_offer", list(var.os_image, "")))}" + sku = "${lookup(var.os_image_map, join("_sku", list(var.os_image, "")))}" + version = "${lookup(var.os_image_map, join("_version", list(var.os_image, "")))}" + } + + storage_os_disk { + name = "${var.openshift_cluster_prefix}-master-osdisk${count.index}" + vhd_uri = "${azurerm_storage_account.master_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-master-osdisk${count.index}.vhd" + caching = "ReadWrite" + create_option = "FromImage" + disk_size_gb = 60 + } + + storage_data_disk { + name = "${var.openshift_cluster_prefix}-master-docker-pool${count.index}" + vhd_uri = "${azurerm_storage_account.master_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-master-docker-pool${count.index}.vhd" + disk_size_gb = "${var.data_disk_size}" + create_option = "Empty" + lun = 0 + } +} + +# ******* Infra VMs ******* + +resource "azurerm_virtual_machine" "infra" { + name = "${var.openshift_cluster_prefix}-infra-${count.index}" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + availability_set_id = "${azurerm_availability_set.infra.id}" + network_interface_ids = ["${element(azurerm_network_interface.infra_nic.*.id, count.index)}"] + vm_size = "${var.infra_vm_size}" + delete_os_disk_on_termination = true + delete_data_disks_on_termination = true + count = "${var.infra_instance_count}" + + tags { + displayName = "${var.openshift_cluster_prefix}-infra VM Creation" + } + + connection { + type = "ssh" + bastion_host = "${azurerm_public_ip.bastion_pip.fqdn}" + bastion_user = "${var.admin_username}" + bastion_private_key = "${file(var.connection_private_ssh_key_path)}" + host = "${element(azurerm_network_interface.infra_nic.*.private_ip_address, count.index)}" + user = 
"${var.admin_username}" + private_key = "${file(var.connection_private_ssh_key_path)}" + } + + provisioner "file" { + source = "${var.openshift_script_path}/nodePrep.sh" + destination = "nodePrep.sh" + } + + provisioner "remote-exec" { + inline = [ + "chmod +x nodePrep.sh", + "sudo bash nodePrep.sh", + ] + } + + os_profile { + computer_name = "${var.openshift_cluster_prefix}-infra-${count.index}" + admin_username = "${var.admin_username}" + admin_password = "${var.openshift_password}" + } + + os_profile_linux_config { + disable_password_authentication = true + + ssh_keys { + path = "/home/${var.admin_username}/.ssh/authorized_keys" + key_data = "${var.ssh_public_key}" + } + } + + storage_image_reference { + publisher = "${lookup(var.os_image_map, join("_publisher", list(var.os_image, "")))}" + offer = "${lookup(var.os_image_map, join("_offer", list(var.os_image, "")))}" + sku = "${lookup(var.os_image_map, join("_sku", list(var.os_image, "")))}" + version = "${lookup(var.os_image_map, join("_version", list(var.os_image, "")))}" + } + + storage_os_disk { + name = "${var.openshift_cluster_prefix}-infra-osdisk${count.index}" + vhd_uri = "${azurerm_storage_account.infra_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-infra-osdisk${count.index}.vhd" + caching = "ReadWrite" + create_option = "FromImage" + } + + storage_data_disk { + name = "${var.openshift_cluster_prefix}-infra-docker-pool" + vhd_uri = "${azurerm_storage_account.infra_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-infra-docker-pool${count.index}.vhd" + disk_size_gb = "${var.data_disk_size}" + create_option = "Empty" + lun = 0 + } +} + +# ******* Node VMs ******* + +resource "azurerm_virtual_machine" "node" { + name = "${var.openshift_cluster_prefix}-node-${count.index}" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + availability_set_id = "${azurerm_availability_set.node.id}" + 
network_interface_ids = ["${element(azurerm_network_interface.node_nic.*.id, count.index)}"] + vm_size = "${var.node_vm_size}" + delete_os_disk_on_termination = true + delete_data_disks_on_termination = true + count = "${var.node_instance_count}" + + tags { + displayName = "${var.openshift_cluster_prefix}-node VM Creation" + } + + connection { + type = "ssh" + bastion_host = "${azurerm_public_ip.bastion_pip.fqdn}" + bastion_user = "${var.admin_username}" + bastion_private_key = "${file(var.connection_private_ssh_key_path)}" + host = "${element(azurerm_network_interface.node_nic.*.private_ip_address, count.index)}" + user = "${var.admin_username}" + private_key = "${file(var.connection_private_ssh_key_path)}" + } + + provisioner "file" { + source = "${var.openshift_script_path}/nodePrep.sh" + destination = "nodePrep.sh" + } + + provisioner "remote-exec" { + inline = [ + "chmod +x nodePrep.sh", + "sudo bash nodePrep.sh", + ] + } + + os_profile { + computer_name = "${var.openshift_cluster_prefix}-node-${count.index}" + admin_username = "${var.admin_username}" + admin_password = "${var.openshift_password}" + } + + os_profile_linux_config { + disable_password_authentication = true + + ssh_keys { + path = "/home/${var.admin_username}/.ssh/authorized_keys" + key_data = "${var.ssh_public_key}" + } + } + + storage_image_reference { + publisher = "${lookup(var.os_image_map, join("_publisher", list(var.os_image, "")))}" + offer = "${lookup(var.os_image_map, join("_offer", list(var.os_image, "")))}" + sku = "${lookup(var.os_image_map, join("_sku", list(var.os_image, "")))}" + version = "${lookup(var.os_image_map, join("_version", list(var.os_image, "")))}" + } + + storage_os_disk { + name = "${var.openshift_cluster_prefix}-node-osdisk" + vhd_uri = "${azurerm_storage_account.nodeos_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-node-osdisk${count.index}.vhd" + caching = "ReadWrite" + create_option = "FromImage" + } + + storage_data_disk { + name = 
"${var.openshift_cluster_prefix}-node-docker-pool${count.index}" + vhd_uri = "${azurerm_storage_account.nodeos_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-node-docker-pool${count.index}.vhd" + disk_size_gb = "${var.data_disk_size}" + create_option = "Empty" + lun = 0 + } +} + +# ******* VM EXTENSIONS ******* + + +# resource "azurerm_virtual_machine_extension" "deploy_open_shift_master" { +# name = "masterOpShExt${count.index}" +# location = "${azurerm_resource_group.rg.location}" +# resource_group_name = "${azurerm_resource_group.rg.name}" +# virtual_machine_name = "${element(azurerm_virtual_machine.master.*.name, count.index)}" +# publisher = "Microsoft.Azure.Extensions" +# type = "CustomScript" +# type_handler_version = "2.0" +# auto_upgrade_minor_version = true +# depends_on = ["azurerm_virtual_machine.master", "azurerm_virtual_machine_extension.node_prep", "azurerm_storage_container.vhds", "azurerm_virtual_machine_extension.deploy_infra"] +# +# settings = <Traffic Manager routing methods for details of the different routing methods available. +- Create or update a Traffic Manager profile for details of the JSON elements relating to a Traffic Manager profile. + +## main.tf +The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables. + +## outputs.tf +This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command. + +## provider.tf +Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file. 
+ +## terraform.tfvars +If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and password to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it. + +If you are committing this template to source control, please insure that you add this file to your `.gitignore` file. + +## variables.tf +The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template. + +![`terraform graph`](/examples/azure-traffic-manager-vm/graph.png) diff --git a/examples/traffic-manager-vm/main.tf b/examples/traffic-manager-vm/main.tf new file mode 100644 index 000000000000..ef34a8ad5391 --- /dev/null +++ b/examples/traffic-manager-vm/main.tf @@ -0,0 +1,125 @@ +# provider "azurerm" { +# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID" +# client_id = "REPLACE-WITH-YOUR-CLIENT-ID" +# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET" +# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID" +# } + +resource "azurerm_resource_group" "rg" { + name = "${var.resource_group}" + location = "${var.location}" +} + +resource "azurerm_public_ip" "pip" { + name = "ip${count.index}" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + public_ip_address_allocation = "dynamic" + domain_name_label = "${var.dns_name}${count.index}" + count = "${var.num_vms}" +} + +resource "azurerm_virtual_network" "vnet" { + name = "${var.vnet}" + location = "${var.location}" + address_space = ["${var.address_space}"] + resource_group_name = "${azurerm_resource_group.rg.name}" +} + +resource "azurerm_subnet" "subnet" { + name = "${var.subnet_name}" + virtual_network_name = "${azurerm_virtual_network.vnet.name}" + resource_group_name = "${azurerm_resource_group.rg.name}" + address_prefix = "${var.subnet_prefix}" +} + 
+resource "azurerm_network_interface" "nic" { + name = "nic${count.index}" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + count = "${var.num_vms}" + + ip_configuration { + name = "ipconfig${count.index}" + subnet_id = "${azurerm_subnet.subnet.id}" + private_ip_address_allocation = "Dynamic" + public_ip_address_id = "${element(azurerm_public_ip.pip.*.id, count.index)}" + } +} + +resource "azurerm_virtual_machine" "vm" { + name = "vm${count.index}" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + vm_size = "${var.vm_size}" + count = "${var.num_vms}" + network_interface_ids = ["${element(azurerm_network_interface.nic.*.id, count.index)}"] + + storage_image_reference { + publisher = "${var.image_publisher}" + offer = "${var.image_offer}" + sku = "${var.image_sku}" + version = "${var.image_version}" + } + + storage_os_disk { + name = "osdisk${count.index}" + create_option = "FromImage" + } + + os_profile { + computer_name = "vm${count.index}" + admin_username = "${var.admin_username}" + admin_password = "${var.admin_password}" + } + + os_profile_linux_config { + disable_password_authentication = false + } +} + +resource "azurerm_virtual_machine_extension" "ext" { + depends_on = ["azurerm_virtual_machine.vm"] + name = "CustomScript" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + virtual_machine_name = "vm${count.index}" + publisher = "Microsoft.Azure.Extensions" + type = "CustomScript" + type_handler_version = "2.0" + count = "${var.num_vms}" + auto_upgrade_minor_version = true + + settings = < Prerequisite - The generalized image VHD should exist, as well as a Storage Account for boot diagnostics + +This template allows you to create a Virtual Machine from an unmanaged User image vhd. This template also deploys a Virtual Network, Public IP addresses and a Network Interface. 
+ +## main.tf +The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables. + +## outputs.tf +This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command. + +## provider.tf +Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file. + +## terraform.tfvars +If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it. + +If you are committing this template to source control, please ensure that you add this file to your `.gitignore` file. + +## variables.tf +The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
+ +![graph](/examples/azure-vm-from-user-image/graph.png) diff --git a/examples/vm-from-user-image/main.tf b/examples/vm-from-user-image/main.tf new file mode 100644 index 000000000000..1295afb2ba16 --- /dev/null +++ b/examples/vm-from-user-image/main.tf @@ -0,0 +1,73 @@ +# provider "azurerm" { +# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID" +# client_id = "REPLACE-WITH-YOUR-CLIENT-ID" +# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET" +# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID" +# } + +resource "azurerm_resource_group" "rg" { + name = "${var.resource_group}" + location = "${var.location}" +} + +resource "azurerm_virtual_network" "vnet" { + name = "${var.hostname}vnet" + location = "${var.location}" + address_space = ["${var.address_space}"] + resource_group_name = "${azurerm_resource_group.rg.name}" +} + +resource "azurerm_subnet" "subnet" { + name = "${var.hostname}subnet" + virtual_network_name = "${azurerm_virtual_network.vnet.name}" + resource_group_name = "${azurerm_resource_group.rg.name}" + address_prefix = "${var.subnet_prefix}" +} + +resource "azurerm_network_interface" "nic" { + name = "${var.hostname}nic" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + + ip_configuration { + name = "${var.hostname}ipconfig" + subnet_id = "${azurerm_subnet.subnet.id}" + private_ip_address_allocation = "Dynamic" + public_ip_address_id = "${azurerm_public_ip.pip.id}" + } +} + +resource "azurerm_public_ip" "pip" { + name = "${var.hostname}-ip" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + public_ip_address_allocation = "Dynamic" + domain_name_label = "${var.hostname}" +} + +resource "azurerm_virtual_machine" "vm" { + name = "${var.hostname}" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + vm_size = "${var.vm_size}" + network_interface_ids = ["${azurerm_network_interface.nic.id}"] + + storage_os_disk { + name = 
"${var.hostname}-osdisk1" + image_uri = "${var.image_uri}" + vhd_uri = "https://${var.storage_account_name}.blob.core.windows.net/vhds/${var.hostname}-osdisk.vhd" + os_type = "${var.os_type}" + caching = "ReadWrite" + create_option = "FromImage" + } + + os_profile { + computer_name = "${var.hostname}" + admin_username = "${var.admin_username}" + admin_password = "${var.admin_password}" + } + + os_profile_linux_config { + disable_password_authentication = false + } +} diff --git a/examples/vm-from-user-image/outputs.tf b/examples/vm-from-user-image/outputs.tf new file mode 100644 index 000000000000..58a17046f1f4 --- /dev/null +++ b/examples/vm-from-user-image/outputs.tf @@ -0,0 +1,11 @@ +output "hostname" { + value = "${var.hostname}" +} + +output "vm_fqdn" { + value = "${azurerm_public_ip.pip.fqdn}" +} + +output "ssh_command" { + value = "${concat("ssh ", var.admin_username, "@", azurerm_public_ip.pip.fqdn)}" +} diff --git a/examples/vm-from-user-image/variables.tf b/examples/vm-from-user-image/variables.tf new file mode 100644 index 000000000000..133c02bbbf5c --- /dev/null +++ b/examples/vm-from-user-image/variables.tf @@ -0,0 +1,55 @@ +variable "resource_group" { + description = "The name of the resource group in which the image to clone resides." + default = "myrg" +} + +variable "image_uri" { + description = "Specifies the image_uri in the form publisherName:offer:skus:version. image_uri can also specify the VHD uri of a custom VM image to clone." +} + +variable "os_type" { + description = "Specifies the operating system Type, valid values are windows, linux." + default = "linux" +} + +variable "location" { + description = "The location/region where the virtual network is created. Changing this forces a new resource to be created." + default = "southcentralus" +} + +variable "address_space" { + description = "The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created." 
+ default = "10.0.0.0/24" +} + +variable "subnet_prefix" { + description = "The address prefix to use for the subnet." + default = "10.0.0.0/24" +} + +variable "storage_account_name" { + description = "The name of the storage account in which the image from which you are cloning resides." +} + +variable "storage_account_type" { + description = "Defines the type of storage account to be created. Valid options are Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS. Changing this is sometimes valid - see the Azure documentation for more information on which types of accounts can be converted into other types." + default = "Premium_LRS" +} + +variable "vm_size" { + description = "Specifies the size of the virtual machine. This must be the same as the vm image from which you are copying." + default = "Standard_DS1_v2" +} + +variable "hostname" { + description = "VM name referenced also in storage-related names. This is also used as the label for the Domain Name and to make up the FQDN. If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS system." +} + +variable "admin_username" { + description = "administrator user name" + default = "vmadmin" +} + +variable "admin_password" { + description = "The Password for the account specified in the 'admin_username' field. We recommend disabling Password Authentication in a Production environment." +} diff --git a/examples/vm-simple-linux-managed-disk/README.md b/examples/vm-simple-linux-managed-disk/README.md new file mode 100644 index 000000000000..4a6b2ef9b239 --- /dev/null +++ b/examples/vm-simple-linux-managed-disk/README.md @@ -0,0 +1,22 @@ +# Very simple deployment of a Linux VM + +This template allows you to deploy a simple Linux VM using a few different options for the Ubuntu version, using the latest patched version. This will deploy an A0 size VM in the resource group location and return the FQDN of the VM. 
+ +This template takes a minimum number of parameters and deploys a Linux VM, using the latest patched version. + +## main.tf +The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables. + +## outputs.tf +This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command. + +## provider.tf +Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file. + +## terraform.tfvars +If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it. + +## variables.tf +The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
+ +![graph](/examples/azure-vm-simple-linux-managed-disk/graph.png) diff --git a/examples/vm-simple-linux-managed-disk/main.tf b/examples/vm-simple-linux-managed-disk/main.tf new file mode 100644 index 000000000000..5dc9ce1cb086 --- /dev/null +++ b/examples/vm-simple-linux-managed-disk/main.tf @@ -0,0 +1,108 @@ +# provider "azurerm" { +# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID" +# client_id = "REPLACE-WITH-YOUR-CLIENT-ID" +# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET" +# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID" +# } + +resource "azurerm_resource_group" "rg" { + name = "${var.resource_group}" + location = "${var.location}" +} + +resource "azurerm_virtual_network" "vnet" { + name = "${var.virtual_network_name}" + location = "${var.location}" + address_space = ["${var.address_space}"] + resource_group_name = "${azurerm_resource_group.rg.name}" +} + +resource "azurerm_subnet" "subnet" { + name = "${var.rg_prefix}subnet" + virtual_network_name = "${azurerm_virtual_network.vnet.name}" + resource_group_name = "${azurerm_resource_group.rg.name}" + address_prefix = "${var.subnet_prefix}" +} + +resource "azurerm_network_interface" "nic" { + name = "${var.rg_prefix}nic" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + + ip_configuration { + name = "${var.rg_prefix}ipconfig" + subnet_id = "${azurerm_subnet.subnet.id}" + private_ip_address_allocation = "Dynamic" + public_ip_address_id = "${azurerm_public_ip.pip.id}" + } +} + +resource "azurerm_public_ip" "pip" { + name = "${var.rg_prefix}-ip" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + public_ip_address_allocation = "Dynamic" + domain_name_label = "${var.dns_name}" +} + +resource "azurerm_storage_account" "stor" { + name = "${var.dns_name}stor" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + account_type = "${var.storage_account_type}" +} + +resource "azurerm_managed_disk" 
"datadisk" { + name = "${var.hostname}-datadisk" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + storage_account_type = "Standard_LRS" + create_option = "Empty" + disk_size_gb = "1023" +} + +resource "azurerm_virtual_machine" "vm" { + name = "${var.rg_prefix}vm" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + vm_size = "${var.vm_size}" + network_interface_ids = ["${azurerm_network_interface.nic.id}"] + + storage_image_reference { + publisher = "${var.image_publisher}" + offer = "${var.image_offer}" + sku = "${var.image_sku}" + version = "${var.image_version}" + } + + storage_os_disk { + name = "${var.hostname}-osdisk" + managed_disk_type = "Standard_LRS" + caching = "ReadWrite" + create_option = "FromImage" + } + + storage_data_disk { + name = "${var.hostname}-datadisk" + managed_disk_id = "${azurerm_managed_disk.datadisk.id}" + managed_disk_type = "Standard_LRS" + disk_size_gb = "1023" + create_option = "Attach" + lun = 0 + } + + os_profile { + computer_name = "${var.hostname}" + admin_username = "${var.admin_username}" + admin_password = "${var.admin_password}" + } + + os_profile_linux_config { + disable_password_authentication = false + } + + boot_diagnostics { + enabled = true + storage_uri = "${azurerm_storage_account.stor.primary_blob_endpoint}" + } +} \ No newline at end of file diff --git a/examples/vm-simple-linux-managed-disk/outputs.tf b/examples/vm-simple-linux-managed-disk/outputs.tf new file mode 100644 index 000000000000..32c6294ceeab --- /dev/null +++ b/examples/vm-simple-linux-managed-disk/outputs.tf @@ -0,0 +1,11 @@ +output "hostname" { + value = "${var.hostname}" +} + +output "vm_fqdn" { + value = "${azurerm_public_ip.pip.fqdn}" +} + +output "ssh_command" { + value = "ssh ${var.admin_username}@${azurerm_public_ip.pip.fqdn}" +} \ No newline at end of file diff --git a/examples/vm-simple-linux-managed-disk/variables.tf 
b/examples/vm-simple-linux-managed-disk/variables.tf new file mode 100644 index 000000000000..91024000bc03 --- /dev/null +++ b/examples/vm-simple-linux-managed-disk/variables.tf @@ -0,0 +1,75 @@ +variable "resource_group" { + description = "The name of the resource group in which to create the virtual network." +} + +variable "rg_prefix" { + description = "The shortened abbreviation to represent your resource group that will go on the front of some resources." + default = "rg" +} + +variable "hostname" { + description = "VM name referenced also in storage-related names." +} + +variable "dns_name" { + description = " Label for the Domain Name. Will be used to make up the FQDN. If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS system." +} + +variable "location" { + description = "The location/region where the virtual network is created. Changing this forces a new resource to be created." + default = "southcentralus" +} + +variable "virtual_network_name" { + description = "The name for the virtual network." + default = "vnet" +} + +variable "address_space" { + description = "The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created." + default = "10.0.0.0/16" +} + +variable "subnet_prefix" { + description = "The address prefix to use for the subnet." + default = "10.0.10.0/24" +} + +variable "storage_account_type" { + description = "Defines the type of storage account to be created. Valid options are Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS. Changing this is sometimes valid - see the Azure documentation for more information on which types of accounts can be converted into other types." + default = "Standard_LRS" +} + +variable "vm_size" { + description = "Specifies the size of the virtual machine." 
+ default = "Standard_A0" +} + +variable "image_publisher" { + description = "name of the publisher of the image (az vm image list)" + default = "Canonical" +} + +variable "image_offer" { + description = "the name of the offer (az vm image list)" + default = "UbuntuServer" +} + +variable "image_sku" { + description = "image sku to apply (az vm image list)" + default = "16.04-LTS" +} + +variable "image_version" { + description = "version of the image to apply (az vm image list)" + default = "latest" +} + +variable "admin_username" { + description = "administrator user name" + default = "vmadmin" +} + +variable "admin_password" { + description = "administrator password (recommended to disable password auth)" +} \ No newline at end of file diff --git a/examples/vm-specialized-vhd-existing-vnet/README.md b/examples/vm-specialized-vhd-existing-vnet/README.md new file mode 100644 index 000000000000..3afc0f8471c5 --- /dev/null +++ b/examples/vm-specialized-vhd-existing-vnet/README.md @@ -0,0 +1,35 @@ +# Create a specialized virtual machine in an existing virtual network [![Build Status](https://travis-ci.org/harijayms/terraform.svg?branch=topic-201-vm-specialized-vhd-existing-vnet)](https://travis-ci.org/harijayms/terraform) + +This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/master/201-vm-specialized-vhd-existing-vnet) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected in this Terraform template. + +## Prerequisites + +- VHD file from which to create a VM that already exists in a storage account +- Name of the existing VNET and subnet to which the new virtual machine will connect +- Name of the Resource Group in which the VNET resides + + +### NOTE + +This template will create an additional Standard_GRS storage account for enabling boot diagnostics each time you execute this template. 
To avoid running into storage account limits, it is best to delete the storage account when the VM is deleted. + +This template creates a VM from a specialized VHD and lets you connect it to an existing VNET that can reside in a different Resource Group from which the virtual machine resides. + +_Please note: This deployment template does not create or attach an existing Network Security Group to the virtual machine._ + +## main.tf +The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables. + +## outputs.tf +This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command. + +## provider.tf +Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file. + +## terraform.tfvars +If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and password to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it. + +## variables.tf +The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template. 
+ +![graph](/examples/azure-vm-specialized-vhd-existing-vnet/graph.png) diff --git a/examples/vm-specialized-vhd-existing-vnet/main.tf b/examples/vm-specialized-vhd-existing-vnet/main.tf new file mode 100644 index 000000000000..821ee8c31c63 --- /dev/null +++ b/examples/vm-specialized-vhd-existing-vnet/main.tf @@ -0,0 +1,71 @@ +# provider "azurerm" { +# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID" +# client_id = "REPLACE-WITH-YOUR-CLIENT-ID" +# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET" +# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID" +# } + +resource "azurerm_resource_group" "rg" { + name = "${var.resource_group}" + location = "${var.location}" +} + +resource "azurerm_public_ip" "pip" { + name = "PublicIp" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + public_ip_address_allocation = "Dynamic" + domain_name_label = "${var.hostname}" +} + +resource "azurerm_network_interface" "nic" { + name = "nic" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + + ip_configuration { + name = "ipconfig" + subnet_id = "${var.existing_subnet_id}" + private_ip_address_allocation = "Dynamic" + public_ip_address_id = "${azurerm_public_ip.pip.id}" + } +} + +resource "azurerm_storage_account" "stor" { + name = "${var.hostname}" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${var.location}" + account_type = "${var.storage_account_type}" +} + +resource "azurerm_virtual_machine" "vm" { + name = "${var.hostname}" + location = "${var.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + vm_size = "${var.vm_size}" + network_interface_ids = ["${azurerm_network_interface.nic.id}"] + + storage_os_disk { + name = "${var.hostname}osdisk1" + image_uri = "${var.os_disk_vhd_uri}" + vhd_uri = "https://${var.existing_storage_acct}.blob.core.windows.net/${var.existing_vnet_resource_group}-vhds/${var.hostname}osdisk.vhd" + os_type = "${var.os_type}" + caching = 
"ReadWrite" + create_option = "FromImage" + } + + os_profile { + computer_name = "${var.hostname}" + admin_username = "${var.admin_username}" + admin_password = "${var.admin_password}" + } + + os_profile_linux_config { + disable_password_authentication = false + } + + boot_diagnostics { + enabled = true + storage_uri = "${azurerm_storage_account.stor.primary_blob_endpoint}" + } +} diff --git a/examples/vm-specialized-vhd-existing-vnet/outputs.tf b/examples/vm-specialized-vhd-existing-vnet/outputs.tf new file mode 100644 index 000000000000..13768e554d6c --- /dev/null +++ b/examples/vm-specialized-vhd-existing-vnet/outputs.tf @@ -0,0 +1,11 @@ +output "hostname" { + value = "${var.hostname}" +} + +output "vm_fqdn" { + value = "${azurerm_public_ip.pip.fqdn}" +} + +output "ssh_command" { + value = "ssh ${var.admin_username}@${azurerm_public_ip.pip.fqdn}" +} diff --git a/examples/vm-specialized-vhd-existing-vnet/variables.tf b/examples/vm-specialized-vhd-existing-vnet/variables.tf new file mode 100644 index 000000000000..4e53919a84d0 --- /dev/null +++ b/examples/vm-specialized-vhd-existing-vnet/variables.tf @@ -0,0 +1,90 @@ +variable "resource_group" { + description = "Name of the resource group in which to deploy your new Virtual Machine" +} + +variable "existing_vnet_resource_group" { + description = "Name of the existing resource group in which the existing vnet resides" +} + +variable "location" { + description = "The location/region where the virtual network resides." + default = "southcentralus" +} + +variable "hostname" { + description = "This variable is used in this template to create the domain name label as well as the virtual machine name. Must be unique." +} + +variable "os_type" { + description = "Type of OS on the existing vhd. Allowed values: 'windows' or 'linux'." 
+ default = "linux" +} + +variable "os_disk_vhd_uri" { + description = "Uri of the existing VHD in ARM standard or premium storage" +} + +variable "existing_storage_acct" { + description = "The name of the storage account in which your existing VHD and image reside" +} + +variable "existing_virtual_network_name" { + description = "The name for the existing virtual network" +} + +variable "existing_subnet_name" { + description = "The name for the existing subnet in the existing virtual network" +} + +variable "existing_subnet_id" { + description = "The id for the existing subnet in the existing virtual network" +} + +variable "address_space" { + description = "The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created." + default = "10.0.0.0/16" +} + +variable "subnet_prefix" { + description = "The address prefix to use for the subnet." + default = "10.0.10.0/24" +} + +variable "storage_account_type" { + description = "Defines the type of storage account to be created. Valid options are Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS. Changing this is sometimes valid - see the Azure documentation for more information on which types of accounts can be converted into other types." + default = "Standard_GRS" +} + +variable "vm_size" { + description = "Specifies the size of the virtual machine." 
+ default = "Standard_DS1_v2" +} + +variable "image_publisher" { + description = "name of the publisher of the image (az vm image list)" + default = "Canonical" +} + +variable "image_offer" { + description = "the name of the offer (az vm image list)" + default = "UbuntuServer" +} + +variable "image_sku" { + description = "image sku to apply (az vm image list)" + default = "16.04-LTS" +} + +variable "image_version" { + description = "version of the image to apply (az vm image list)" + default = "latest" +} + +variable "admin_username" { + description = "administrator user name" + default = "vmadmin" +} + +variable "admin_password" { + description = "administrator password (recommended to disable password auth)" +} diff --git a/examples/vmss-ubuntu/README.md b/examples/vmss-ubuntu/README.md new file mode 100644 index 000000000000..f6208df84300 --- /dev/null +++ b/examples/vmss-ubuntu/README.md @@ -0,0 +1,22 @@ +# Linux VM Scale Set + +This template deploys a desired count Linux VM Scale Set. Once the VMSS is deployed, the user can deploy an application inside each of the VMs (either by directly logging into the VMs or via a [`remote-exec` provisioner](https://www.terraform.io/docs/provisioners/remote-exec.html)). + +## main.tf +The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables. + +## outputs.tf +This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command. + +## provider.tf +You may leave the provider block in the `main.tf`, as it is in this template, or you can create a file called `provider.tf` and add it to your `.gitignore` file. + +Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). 
Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file. + +## terraform.tfvars +If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and password to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it. + +## variables.tf +The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template. + +![`terraform graph`](/examples/azure-vmss-ubuntu/graph.png) diff --git a/examples/vmss-ubuntu/main.tf b/examples/vmss-ubuntu/main.tf new file mode 100644 index 000000000000..84480abbd7d4 --- /dev/null +++ b/examples/vmss-ubuntu/main.tf @@ -0,0 +1,127 @@ +# provider "azurerm" { +# subscription_id = "${var.subscription_id}" +# client_id = "${var.client_id}" +# client_secret = "${var.client_secret}" +# tenant_id = "${var.tenant_id}" +# } + +resource "azurerm_resource_group" "rg" { + name = "${var.resource_group}" + location = "${var.location}" +} + +resource "azurerm_virtual_network" "vnet" { + name = "${var.resource_group}vnet" + location = "${azurerm_resource_group.rg.location}" + address_space = ["10.0.0.0/16"] + resource_group_name = "${azurerm_resource_group.rg.name}" +} + +resource "azurerm_subnet" "subnet" { + name = "subnet" + address_prefix = "10.0.0.0/24" + resource_group_name = "${azurerm_resource_group.rg.name}" + virtual_network_name = "${azurerm_virtual_network.vnet.name}" +} + +resource "azurerm_public_ip" "pip" { + name = "${var.hostname}-pip" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + public_ip_address_allocation = "Dynamic" + domain_name_label = "${var.hostname}" +} + +resource "azurerm_lb" "lb" { + name = "LoadBalancer" + location 
= "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + depends_on = ["azurerm_public_ip.pip"] + + frontend_ip_configuration { + name = "LBFrontEnd" + public_ip_address_id = "${azurerm_public_ip.pip.id}" + } +} + +resource "azurerm_lb_backend_address_pool" "backlb" { + name = "BackEndAddressPool" + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.lb.id}" +} + +resource "azurerm_lb_nat_pool" "np" { + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.lb.id}" + name = "NATPool" + protocol = "Tcp" + frontend_port_start = 50000 + frontend_port_end = 50119 + backend_port = 22 + frontend_ip_configuration_name = "LBFrontEnd" +} + +resource "azurerm_storage_account" "stor" { + name = "${var.resource_group}stor" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + account_type = "${var.storage_account_type}" +} + +resource "azurerm_storage_container" "vhds" { + name = "vhds" + resource_group_name = "${azurerm_resource_group.rg.name}" + storage_account_name = "${azurerm_storage_account.stor.name}" + container_access_type = "blob" +} + +resource "azurerm_virtual_machine_scale_set" "scaleset" { + name = "autoscalewad" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + upgrade_policy_mode = "Manual" + overprovision = true + depends_on = ["azurerm_lb.lb", "azurerm_virtual_network.vnet"] + + sku { + name = "${var.vm_sku}" + tier = "Standard" + capacity = "${var.instance_count}" + } + + os_profile { + computer_name_prefix = "${var.vmss_name}" + admin_username = "${var.admin_username}" + admin_password = "${var.admin_password}" + } + + os_profile_linux_config { + disable_password_authentication = false + } + + network_profile { + name = "${var.hostname}-nic" + primary = true + + ip_configuration { + name = 
"${var.hostname}ipconfig" + subnet_id = "${azurerm_subnet.subnet.id}" + load_balancer_backend_address_pool_ids = ["${azurerm_lb_backend_address_pool.backlb.id}"] + load_balancer_inbound_nat_rules_ids = ["${element(azurerm_lb_nat_pool.np.*.id, count.index)}"] + } + } + + storage_profile_os_disk { + name = "${var.hostname}" + caching = "ReadWrite" + create_option = "FromImage" + vhd_containers = ["${azurerm_storage_account.stor.primary_blob_endpoint}${azurerm_storage_container.vhds.name}"] + } + + storage_profile_image_reference { + publisher = "${var.image_publisher}" + offer = "${var.image_offer}" + sku = "${var.ubuntu_os_version}" + version = "latest" + } +} diff --git a/examples/vmss-ubuntu/outputs.tf b/examples/vmss-ubuntu/outputs.tf new file mode 100644 index 000000000000..3eba047a26fb --- /dev/null +++ b/examples/vmss-ubuntu/outputs.tf @@ -0,0 +1,3 @@ +output "hostname" { + value = "${var.vmss_name}" +} diff --git a/examples/vmss-ubuntu/variables.tf b/examples/vmss-ubuntu/variables.tf new file mode 100644 index 000000000000..513ce167b41e --- /dev/null +++ b/examples/vmss-ubuntu/variables.tf @@ -0,0 +1,59 @@ +# variable "subscription_id" {} +# variable "client_id" {} +# variable "client_secret" {} +# variable "tenant_id" {} + +variable "resource_group" { + description = "The name of the resource group in which to create the virtual network." +} + +variable "location" { + description = "The location/region where the virtual network is created. Changing this forces a new resource to be created." + default = "southcentralus" +} + +variable "storage_account_type" { + description = "Specifies the type of the storage account" + default = "Standard_LRS" +} + +variable "hostname" { + description = "A string that determines the hostname/IP address of the origin server. This string could be a domain name, IPv4 address or IPv6 address." +} + +variable "vm_sku" { + description = "Size of VMs in the VM Scale Set." 
+ default = "Standard_A1" +} + +variable "ubuntu_os_version" { + description = "The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version. Allowed values are: 15.10, 14.04.4-LTS." + default = "16.04.0-LTS" +} + +variable "image_publisher" { + description = "The name of the publisher of the image (az vm image list)" + default = "Canonical" +} + +variable "image_offer" { + description = "The name of the offer (az vm image list)" + default = "UbuntuServer" +} + +variable "vmss_name" { + description = "String used as a base for naming resources. Must be 3-61 characters in length and globally unique across Azure. A hash is prepended to this string for some resources, and resource-specific information is appended." +} + +variable "instance_count" { + description = "Number of VM instances (100 or less)." + default = "5" +} + +variable "admin_username" { + description = "Admin username on all VMs." +} + +variable "admin_password" { + description = "Admin password on all VMs." +} diff --git a/examples/vnet-to-vnet-peering/README.md b/examples/vnet-to-vnet-peering/README.md new file mode 100644 index 000000000000..36a90a46ffe8 --- /dev/null +++ b/examples/vnet-to-vnet-peering/README.md @@ -0,0 +1,24 @@ +# VNET to VNET Peering + +This template creates two VNETs in the same location, each containing a single subnet, and creates connections between them using VNET Peering. + +## main.tf +The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables. + +## outputs.tf +This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command. + +## provider.tf +You may leave the provider block in the `main.tf`, as it is in this template, or you can create a file called `provider.tf` and add it to your `.gitignore` file. 
+ +Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file. + +## terraform.tfvars +If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it. + +If you are committing this template to source control, please ensure that you add this file to your `.gitignore` file. + +## variables.tf +The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template. 
+ +![`terraform graph`](/examples/azure-vnet-to-vnet-peering/graph.png) diff --git a/examples/vnet-to-vnet-peering/main.tf b/examples/vnet-to-vnet-peering/main.tf new file mode 100644 index 000000000000..6bdfb8a240e3 --- /dev/null +++ b/examples/vnet-to-vnet-peering/main.tf @@ -0,0 +1,56 @@ +# provider "azurerm" { +# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID" +# client_id = "REPLACE-WITH-YOUR-CLIENT-ID" +# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET" +# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID" +# } + +resource "azurerm_resource_group" "rg" { + name = "${var.resource_group}" + location = "${var.location}" +} + +resource "azurerm_virtual_network" "vnet1" { + name = "${var.resource_group}-vnet1" + location = "${var.location}" + address_space = ["10.0.0.0/24"] + resource_group_name = "${azurerm_resource_group.rg.name}" + + subnet { + name = "subnet1" + address_prefix = "10.0.0.0/24" + } +} + +resource "azurerm_virtual_network" "vnet2" { + name = "${var.resource_group}-vnet2" + location = "${var.location}" + address_space = ["192.168.0.0/24"] + resource_group_name = "${azurerm_resource_group.rg.name}" + + subnet { + name = "subnet1" + address_prefix = "192.168.0.0/24" + } +} + +resource "azurerm_virtual_network_peering" "peer1" { + name = "vNet1-to-vNet2" + resource_group_name = "${azurerm_resource_group.rg.name}" + virtual_network_name = "${azurerm_virtual_network.vnet1.name}" + remote_virtual_network_id = "${azurerm_virtual_network.vnet2.id}" + allow_virtual_network_access = true + allow_forwarded_traffic = false + allow_gateway_transit = false +} + +resource "azurerm_virtual_network_peering" "peer2" { + name = "vNet2-to-vNet1" + resource_group_name = "${azurerm_resource_group.rg.name}" + virtual_network_name = "${azurerm_virtual_network.vnet2.name}" + remote_virtual_network_id = "${azurerm_virtual_network.vnet1.id}" + allow_virtual_network_access = true + allow_forwarded_traffic = false + allow_gateway_transit = false + use_remote_gateways = false +} 
diff --git a/examples/vnet-to-vnet-peering/variables.tf b/examples/vnet-to-vnet-peering/variables.tf new file mode 100644 index 000000000000..2701af343e0f --- /dev/null +++ b/examples/vnet-to-vnet-peering/variables.tf @@ -0,0 +1,9 @@ +variable "resource_group" { + description = "The name of the resource group in which the virtual networks are created" + default = "myrg" +} + +variable "location" { + description = "The location/region where the virtual networks are created. Changing this forces a new resource to be created." + default = "southcentralus" +} diff --git a/examples/vnet-two-subnets/README.md b/examples/vnet-two-subnets/README.md new file mode 100644 index 000000000000..d8c36ea6cf2c --- /dev/null +++ b/examples/vnet-two-subnets/README.md @@ -0,0 +1,20 @@ +# Virtual Network with Two Subnets + +This template allows you to create a Virtual Network with two subnets. + +## main.tf +The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables. + +## outputs.tf +This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command. + +## provider.tf +Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file. + +## terraform.tfvars +If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and password to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it. 
+ +## variables.tf +The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template. + +![graph](/examples/azure-vnet-two-subnets/graph.png) diff --git a/examples/vnet-two-subnets/main.tf b/examples/vnet-two-subnets/main.tf new file mode 100644 index 000000000000..aee3593f3efa --- /dev/null +++ b/examples/vnet-two-subnets/main.tf @@ -0,0 +1,32 @@ +# provider "azurerm" { +# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID" +# client_id = "REPLACE-WITH-YOUR-CLIENT-ID" +# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET" +# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID" +# } + +resource "azurerm_resource_group" "rg" { + name = "${var.resource_group}" + location = "${var.location}" +} + +resource "azurerm_virtual_network" "vnet" { + name = "${var.resource_group}vnet" + location = "${var.location}" + address_space = ["10.0.0.0/16"] + resource_group_name = "${azurerm_resource_group.rg.name}" +} + +resource "azurerm_subnet" "subnet1" { + name = "subnet1" + virtual_network_name = "${azurerm_virtual_network.vnet.name}" + resource_group_name = "${azurerm_resource_group.rg.name}" + address_prefix = "10.0.0.0/24" +} + +resource "azurerm_subnet" "subnet2" { + name = "subnet2" + virtual_network_name = "${azurerm_virtual_network.vnet.name}" + resource_group_name = "${azurerm_resource_group.rg.name}" + address_prefix = "10.0.1.0/24" +} diff --git a/examples/vnet-two-subnets/variables.tf b/examples/vnet-two-subnets/variables.tf new file mode 100644 index 000000000000..8d5dd4131636 --- /dev/null +++ b/examples/vnet-two-subnets/variables.tf @@ -0,0 +1,8 @@ +variable "resource_group" { + description = "The name of the resource group in which to create the virtual network." +} + +variable "location" { + description = "The location/region where the virtual network is created. Changing this forces a new resource to be created." 
+ default = "southcentralus" +} diff --git a/examples/wordpress-mysql-replication/README.md b/examples/wordpress-mysql-replication/README.md new file mode 100644 index 000000000000..b080397bc6e5 --- /dev/null +++ b/examples/wordpress-mysql-replication/README.md @@ -0,0 +1,41 @@ +# Deploys a WordPress web site backed by MySQL master-slave replication + +This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/master/wordpress-mysql-replication) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected here. + +This template deploys a WordPress site in Azure backed by MySQL replication with one master and one slave server. It has the following capabilities: + +- Installs and configures GTID based MySQL replication on CentOS 6 +- Deploys a load balancer in front of the 2 MySQL VMs +- MySQL, SSH, and MySQL probe ports are exposed through the load balancer using Network Security Group rules. +- WordPress accesses MySQL through the load balancer. +- Configures an http based health probe for each MySQL instance that can be used to monitor MySQL health. +- WordPress deployment starts immediately after MySQL deployment finishes. +- Details about MySQL management, including failover, can be found [here](https://github.com/Azure/azure-quickstart-templates/tree/master/mysql-replication). + +If you would like to leverage an existing VNET, then please see the [documentation here](https://www.terraform.io/docs/import/index.html) to learn about importing existing resources into Terraform and bringing them under state management by this template. To import your existing VNET, you may use this command. 
+ +``` +terraform import azurerm_virtual_network.testNetwork /subscriptions//resourceGroups//providers/Microsoft.Network/virtualNetworks/ +``` + +## main.tf +The `main.tf` file contains the resources necessary for the MySql replication deployment that will be created. It also contains the Azure Resource Group definition and any defined variables. + +## website.tf +The `website.tf` contains an `azurerm_template_deployment` that will deploy the Wordpress website. + +## outputs.tf +This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command. + +## provider.tf +You may leave the provider block in the `main.tf`, as it is in this template, or you can create a file called `provider.tf` and add it to your `.gitignore` file. + +Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file. + +## terraform.tfvars +If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and password to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it. + +If you are committing this template to source control, please insure that you add this file to your `.gitignore` file. + +## variables.tf +The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template. 
diff --git a/examples/wordpress-mysql-replication/main.tf b/examples/wordpress-mysql-replication/main.tf new file mode 100644 index 000000000000..a91933b1d171 --- /dev/null +++ b/examples/wordpress-mysql-replication/main.tf @@ -0,0 +1,244 @@ +# provider "azurerm" { +# subscription_id = "${var.subscription_id}" +# client_id = "${var.client_id}" +# client_secret = "${var.client_secret}" +# tenant_id = "${var.tenant_id}" +# } + +# ********************** MYSQL REPLICATION ********************** # + +resource "azurerm_resource_group" "rg" { + name = "${var.resource_group}" + location = "${var.location}" +} + +# ********************** VNET / SUBNET ********************** # +resource "azurerm_virtual_network" "vnet" { + name = "${var.virtual_network_name}" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" + address_space = ["${var.vnet_address_prefix}"] +} + +resource "azurerm_subnet" "db_subnet" { + name = "${var.db_subnet_name}" + virtual_network_name = "${azurerm_virtual_network.vnet.name}" + resource_group_name = "${azurerm_resource_group.rg.name}" + network_security_group_id = "${azurerm_network_security_group.nsg.id}" + address_prefix = "${var.db_subnet_address_prefix}" + depends_on = ["azurerm_virtual_network.vnet"] +} + +# ********************** STORAGE ACCOUNTS ********************** # +resource "azurerm_storage_account" "stor" { + name = "${var.unique_prefix}${var.storage_account_name}" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" + account_type = "${var.storage_account_type}" +} + +# ********************** NETWORK SECURITY GROUP ********************** # +resource "azurerm_network_security_group" "nsg" { + name = "${var.unique_prefix}-nsg" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" + + security_rule { + name = "allow-ssh" + description = "Allow SSH" + priority = 
100 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "22" + source_address_prefix = "Internet" + destination_address_prefix = "*" + } + + security_rule { + name = "MySQL" + description = "MySQL" + priority = 110 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "3306" + source_address_prefix = "*" + destination_address_prefix = "*" + } +} + +# ********************** PUBLIC IP ADDRESSES ********************** # +resource "azurerm_public_ip" "pip" { + name = "${var.public_ip_name}" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + public_ip_address_allocation = "Static" + domain_name_label = "${var.dns_name}" +} + +# ********************** AVAILABILITY SET ********************** # +resource "azurerm_availability_set" "availability_set" { + name = "${var.dns_name}-set" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" +} + +# ********************** NETWORK INTERFACES ********************** # +resource "azurerm_network_interface" "nic" { + name = "${var.nic_name}${count.index}" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + network_security_group_id = "${azurerm_network_security_group.nsg.id}" + count = "${var.node_count}" + depends_on = ["azurerm_virtual_network.vnet", "azurerm_public_ip.pip", "azurerm_lb.lb"] + + ip_configuration { + name = "ipconfig${count.index}" + subnet_id = "${azurerm_subnet.db_subnet.id}" + private_ip_address_allocation = "Static" + private_ip_address = "10.0.1.${count.index + 4}" + load_balancer_backend_address_pools_ids = ["${azurerm_lb_backend_address_pool.backend_pool.id}"] + + load_balancer_inbound_nat_rules_ids = [ + "${element(azurerm_lb_nat_rule.NatRule0.*.id, count.index)}", + 
"${element(azurerm_lb_nat_rule.MySQLNatRule0.*.id, count.index)}", + "${element(azurerm_lb_nat_rule.ProbeNatRule0.*.id, count.index)}", + ] + } +} + +# ********************** LOAD BALANCER ********************** # +resource "azurerm_lb" "lb" { + name = "${var.dns_name}-lb" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + depends_on = ["azurerm_public_ip.pip"] + + frontend_ip_configuration { + name = "${var.dns_name}-sshIPCfg" + public_ip_address_id = "${azurerm_public_ip.pip.id}" + } +} + +resource "azurerm_lb_backend_address_pool" "backend_pool" { + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.lb.id}" + name = "${var.dns_name}-ilbBackendPool" +} + +# ********************** LOAD BALANCER INBOUND NAT RULES ********************** # +resource "azurerm_lb_nat_rule" "NatRule0" { + name = "${var.dns_name}-NatRule-${count.index}" + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.lb.id}" + protocol = "tcp" + frontend_port = "6400${count.index + 1}" + backend_port = 22 + frontend_ip_configuration_name = "${var.dns_name}-sshIPCfg" + count = "${var.node_count}" + depends_on = ["azurerm_lb.lb"] +} + +resource "azurerm_lb_nat_rule" "MySQLNatRule0" { + name = "${var.dns_name}-MySQLNatRule-${count.index}" + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.lb.id}" + protocol = "tcp" + frontend_port = "330${count.index + 6}" + backend_port = 3306 + frontend_ip_configuration_name = "${var.dns_name}-sshIPCfg" + count = "${var.node_count}" + depends_on = ["azurerm_lb.lb"] +} + +resource "azurerm_lb_nat_rule" "ProbeNatRule0" { + name = "${var.dns_name}-ProbeNatRule-${count.index}" + resource_group_name = "${azurerm_resource_group.rg.name}" + loadbalancer_id = "${azurerm_lb.lb.id}" + protocol = "tcp" + frontend_port = "920${count.index}" + backend_port = 9200 + 
frontend_ip_configuration_name = "${var.dns_name}-sshIPCfg" + count = "${var.node_count}" + depends_on = ["azurerm_lb.lb"] +} + +# ********************** VIRTUAL MACHINES ********************** # +resource "azurerm_virtual_machine" "vm" { + name = "${var.dns_name}${count.index}" + resource_group_name = "${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" + vm_size = "${var.vm_size}" + network_interface_ids = ["${element(azurerm_network_interface.nic.*.id, count.index)}"] + count = "${var.node_count}" + availability_set_id = "${azurerm_availability_set.availability_set.id}" + depends_on = ["azurerm_availability_set.availability_set", "azurerm_network_interface.nic", "azurerm_storage_account.stor"] + + storage_image_reference { + publisher = "${var.image_publisher}" + offer = "${var.image_offer}" + sku = "${var.os_version}" + version = "latest" + } + + storage_os_disk { + name = "osdisk${count.index}" + vhd_uri = "https://${azurerm_storage_account.stor.name}.blob.core.windows.net/vhds/${var.dns_name}${count.index}-osdisk.vhd" + create_option = "FromImage" + caching = "ReadWrite" + } + + os_profile { + computer_name = "${var.dns_name}${count.index}" + admin_username = "${var.vm_admin_username}" + admin_password = "${var.vm_admin_password}" + } + + storage_data_disk { + name = "datadisk1" + vhd_uri = "https://${azurerm_storage_account.stor.name}.blob.core.windows.net/vhds/${var.dns_name}${count.index}-datadisk1.vhd" + disk_size_gb = "1000" + create_option = "Empty" + lun = 0 + } + + storage_data_disk { + name = "datadisk2" + vhd_uri = "https://${azurerm_storage_account.stor.name}.blob.core.windows.net/vhds/${var.dns_name}${count.index}-datadisk2.vhd" + disk_size_gb = "1000" + create_option = "Empty" + lun = 1 + } + + os_profile_linux_config { + disable_password_authentication = false + } +} + +resource "azurerm_virtual_machine_extension" "setup_mysql" { + name = "${var.dns_name}-${count.index}-setupMySQL" + resource_group_name = 
"${azurerm_resource_group.rg.name}" + location = "${azurerm_resource_group.rg.location}" + virtual_machine_name = "${element(azurerm_virtual_machine.vm.*.name, count.index)}" + publisher = "Microsoft.Azure.Extensions" + type = "CustomScript" + type_handler_version = "2.0" + auto_upgrade_minor_version = true + count = "${var.node_count}" + depends_on = ["azurerm_virtual_machine.vm", "azurerm_lb_nat_rule.ProbeNatRule0"] + + settings = <