Skip to content

Commit

Permalink
AKS: Add-On Profiles (#1751)
Browse files Browse the repository at this point in the history
* Added kubernetes_cluster advanced network creation

* Changed network_profile from TypeSet to TypeList

* Added kubernetes_cluster advanced network creation

* Changed network_profile from TypeSet to TypeList

* Updated ForceNew attributes on kubernetes_cluster.network_profile params

* Added advanced network read functionality

* Added tests for kubernetes_cluster.network_profile

* Fixed indentations

* Fixed function name in tests

* Fixed property name in test

* Fixed variable name

* working on data source for kubernetes cluster

* Fixed issue with datasource not working

* Added network_policy and fixed read

* Removed network_policy as unsupported

* Added example of advanced networking

* Added test for data_source

* Made address space more sensible

* Removing kubenet test.
This is the default when network_profile is not specified.
We already test this.

* Made network_plugin a mandatory field.
Better to make this explicit.

* Updated documentation to include network_profile

* Fixing the build

* Added ForceNew to network_profile

* Made the docs a little clearer

* Documenting a required field

* Rephrasing the data sources

* Making the `network_profile` block computed

Tests pass:

```
$ acctests azurerm TestAccAzureRMKubernetesCluster_basic
=== RUN   TestAccAzureRMKubernetesCluster_basic
--- PASS: TestAccAzureRMKubernetesCluster_basic (1329.57s)
PASS
ok  	github.com/terraform-providers/terraform-provider-azurerm/azurerm	1329.613s
```

* Fixing the field we're asserting on

* Conditionally expanding the network_profile block.

The ip fields are now Computed too

* Validation to ensure that a Subnet ID is assigned to the cluster when the network_profile block is specified

Example:

```

Error: Error running plan: 1 error(s) occurred:

* azurerm_kubernetes_cluster.test: 1 error(s) occurred:

* azurerm_kubernetes_cluster.test: If a `network_profile` block is specified, the Agent Pools must be attached to a Subnet

```

* Updating the documentation to mention the new behaviour

* Adding independent tests for Kubenet and Azure networking profiles

* Conditional validation of the `docker_bridge_cidr`, `dns_service_ip` and `service_cidr` fields.

* Updating the docs

* Fixing the tests

* Adding back in the changes from PR #1502 since they were lost in the merge

* Renaming the methods to `addonProfiles` to match the sdk

* fixing the quotes
  • Loading branch information
tombuildsstuff authored Aug 14, 2018
1 parent 590dd40 commit 531df6d
Show file tree
Hide file tree
Showing 6 changed files with 585 additions and 53 deletions.
104 changes: 98 additions & 6 deletions azurerm/data_source_kubernetes_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -204,6 +204,48 @@ func dataSourceArmKubernetesCluster() *schema.Resource {
},
},

"addon_profile": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"http_application_routing": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"enabled": {
Type: schema.TypeBool,
Computed: true,
},
"http_application_routing_zone_name": {
Type: schema.TypeString,
Computed: true,
},
},
},
},

"oms_agent": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"enabled": {
Type: schema.TypeBool,
Computed: true,
},
"log_analytics_workspace_id": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
},
},
},

"tags": tagsForDataSourceSchema(),
},
}
Expand Down Expand Up @@ -250,23 +292,27 @@ func dataSourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}
return fmt.Errorf("Error setting `linux_profile`: %+v", err)
}

addonProfiles := flattenKubernetesClusterDataSourceAddonProfiles(props.AddonProfiles)
if err := d.Set("addon_profile", addonProfiles); err != nil {
return fmt.Errorf("Error setting `addon_profile`: %+v", err)
}

agentPoolProfiles := flattenKubernetesClusterDataSourceAgentPoolProfiles(props.AgentPoolProfiles)
if err := d.Set("agent_pool_profile", agentPoolProfiles); err != nil {
return fmt.Errorf("Error setting `agent_pool_profile`: %+v", err)
}

networkProfile := flattenKubernetesClusterDataSourceNetworkProfile(props.NetworkProfile)
if err := d.Set("network_profile", networkProfile); err != nil {
return fmt.Errorf("Error setting `network_profile`: %+v", err)
}

servicePrincipal := flattenKubernetesClusterDataSourceServicePrincipalProfile(resp.ManagedClusterProperties.ServicePrincipalProfile)
if err := d.Set("service_principal", servicePrincipal); err != nil {
return fmt.Errorf("Error setting `service_principal`: %+v", err)
}
}

networkProfile := flattenKubernetesClusterDataSourceNetworkProfile(resp.NetworkProfile)

if err := d.Set("network_profile", networkProfile); err != nil {
return fmt.Errorf("Error setting `network_profile`: %+v", err)
}

kubeConfigRaw, kubeConfig := flattenKubernetesClusterDataSourceAccessProfile(&profile)
d.Set("kube_config_raw", kubeConfigRaw)

Expand Down Expand Up @@ -428,3 +474,49 @@ func flattenKubernetesClusterDataSourceNetworkProfile(profile *containerservice.

return []interface{}{values}
}

// flattenKubernetesClusterDataSourceAddonProfiles maps the Addon Profiles returned by
// the AKS API (keyed by addon name, e.g. "httpApplicationRouting"/"omsAgent") into the
// single-element list expected by the `addon_profile` schema block. Addons which are
// absent from the API response are surfaced as empty lists.
func flattenKubernetesClusterDataSourceAddonProfiles(profile map[string]*containerservice.ManagedClusterAddonProfile) interface{} {
	// NOTE: a size hint is pointless for a map populated with a fixed, small
	// number of keys, so use a plain make.
	values := make(map[string]interface{})

	routes := make([]interface{}, 0)
	if httpApplicationRouting := profile["httpApplicationRouting"]; httpApplicationRouting != nil {
		enabled := false
		if enabledVal := httpApplicationRouting.Enabled; enabledVal != nil {
			enabled = *enabledVal
		}

		// the zone name lives in the addon's Config map; it may be unset
		zoneName := ""
		if v := httpApplicationRouting.Config["HTTPApplicationRoutingZoneName"]; v != nil {
			zoneName = *v
		}

		output := map[string]interface{}{
			"enabled":                            enabled,
			"http_application_routing_zone_name": zoneName,
		}
		routes = append(routes, output)
	}
	values["http_application_routing"] = routes

	agents := make([]interface{}, 0)
	if omsAgent := profile["omsAgent"]; omsAgent != nil {
		enabled := false
		if enabledVal := omsAgent.Enabled; enabledVal != nil {
			enabled = *enabledVal
		}

		// initialisms are upper-cased per Go convention (workspaceID, not workspaceId)
		workspaceID := ""
		if workspaceResourceID := omsAgent.Config["logAnalyticsWorkspaceResourceID"]; workspaceResourceID != nil {
			workspaceID = *workspaceResourceID
		}

		output := map[string]interface{}{
			"enabled":                    enabled,
			"log_analytics_workspace_id": workspaceID,
		}
		agents = append(agents, output)
	}
	values["oms_agent"] = agents

	return []interface{}{values}
}
78 changes: 78 additions & 0 deletions azurerm/data_source_kubernetes_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -178,6 +178,60 @@ func TestAccDataSourceAzureRMKubernetesCluster_advancedNetworkingKubenetComplete
})
}

// TestAccDataSourceAzureRMKubernetesCluster_addOnProfileOMS verifies that the
// `oms_agent` addon profile is surfaced by the data source when enabled on the cluster.
func TestAccDataSourceAzureRMKubernetesCluster_addOnProfileOMS(t *testing.T) {
	dataSourceName := "data.azurerm_kubernetes_cluster.test"
	rInt := acctest.RandInt()
	config := testAccDataSourceAzureRMKubernetesCluster_addOnProfileOMS(rInt, os.Getenv("ARM_CLIENT_ID"), os.Getenv("ARM_CLIENT_SECRET"), testLocation())

	checks := resource.ComposeTestCheckFunc(
		testCheckAzureRMKubernetesClusterExists(dataSourceName),
		resource.TestCheckResourceAttr(dataSourceName, "addon_profile.#", "1"),
		resource.TestCheckResourceAttr(dataSourceName, "addon_profile.0.oms_agent.#", "1"),
		resource.TestCheckResourceAttr(dataSourceName, "addon_profile.0.oms_agent.0.enabled", "true"),
		resource.TestCheckResourceAttrSet(dataSourceName, "addon_profile.0.oms_agent.0.log_analytics_workspace_id"),
	)

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testCheckAzureRMKubernetesClusterDestroy,
		Steps: []resource.TestStep{
			{
				Config: config,
				Check:  checks,
			},
		},
	})
}

// TestAccDataSourceAzureRMKubernetesCluster_addOnProfileRouting verifies that the
// `http_application_routing` addon profile (and its zone name) is exposed by the data source.
func TestAccDataSourceAzureRMKubernetesCluster_addOnProfileRouting(t *testing.T) {
	dataSourceName := "data.azurerm_kubernetes_cluster.test"
	rInt := acctest.RandInt()
	config := testAccDataSourceAzureRMKubernetesCluster_addOnProfileRouting(rInt, os.Getenv("ARM_CLIENT_ID"), os.Getenv("ARM_CLIENT_SECRET"), testLocation())

	checks := resource.ComposeTestCheckFunc(
		testCheckAzureRMKubernetesClusterExists(dataSourceName),
		resource.TestCheckResourceAttr(dataSourceName, "addon_profile.#", "1"),
		resource.TestCheckResourceAttr(dataSourceName, "addon_profile.0.http_application_routing.#", "1"),
		resource.TestCheckResourceAttr(dataSourceName, "addon_profile.0.http_application_routing.0.enabled", "true"),
		resource.TestCheckResourceAttrSet(dataSourceName, "addon_profile.0.http_application_routing.0.http_application_routing_zone_name"),
	)

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testCheckAzureRMKubernetesClusterDestroy,
		Steps: []resource.TestStep{
			{
				Config: config,
				Check:  checks,
			},
		},
	})
}

func testAccDataSourceAzureRMKubernetesCluster_basic(rInt int, clientId string, clientSecret string, location string) string {
resource := testAccAzureRMKubernetesCluster_basic(rInt, clientId, clientSecret, location)
return fmt.Sprintf(`
Expand Down Expand Up @@ -249,3 +303,27 @@ data "azurerm_kubernetes_cluster" "test" {
}
`, resource)
}

// testAccDataSourceAzureRMKubernetesCluster_addOnProfileOMS builds on the OMS addon
// resource configuration and appends a data source reading that cluster back.
func testAccDataSourceAzureRMKubernetesCluster_addOnProfileOMS(rInt int, clientId string, clientSecret string, location string) string {
	// named `template` rather than `resource` to avoid shadowing the imported
	// terraform helper package of the same name
	template := testAccAzureRMKubernetesCluster_addonProfileOMS(rInt, clientId, clientSecret, location)
	return fmt.Sprintf(`
%s
data "azurerm_kubernetes_cluster" "test" {
  name = "${azurerm_kubernetes_cluster.test.name}"
  resource_group_name = "${azurerm_kubernetes_cluster.test.resource_group_name}"
}
`, template)
}

// testAccDataSourceAzureRMKubernetesCluster_addOnProfileRouting builds on the HTTP
// Application Routing addon resource configuration and appends a data source
// reading that cluster back.
func testAccDataSourceAzureRMKubernetesCluster_addOnProfileRouting(rInt int, clientId string, clientSecret string, location string) string {
	// named `template` rather than `resource` to avoid shadowing the imported
	// terraform helper package of the same name
	template := testAccAzureRMKubernetesCluster_addonProfileRouting(rInt, clientId, clientSecret, location)
	return fmt.Sprintf(`
%s
data "azurerm_kubernetes_cluster" "test" {
  name = "${azurerm_kubernetes_cluster.test.name}"
  resource_group_name = "${azurerm_kubernetes_cluster.test.resource_group_name}"
}
`, template)
}
Loading

0 comments on commit 531df6d

Please sign in to comment.