
Commit

Add kubernetes_cluster_agent_pool
titilambert committed Oct 16, 2019
1 parent f25975f commit 3a369b6
Showing 4 changed files with 626 additions and 108 deletions.
5 changes: 5 additions & 0 deletions azurerm/internal/services/containers/client.go
@@ -14,6 +14,7 @@ type Client struct {
WebhooksClient *containerregistry.WebhooksClient
ReplicationsClient *containerregistry.ReplicationsClient
ServicesClient *containerservice.ContainerServicesClient
AgentPoolsClient *containerservice.AgentPoolsClient
}

func BuildClient(o *common.ClientOptions) *Client {
@@ -37,12 +38,16 @@ func BuildClient(o *common.ClientOptions) *Client {
KubernetesClustersClient := containerservice.NewManagedClustersClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&KubernetesClustersClient.Client, o.ResourceManagerAuthorizer)

AgentPoolsClient := containerservice.NewAgentPoolsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&AgentPoolsClient.Client, o.ResourceManagerAuthorizer)

return &Client{
KubernetesClustersClient: &KubernetesClustersClient,
GroupsClient: &GroupsClient,
RegistriesClient: &RegistriesClient,
WebhooksClient: &WebhooksClient,
ReplicationsClient: &ReplicationsClient,
ServicesClient: &ServicesClient,
AgentPoolsClient: &AgentPoolsClient,
}
}
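
For orientation: once wired up here, the new client is reachable from any resource function through the provider meta, just as the cluster update path below does with meta.(*ArmClient).Containers.AgentPoolsClient. A minimal sketch — only the client lookup is taken from this commit; the function and schema field names are illustrative assumptions:

// Sketch: reading an agent pool via the newly registered client.
// Assumes meta is the provider's *ArmClient with a StopContext field, as in
// this codebase at the time; the schema field names are illustrative only.
func exampleAgentPoolRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*ArmClient).Containers.AgentPoolsClient
	ctx := meta.(*ArmClient).StopContext

	resourceGroup := d.Get("resource_group_name").(string)
	clusterName := d.Get("kubernetes_cluster_name").(string)
	poolName := d.Get("name").(string)

	pool, err := client.Get(ctx, resourceGroup, clusterName, poolName)
	if err != nil {
		return fmt.Errorf("Error retrieving Agent Pool %q (Resource Group %q): %+v", poolName, resourceGroup, err)
	}

	// nil checks omitted for brevity; ProvisioningState is promoted from the
	// embedded ManagedClusterAgentPoolProfileProperties.
	log.Printf("[DEBUG] Agent Pool %q provisioning state: %q", poolName, *pool.ProvisioningState)
	return nil
}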
1 change: 1 addition & 0 deletions azurerm/provider.go
@@ -301,6 +301,7 @@ func Provider() terraform.ResourceProvider {
"azurerm_kusto_cluster": resourceArmKustoCluster(),
"azurerm_kusto_database": resourceArmKustoDatabase(),
"azurerm_kusto_eventhub_data_connection": resourceArmKustoEventHubDataConnection(),
"azurerm_kubernetes_cluster_agent_pool": resourceArmKubernetesClusterAgentPool(),
"azurerm_lb_backend_address_pool": resourceArmLoadBalancerBackendAddressPool(),
"azurerm_lb_nat_pool": resourceArmLoadBalancerNatPool(),
"azurerm_lb_nat_rule": resourceArmLoadBalancerNatRule(),
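The resourceArmKubernetesClusterAgentPool() constructor registered above lives in a new file whose diff does not render on this page, so its actual schema is not visible here. Purely as a hypothetical sketch of the shape such a resource usually takes in this provider — every field name below is a guess modelled on the agent_pool_profile block:

// Hypothetical skeleton only — NOT the file added by this commit, which is
// not shown in this rendered diff. All field names are illustrative guesses.
func resourceArmKubernetesClusterAgentPoolSketch() *schema.Resource {
	noop := func(d *schema.ResourceData, meta interface{}) error { return nil }
	return &schema.Resource{
		Create: noop, // the real resource presumably drives AgentPoolsClient.CreateOrUpdate
		Read:   noop, // ...and AgentPoolsClient.Get
		Update: noop,
		Delete: noop, // ...and AgentPoolsClient.Delete

		Schema: map[string]*schema.Schema{
			"name":                    {Type: schema.TypeString, Required: true, ForceNew: true},
			"resource_group_name":     {Type: schema.TypeString, Required: true, ForceNew: true},
			"kubernetes_cluster_name": {Type: schema.TypeString, Required: true, ForceNew: true},
			"vm_size":                 {Type: schema.TypeString, Required: true, ForceNew: true},
			"count":                   {Type: schema.TypeInt, Optional: true, Default: 1},
		},
	}
}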
298 changes: 190 additions & 108 deletions azurerm/resource_arm_kubernetes_cluster.go
@@ -107,6 +107,7 @@ func resourceArmKubernetesCluster() *schema.Resource {
"agent_pool_profile": {
Type: schema.TypeList,
Required: true,
MaxItems: 1,
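// NOTE: pools beyond the first are presumably meant to move to the new
// azurerm_kubernetes_cluster_agent_pool resource added by this commit.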
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
@@ -778,15 +779,96 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{}) error {
log.Printf("[DEBUG] Updated the Service Principal for Kubernetes Cluster %q (Resource Group %q).", name, resourceGroup)
}

/******************************************/
/*
<<<<<<< HEAD
if features.ShouldResourcesBeImported() {
existing, err := client.Get(ctx, resourceGroup, name)
=======
existing, err := client.Get(ctx, resourceGroup, name)
if err != nil {
if !utils.ResponseWasNotFound(existing.Response) {
return fmt.Errorf("Error checking for presence of existing Kubernetes Cluster %q (Resource Group %q): %s", name, resourceGroup, err)
}
}
if requireResourcesToBeImported && d.IsNewResource() {
if existing.ID != nil && *existing.ID != "" {
return tf.ImportAsExistsError("azurerm_kubernetes_cluster", *existing.ID)
}
}
*/
agentProfiles, err := expandKubernetesClusterAgentPoolProfiles(d)
if err != nil {
return err
}
// TODO handle AgentPoolProfiles update through AgentPool client
APclient := meta.(*ArmClient).Containers.AgentPoolsClient
agentPoolName := *agentProfiles[0].Name
resp, err := APclient.Get(ctx, resourceGroup, name, agentPoolName)
if err != nil {
if utils.ResponseWasNotFound(resp.Response) {
log.Printf("[DEBUG] Managed Kubernetes Cluster %q was not found in Resource Group %q - removing from state!", name, resourceGroup)
d.SetId("")
return nil
}

return fmt.Errorf("Error retrieving Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
}
// Handle Primary Agent pool Update
profile := resp.ManagedClusterAgentPoolProfileProperties
//resourceArmKubernetesClusterAgentPoolCreateUpdate
profile.Count = agentProfiles[0].Count

agentProfile := convertKubernetesClusterAgentPoolProfileToKubernetesClusterAgentPoolProfileProperties(agentProfiles[0])

agentPoolParameters := containerservice.AgentPool{
Name: &name,
ManagedClusterAgentPoolProfileProperties: &agentProfile,
}

agentPoolFuture, err := APclient.CreateOrUpdate(ctx, resourceGroup, name, agentPoolName, agentPoolParameters)
if err != nil {
return fmt.Errorf("Error creating/updating Managed Kubernetes Cluster Agent Pool %q (Resource Group %q): %+v", agentPoolName, resourceGroup, err)
}
if err = agentPoolFuture.WaitForCompletionRef(ctx, APclient.Client); err != nil {
return fmt.Errorf("Error waiting for completion of Managed Kubernetes Cluster Agent Pool %q (Resource Group %q): %+v", agentPoolName, resourceGroup, err)
}

read, err := client.Get(ctx, resourceGroup, name)
if err != nil {
return fmt.Errorf("Error retrieving Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
}

if read.ID == nil {
return fmt.Errorf("Cannot read ID for Managed Kubernetes Cluster %q (Resource Group %q)", name, resourceGroup)
}

resourceArmKubernetesClusterRead(d, meta)
d.SetId(*read.ID)
/****************************/
if features.ShouldResourcesBeImported() {
existing, err := client.Get(ctx, resourceGroup, name)
if err != nil {
if !utils.ResponseWasNotFound(existing.Response) {
return fmt.Errorf("Error checking for presence of existing Kubernetes Cluster %q (Resource Group %q): %s", name, resourceGroup, err)
}
}

if existing.ID != nil && *existing.ID != "" {
return tf.ImportAsExistsError("azurerm_kubernetes_cluster", *existing.ID)
}
}

location := azure.NormalizeLocation(d.Get("location").(string))
dnsPrefix := d.Get("dns_prefix").(string)
kubernetesVersion := d.Get("kubernetes_version").(string)

linuxProfile := expandKubernetesClusterLinuxProfile(d)
agentProfiles, err := expandKubernetesClusterAgentPoolProfiles(d)
/*agentProfiles, err := expandKubernetesClusterAgentPoolProfiles(d)
if err != nil {
return err
}
}*/
windowsProfile := expandKubernetesClusterWindowsProfile(d)
networkProfile := expandKubernetesClusterNetworkProfile(d)
servicePrincipalProfile := expandAzureRmKubernetesClusterServicePrincipal(d)
@@ -835,7 +917,7 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{}) error {
return fmt.Errorf("Error waiting for update of Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
}

read, err := client.Get(ctx, resourceGroup, name)
read, err = client.Get(ctx, resourceGroup, name)
if err != nil {
return fmt.Errorf("Error retrieving Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
}
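
Taken together, the agent-pool update path added above is the standard Azure SDK long-running-operation pattern: Get the pool, mutate the returned properties, CreateOrUpdate, then block on the returned future. A condensed, self-contained sketch of just that pattern — the SDK API version in the import path is an assumption, and this is not the literal commit code:

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2019-06-01/containerservice"
)

// updateAgentPoolCount condenses the Get -> mutate -> CreateOrUpdate -> poll
// flow from the update function above into one helper.
func updateAgentPoolCount(ctx context.Context, client containerservice.AgentPoolsClient, resourceGroup, clusterName, poolName string, count int32) error {
	existing, err := client.Get(ctx, resourceGroup, clusterName, poolName)
	if err != nil {
		return fmt.Errorf("Error retrieving Agent Pool %q (Resource Group %q): %+v", poolName, resourceGroup, err)
	}

	// Change only what we need; everything else round-trips unchanged.
	props := existing.ManagedClusterAgentPoolProfileProperties
	props.Count = &count

	future, err := client.CreateOrUpdate(ctx, resourceGroup, clusterName, poolName, containerservice.AgentPool{
		Name: &poolName,
		ManagedClusterAgentPoolProfileProperties: props,
	})
	if err != nil {
		return fmt.Errorf("Error updating Agent Pool %q (Resource Group %q): %+v", poolName, resourceGroup, err)
	}

	// Block until the long-running operation completes, as the code above does.
	return future.WaitForCompletionRef(ctx, client.Client)
}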
@@ -1182,67 +1264,65 @@ func expandKubernetesClusterAgentPoolProfiles(d *schema.ResourceData) ([]containerservice.ManagedClusterAgentPoolProfile, error) {
configs := d.Get("agent_pool_profile").([]interface{})

profiles := make([]containerservice.ManagedClusterAgentPoolProfile, 0)
for config_id := range configs {
config := configs[config_id].(map[string]interface{})

name := config["name"].(string)
poolType := config["type"].(string)
count := int32(config["count"].(int))
vmSize := config["vm_size"].(string)
osDiskSizeGB := int32(config["os_disk_size_gb"].(int))
osType := config["os_type"].(string)

profile := containerservice.ManagedClusterAgentPoolProfile{
Name: utils.String(name),
Type: containerservice.AgentPoolType(poolType),
Count: utils.Int32(count),
VMSize: containerservice.VMSizeTypes(vmSize),
OsDiskSizeGB: utils.Int32(osDiskSizeGB),
OsType: containerservice.OSType(osType),
}
config := configs[0].(map[string]interface{})

if maxPods := int32(config["max_pods"].(int)); maxPods > 0 {
profile.MaxPods = utils.Int32(maxPods)
}
name := config["name"].(string)
poolType := config["type"].(string)
count := int32(config["count"].(int))
vmSize := config["vm_size"].(string)
osDiskSizeGB := int32(config["os_disk_size_gb"].(int))
osType := config["os_type"].(string)

vnetSubnetID := config["vnet_subnet_id"].(string)
if vnetSubnetID != "" {
profile.VnetSubnetID = utils.String(vnetSubnetID)
}
profile := containerservice.ManagedClusterAgentPoolProfile{
Name: utils.String(name),
Type: containerservice.AgentPoolType(poolType),
Count: utils.Int32(count),
VMSize: containerservice.VMSizeTypes(vmSize),
OsDiskSizeGB: utils.Int32(osDiskSizeGB),
OsType: containerservice.OSType(osType),
}

if maxCount := int32(config["max_count"].(int)); maxCount > 0 {
profile.MaxCount = utils.Int32(maxCount)
}
if maxPods := int32(config["max_pods"].(int)); maxPods > 0 {
profile.MaxPods = utils.Int32(maxPods)
}

if minCount := int32(config["min_count"].(int)); minCount > 0 {
profile.MinCount = utils.Int32(minCount)
}
vnetSubnetID := config["vnet_subnet_id"].(string)
if vnetSubnetID != "" {
profile.VnetSubnetID = utils.String(vnetSubnetID)
}

if maxCount := int32(config["max_count"].(int)); maxCount > 0 {
profile.MaxCount = utils.Int32(maxCount)
}

if enableAutoScalingItf := config["enable_auto_scaling"]; enableAutoScalingItf != nil {
profile.EnableAutoScaling = utils.Bool(enableAutoScalingItf.(bool))
if minCount := int32(config["min_count"].(int)); minCount > 0 {
profile.MinCount = utils.Int32(minCount)
}

// Auto scaling will change the number of nodes, but the original count number should not be sent again.
// This avoids the cluster being resized after creation.
if *profile.EnableAutoScaling && !d.IsNewResource() {
profile.Count = nil
}
}
if enableAutoScalingItf := config["enable_auto_scaling"]; enableAutoScalingItf != nil {
profile.EnableAutoScaling = utils.Bool(enableAutoScalingItf.(bool))

if availabilityZones := utils.ExpandStringSlice(config["availability_zones"].([]interface{})); len(*availabilityZones) > 0 {
profile.AvailabilityZones = availabilityZones
// Auto scaling will change the number of nodes, but the original count number should not be sent again.
// This avoids the cluster being resized after creation.
if *profile.EnableAutoScaling && !d.IsNewResource() {
profile.Count = nil
}
}

if *profile.EnableAutoScaling && (profile.MinCount == nil || profile.MaxCount == nil) {
return nil, fmt.Errorf("Can't create an AKS cluster with autoscaling enabled but not setting min_count or max_count")
}
if availabilityZones := utils.ExpandStringSlice(config["availability_zones"].([]interface{})); len(*availabilityZones) > 0 {
profile.AvailabilityZones = availabilityZones
}

if nodeTaints := utils.ExpandStringSlice(config["node_taints"].([]interface{})); len(*nodeTaints) > 0 {
profile.NodeTaints = nodeTaints
}
if *profile.EnableAutoScaling && (profile.MinCount == nil || profile.MaxCount == nil) {
return nil, fmt.Errorf("Can't create an AKS cluster with autoscaling enabled but not setting min_count or max_count")
}

profiles = append(profiles, profile)
if nodeTaints := utils.ExpandStringSlice(config["node_taints"].([]interface{})); len(*nodeTaints) > 0 {
profile.NodeTaints = nodeTaints
}

profiles = append(profiles, profile)

return profiles, nil
}
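
With MaxItems: 1 now enforced on the schema, the loop collapses to reading configs[0], so the helper still returns a slice but it always holds exactly one profile. For illustration, the kind of value it produces for a typical block — a sketch with invented values; the enum constants are from the containerservice SDK:

// Illustrative output of the expander above for a one-element
// agent_pool_profile list; all values here are invented for the example.
profile := containerservice.ManagedClusterAgentPoolProfile{
	Name:         utils.String("default"),
	Type:         containerservice.VirtualMachineScaleSets,
	Count:        utils.Int32(2),
	VMSize:       containerservice.StandardDS2V2,
	OsDiskSizeGB: utils.Int32(30),
	OsType:       containerservice.Linux,
}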

@@ -1253,76 +1333,78 @@ func flattenKubernetesClusterAgentPoolProfiles(profiles *[]containerservice.ManagedClusterAgentPoolProfile, fqdn *string) []interface{} {

agentPoolProfiles := make([]interface{}, 0)

for _, profile := range *profiles {
count := 0
if profile.Count != nil {
count = int(*profile.Count)
}
profile := (*profiles)[0]

enableAutoScaling := false
if profile.EnableAutoScaling != nil {
enableAutoScaling = *profile.EnableAutoScaling
}
count := 0

fqdnVal := ""
if fqdn != nil {
// temporarily persist the parent FQDN here until `fqdn` is removed from the `agent_pool_profile`
fqdnVal = *fqdn
}
if profile.Count != nil {
count = int(*profile.Count)
}

maxCount := 0
if profile.MaxCount != nil {
maxCount = int(*profile.MaxCount)
}
enableAutoScaling := false
if profile.EnableAutoScaling != nil {
enableAutoScaling = *profile.EnableAutoScaling
}

maxPods := 0
if profile.MaxPods != nil {
maxPods = int(*profile.MaxPods)
}
fqdnVal := ""
if fqdn != nil {
// temporarily persist the parent FQDN here until `fqdn` is removed from the `agent_pool_profile`
fqdnVal = *fqdn
}

minCount := 0
if profile.MinCount != nil {
minCount = int(*profile.MinCount)
}
maxCount := 0
if profile.MaxCount != nil {
maxCount = int(*profile.MaxCount)
}

name := ""
if profile.Name != nil {
name = *profile.Name
}
maxPods := 0
if profile.MaxPods != nil {
maxPods = int(*profile.MaxPods)
}

osDiskSizeGB := 0
if profile.OsDiskSizeGB != nil {
osDiskSizeGB = int(*profile.OsDiskSizeGB)
}
minCount := 0
if profile.MinCount != nil {
minCount = int(*profile.MinCount)
}

subnetId := ""
if profile.VnetSubnetID != nil {
subnetId = *profile.VnetSubnetID
}
name := ""
if profile.Name != nil {
name = *profile.Name
}

agentPoolProfile := map[string]interface{}{
"availability_zones": utils.FlattenStringSlice(profile.AvailabilityZones),
"count": count,
"enable_auto_scaling": enableAutoScaling,
"max_count": maxCount,
"max_pods": maxPods,
"min_count": minCount,
"name": name,
"node_taints": utils.FlattenStringSlice(profile.NodeTaints),
"os_disk_size_gb": osDiskSizeGB,
"os_type": string(profile.OsType),
"type": string(profile.Type),
"vm_size": string(profile.VMSize),
"vnet_subnet_id": subnetId,

// TODO: remove in 2.0
"fqdn": fqdnVal,
}
osDiskSizeGB := 0
if profile.OsDiskSizeGB != nil {
osDiskSizeGB = int(*profile.OsDiskSizeGB)
}

agentPoolProfiles = append(agentPoolProfiles, agentPoolProfile)
subnetId := ""
if profile.VnetSubnetID != nil {
subnetId = *profile.VnetSubnetID
}

agentPoolProfile := map[string]interface{}{
"availability_zones": utils.FlattenStringSlice(profile.AvailabilityZones),
"count": count,
"enable_auto_scaling": enableAutoScaling,
"max_count": maxCount,
"max_pods": maxPods,
"min_count": minCount,
"name": name,
"node_taints": utils.FlattenStringSlice(profile.NodeTaints),
"os_disk_size_gb": osDiskSizeGB,
"os_type": string(profile.OsType),
"type": string(profile.Type),
"vm_size": string(profile.VMSize),
"vnet_subnet_id": subnetId,

// TODO: remove in 2.0
"fqdn": fqdnVal,
}

agentPoolProfiles = append(agentPoolProfiles, agentPoolProfile)

return agentPoolProfiles

}
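
The flattened list feeds straight back into Terraform state; the cluster read function (not shown in this diff) presumably calls it along these lines:

// Presumed call site in the cluster read function — a sketch, not commit code.
if err := d.Set("agent_pool_profile", flattenKubernetesClusterAgentPoolProfiles(props.AgentPoolProfiles, props.Fqdn)); err != nil {
	return fmt.Errorf("Error setting `agent_pool_profile`: %+v", err)
}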

func expandKubernetesClusterLinuxProfile(d *schema.ResourceData) *containerservice.LinuxProfile {
