# main.tf (forked from Azure/terraform-azurerm-aks)
data "azurerm_resource_group" "main" {
name = var.resource_group_name
}
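
# State move: the SSH key used to be generated by a nested `ssh-key` module;
# this `moved` block lets existing state adopt the new in-module resource
# address without destroying and recreating the key.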
moved {
  from = module.ssh-key.tls_private_key.ssh
  to = tls_private_key.ssh[0]
}
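
# A 2048-bit RSA key pair is generated only when `var.admin_username` is set,
# i.e. when a `linux_profile` will be attached to the cluster. A caller can
# still supply `var.public_ssh_key` to override it (see `linux_profile` below).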
resource "tls_private_key" "ssh" {
count = var.admin_username == null ? 0 : 1
algorithm = "RSA"
rsa_bits = 2048
}
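
# The managed cluster itself. When `var.cluster_name_random_suffix` is true, a
# 4-character suffix derived from `uuid()` is appended to the name; since that
# value changes on every plan, `name` is listed in `ignore_changes` below and
# renames are instead driven by `null_resource.kubernetes_cluster_name_keeper`.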
resource "azurerm_kubernetes_cluster" "main" {
location = coalesce(var.location, data.azurerm_resource_group.main.location)
name = "${local.cluster_name}${var.cluster_name_random_suffix ? substr(md5(uuid()), 0, 4) : ""}"
resource_group_name = data.azurerm_resource_group.main.name
automatic_channel_upgrade = var.automatic_channel_upgrade
azure_policy_enabled = var.azure_policy_enabled
disk_encryption_set_id = var.disk_encryption_set_id
dns_prefix = var.prefix
image_cleaner_enabled = var.image_cleaner_enabled
image_cleaner_interval_hours = var.image_cleaner_interval_hours
kubernetes_version = var.kubernetes_version
local_account_disabled = var.local_account_disabled
node_os_channel_upgrade = var.node_os_channel_upgrade
node_resource_group = var.node_resource_group
oidc_issuer_enabled = var.oidc_issuer_enabled
open_service_mesh_enabled = var.open_service_mesh_enabled
private_cluster_enabled = var.private_cluster_enabled
private_cluster_public_fqdn_enabled = var.private_cluster_public_fqdn_enabled
private_dns_zone_id = var.private_dns_zone_id
role_based_access_control_enabled = var.role_based_access_control_enabled
run_command_enabled = var.run_command_enabled
sku_tier = var.sku_tier
support_plan = var.support_plan
tags = merge(var.tags, (/*<box>*/ (var.tracing_tags_enabled ? { for k, v in /*</box>*/ {
avm_git_commit = "a2b7e7dc8b41c0c8c0e5e2ab7902b46bc2d6919e"
avm_git_file = "main.tf"
avm_git_last_modified_at = "2024-02-16 15:45:22"
avm_git_org = "Azure"
avm_git_repo = "terraform-azurerm-aks"
avm_yor_trace = "681d31d6-cb63-4b44-b2ad-5f5d864d2a1c"
} /*<box>*/ : replace(k, "avm_", var.tracing_tags_prefix) => v } : {}) /*</box>*/), (/*<box>*/ (var.tracing_tags_enabled ? { for k, v in /*</box>*/ {
avm_yor_name = "main"
} /*<box>*/ : replace(k, "avm_", var.tracing_tags_prefix) => v } : {}) /*</box>*/))
workload_identity_enabled = var.workload_identity_enabled
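
  # Exactly one of the two `default_node_pool` blocks below is rendered,
  # toggled by iterating `for_each` over a one-element list: with
  # `var.enable_auto_scaling` false the pool is manually scaled through
  # `node_count`, and `min_count`/`max_count` are forced to null.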
dynamic "default_node_pool" {
for_each = var.enable_auto_scaling == true ? [] : ["default_node_pool_manually_scaled"]
content {
name = var.agents_pool_name
vm_size = var.agents_size
enable_auto_scaling = var.enable_auto_scaling
enable_host_encryption = var.enable_host_encryption
enable_node_public_ip = var.enable_node_public_ip
fips_enabled = var.default_node_pool_fips_enabled
max_count = null
max_pods = var.agents_max_pods
min_count = null
node_count = var.agents_count
node_labels = var.agents_labels
node_taints = var.agents_taints
only_critical_addons_enabled = var.only_critical_addons_enabled
orchestrator_version = var.orchestrator_version
os_disk_size_gb = var.os_disk_size_gb
os_disk_type = var.os_disk_type
os_sku = var.os_sku
pod_subnet_id = var.pod_subnet_id
proximity_placement_group_id = var.agents_proximity_placement_group_id
scale_down_mode = var.scale_down_mode
snapshot_id = var.snapshot_id
tags = merge(var.tags, var.agents_tags)
temporary_name_for_rotation = var.temporary_name_for_rotation
type = var.agents_type
ultra_ssd_enabled = var.ultra_ssd_enabled
vnet_subnet_id = var.vnet_subnet_id
zones = var.agents_availability_zones
dynamic "kubelet_config" {
for_each = var.agents_pool_kubelet_configs
content {
allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls
container_log_max_line = kubelet_config.value.container_log_max_line
container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb
cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled
cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period
cpu_manager_policy = kubelet_config.value.cpu_manager_policy
image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold
image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold
pod_max_pid = kubelet_config.value.pod_max_pid
topology_manager_policy = kubelet_config.value.topology_manager_policy
}
}
dynamic "linux_os_config" {
for_each = var.agents_pool_linux_os_configs
content {
swap_file_size_mb = linux_os_config.value.swap_file_size_mb
transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag
transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled
dynamic "sysctl_config" {
for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs
content {
fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr
fs_file_max = sysctl_config.value.fs_file_max
fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches
fs_nr_open = sysctl_config.value.fs_nr_open
kernel_threads_max = sysctl_config.value.kernel_threads_max
net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog
net_core_optmem_max = sysctl_config.value.net_core_optmem_max
net_core_rmem_default = sysctl_config.value.net_core_rmem_default
net_core_rmem_max = sysctl_config.value.net_core_rmem_max
net_core_somaxconn = sysctl_config.value.net_core_somaxconn
net_core_wmem_default = sysctl_config.value.net_core_wmem_default
net_core_wmem_max = sysctl_config.value.net_core_wmem_max
net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max
net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min
net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1
net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2
net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3
net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout
net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl
net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes
net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time
net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog
net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets
net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse
net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets
net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max
vm_max_map_count = sysctl_config.value.vm_max_map_count
vm_swappiness = sysctl_config.value.vm_swappiness
vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure
}
}
}
}
dynamic "upgrade_settings" {
for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"]
content {
max_surge = var.agents_pool_max_surge
}
}
}
}
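
  # Autoscaled variant of the default node pool: identical settings, except
  # that `min_count`/`max_count` come from variables and `node_count` is
  # omitted so the cluster autoscaler owns the node count.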
dynamic "default_node_pool" {
for_each = var.enable_auto_scaling == true ? ["default_node_pool_auto_scaled"] : []
content {
name = var.agents_pool_name
vm_size = var.agents_size
enable_auto_scaling = var.enable_auto_scaling
enable_host_encryption = var.enable_host_encryption
enable_node_public_ip = var.enable_node_public_ip
fips_enabled = var.default_node_pool_fips_enabled
max_count = var.agents_max_count
max_pods = var.agents_max_pods
min_count = var.agents_min_count
node_labels = var.agents_labels
node_taints = var.agents_taints
only_critical_addons_enabled = var.only_critical_addons_enabled
orchestrator_version = var.orchestrator_version
os_disk_size_gb = var.os_disk_size_gb
os_disk_type = var.os_disk_type
os_sku = var.os_sku
pod_subnet_id = var.pod_subnet_id
proximity_placement_group_id = var.agents_proximity_placement_group_id
scale_down_mode = var.scale_down_mode
snapshot_id = var.snapshot_id
tags = merge(var.tags, var.agents_tags)
temporary_name_for_rotation = var.temporary_name_for_rotation
type = var.agents_type
ultra_ssd_enabled = var.ultra_ssd_enabled
vnet_subnet_id = var.vnet_subnet_id
zones = var.agents_availability_zones
dynamic "kubelet_config" {
for_each = var.agents_pool_kubelet_configs
content {
allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls
container_log_max_line = kubelet_config.value.container_log_max_line
container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb
cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled
cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period
cpu_manager_policy = kubelet_config.value.cpu_manager_policy
image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold
image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold
pod_max_pid = kubelet_config.value.pod_max_pid
topology_manager_policy = kubelet_config.value.topology_manager_policy
}
}
dynamic "linux_os_config" {
for_each = var.agents_pool_linux_os_configs
content {
swap_file_size_mb = linux_os_config.value.swap_file_size_mb
transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag
transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled
dynamic "sysctl_config" {
for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs
content {
fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr
fs_file_max = sysctl_config.value.fs_file_max
fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches
fs_nr_open = sysctl_config.value.fs_nr_open
kernel_threads_max = sysctl_config.value.kernel_threads_max
net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog
net_core_optmem_max = sysctl_config.value.net_core_optmem_max
net_core_rmem_default = sysctl_config.value.net_core_rmem_default
net_core_rmem_max = sysctl_config.value.net_core_rmem_max
net_core_somaxconn = sysctl_config.value.net_core_somaxconn
net_core_wmem_default = sysctl_config.value.net_core_wmem_default
net_core_wmem_max = sysctl_config.value.net_core_wmem_max
net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max
net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min
net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1
net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2
net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3
net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout
net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl
net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes
net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time
net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog
net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets
net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse
net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets
net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max
vm_max_map_count = sysctl_config.value.vm_max_map_count
vm_swappiness = sysctl_config.value.vm_swappiness
vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure
}
}
}
}
dynamic "upgrade_settings" {
for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"]
content {
max_surge = var.agents_pool_max_surge
}
}
}
}
dynamic "aci_connector_linux" {
for_each = var.aci_connector_linux_enabled ? ["aci_connector_linux"] : []
content {
subnet_name = var.aci_connector_linux_subnet_name
}
}
dynamic "api_server_access_profile" {
for_each = var.api_server_authorized_ip_ranges != null || var.api_server_subnet_id != null ? [
"api_server_access_profile"
] : []
content {
authorized_ip_ranges = var.api_server_authorized_ip_ranges
subnet_id = var.api_server_subnet_id
}
}
dynamic "auto_scaler_profile" {
for_each = var.auto_scaler_profile_enabled ? ["default_auto_scaler_profile"] : []
content {
balance_similar_node_groups = var.auto_scaler_profile_balance_similar_node_groups
empty_bulk_delete_max = var.auto_scaler_profile_empty_bulk_delete_max
expander = var.auto_scaler_profile_expander
max_graceful_termination_sec = var.auto_scaler_profile_max_graceful_termination_sec
max_node_provisioning_time = var.auto_scaler_profile_max_node_provisioning_time
max_unready_nodes = var.auto_scaler_profile_max_unready_nodes
max_unready_percentage = var.auto_scaler_profile_max_unready_percentage
new_pod_scale_up_delay = var.auto_scaler_profile_new_pod_scale_up_delay
scale_down_delay_after_add = var.auto_scaler_profile_scale_down_delay_after_add
scale_down_delay_after_delete = local.auto_scaler_profile_scale_down_delay_after_delete
scale_down_delay_after_failure = var.auto_scaler_profile_scale_down_delay_after_failure
scale_down_unneeded = var.auto_scaler_profile_scale_down_unneeded
scale_down_unready = var.auto_scaler_profile_scale_down_unready
scale_down_utilization_threshold = var.auto_scaler_profile_scale_down_utilization_threshold
scan_interval = var.auto_scaler_profile_scan_interval
skip_nodes_with_local_storage = var.auto_scaler_profile_skip_nodes_with_local_storage
skip_nodes_with_system_pods = var.auto_scaler_profile_skip_nodes_with_system_pods
}
}
dynamic "azure_active_directory_role_based_access_control" {
for_each = var.role_based_access_control_enabled && var.rbac_aad && var.rbac_aad_managed ? ["rbac"] : []
content {
admin_group_object_ids = var.rbac_aad_admin_group_object_ids
azure_rbac_enabled = var.rbac_aad_azure_rbac_enabled
managed = true
tenant_id = var.rbac_aad_tenant_id
}
}
dynamic "azure_active_directory_role_based_access_control" {
for_each = var.role_based_access_control_enabled && var.rbac_aad && !var.rbac_aad_managed ? ["rbac"] : []
content {
client_app_id = var.rbac_aad_client_app_id
managed = false
server_app_id = var.rbac_aad_server_app_id
server_app_secret = var.rbac_aad_server_app_secret
tenant_id = var.rbac_aad_tenant_id
}
}
dynamic "confidential_computing" {
for_each = var.confidential_computing == null ? [] : [var.confidential_computing]
content {
sgx_quote_helper_enabled = confidential_computing.value.sgx_quote_helper_enabled
}
}
dynamic "http_proxy_config" {
for_each = var.http_proxy_config == null ? [] : ["http_proxy_config"]
content {
http_proxy = coalesce(var.http_proxy_config.http_proxy, var.http_proxy_config.https_proxy)
https_proxy = coalesce(var.http_proxy_config.https_proxy, var.http_proxy_config.http_proxy)
no_proxy = var.http_proxy_config.no_proxy
trusted_ca = var.http_proxy_config.trusted_ca
}
}
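
  # `identity` and `service_principal` are mutually exclusive: a managed
  # identity is used unless both `client_id` and `client_secret` are supplied,
  # in which case the `service_principal` block further down is rendered instead.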
dynamic "identity" {
for_each = var.client_id == "" || var.client_secret == "" ? ["identity"] : []
content {
type = var.identity_type
identity_ids = var.identity_ids
}
}
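
  # Application Gateway ingress (AGIC): `try()` reads the attributes from
  # whichever of the brown-field (pre-existing gateway) or green-field
  # (gateway created for this cluster) variables is set; a precondition below
  # ensures at most one of the two is non-null.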
dynamic "ingress_application_gateway" {
for_each = local.ingress_application_gateway_enabled ? ["ingress_application_gateway"] : []
content {
gateway_id = try(var.brown_field_application_gateway_for_ingress.id, null)
gateway_name = try(var.green_field_application_gateway_for_ingress.name, null)
subnet_cidr = try(var.green_field_application_gateway_for_ingress.subnet_cidr, null)
subnet_id = try(var.green_field_application_gateway_for_ingress.subnet_id, null)
}
}
dynamic "key_management_service" {
for_each = var.kms_enabled ? ["key_management_service"] : []
content {
key_vault_key_id = var.kms_key_vault_key_id
key_vault_network_access = var.kms_key_vault_network_access
}
}
dynamic "key_vault_secrets_provider" {
for_each = var.key_vault_secrets_provider_enabled ? ["key_vault_secrets_provider"] : []
content {
secret_rotation_enabled = var.secret_rotation_enabled
secret_rotation_interval = var.secret_rotation_interval
}
}
dynamic "kubelet_identity" {
for_each = var.kubelet_identity == null ? [] : [var.kubelet_identity]
content {
client_id = kubelet_identity.value.client_id
object_id = kubelet_identity.value.object_id
user_assigned_identity_id = kubelet_identity.value.user_assigned_identity_id
}
}
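
  # SSH access for Linux nodes: prefers `var.public_ssh_key` and falls back to
  # the key generated by `tls_private_key.ssh` above. The `replace()` call is
  # presumably there to strip the trailing newline emitted with the generated
  # OpenSSH public key.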
dynamic "linux_profile" {
for_each = var.admin_username == null ? [] : ["linux_profile"]
content {
admin_username = var.admin_username
ssh_key {
key_data = replace(coalesce(var.public_ssh_key, tls_private_key.ssh[0].public_key_openssh), "\n", "")
}
}
}
dynamic "maintenance_window" {
for_each = var.maintenance_window != null ? ["maintenance_window"] : []
content {
dynamic "allowed" {
for_each = var.maintenance_window.allowed
content {
day = allowed.value.day
hours = allowed.value.hours
}
}
dynamic "not_allowed" {
for_each = var.maintenance_window.not_allowed
content {
end = not_allowed.value.end
start = not_allowed.value.start
}
}
}
}
dynamic "maintenance_window_auto_upgrade" {
for_each = var.maintenance_window_auto_upgrade == null ? [] : [var.maintenance_window_auto_upgrade]
content {
duration = maintenance_window_auto_upgrade.value.duration
frequency = maintenance_window_auto_upgrade.value.frequency
interval = maintenance_window_auto_upgrade.value.interval
day_of_month = maintenance_window_auto_upgrade.value.day_of_month
day_of_week = maintenance_window_auto_upgrade.value.day_of_week
start_date = maintenance_window_auto_upgrade.value.start_date
start_time = maintenance_window_auto_upgrade.value.start_time
utc_offset = maintenance_window_auto_upgrade.value.utc_offset
week_index = maintenance_window_auto_upgrade.value.week_index
dynamic "not_allowed" {
for_each = maintenance_window_auto_upgrade.value.not_allowed == null ? [] : maintenance_window_auto_upgrade.value.not_allowed
content {
end = not_allowed.value.end
start = not_allowed.value.start
}
}
}
}
dynamic "maintenance_window_node_os" {
for_each = var.maintenance_window_node_os == null ? [] : [var.maintenance_window_node_os]
content {
duration = maintenance_window_node_os.value.duration
frequency = maintenance_window_node_os.value.frequency
interval = maintenance_window_node_os.value.interval
day_of_month = maintenance_window_node_os.value.day_of_month
day_of_week = maintenance_window_node_os.value.day_of_week
start_date = maintenance_window_node_os.value.start_date
start_time = maintenance_window_node_os.value.start_time
utc_offset = maintenance_window_node_os.value.utc_offset
week_index = maintenance_window_node_os.value.week_index
dynamic "not_allowed" {
for_each = maintenance_window_node_os.value.not_allowed == null ? [] : maintenance_window_node_os.value.not_allowed
content {
end = not_allowed.value.end
start = not_allowed.value.start
}
}
}
}
dynamic "microsoft_defender" {
for_each = var.microsoft_defender_enabled ? ["microsoft_defender"] : []
content {
log_analytics_workspace_id = local.log_analytics_workspace.id
}
}
dynamic "monitor_metrics" {
for_each = var.monitor_metrics != null ? ["monitor_metrics"] : []
content {
annotations_allowed = var.monitor_metrics.annotations_allowed
labels_allowed = var.monitor_metrics.labels_allowed
}
}
network_profile {
network_plugin = var.network_plugin
dns_service_ip = var.net_profile_dns_service_ip
ebpf_data_plane = var.ebpf_data_plane
load_balancer_sku = var.load_balancer_sku
network_plugin_mode = var.network_plugin_mode
network_policy = var.network_policy
outbound_type = var.net_profile_outbound_type
pod_cidr = var.net_profile_pod_cidr
service_cidr = var.net_profile_service_cidr
dynamic "load_balancer_profile" {
for_each = var.load_balancer_profile_enabled && var.load_balancer_sku == "standard" ? [
"load_balancer_profile"
] : []
content {
idle_timeout_in_minutes = var.load_balancer_profile_idle_timeout_in_minutes
managed_outbound_ip_count = var.load_balancer_profile_managed_outbound_ip_count
managed_outbound_ipv6_count = var.load_balancer_profile_managed_outbound_ipv6_count
outbound_ip_address_ids = var.load_balancer_profile_outbound_ip_address_ids
outbound_ip_prefix_ids = var.load_balancer_profile_outbound_ip_prefix_ids
outbound_ports_allocated = var.load_balancer_profile_outbound_ports_allocated
}
}
}
dynamic "oms_agent" {
for_each = var.log_analytics_workspace_enabled ? ["oms_agent"] : []
content {
log_analytics_workspace_id = local.log_analytics_workspace.id
msi_auth_for_monitoring_enabled = var.msi_auth_for_monitoring_enabled
}
}
dynamic "service_mesh_profile" {
for_each = var.service_mesh_profile == null ? [] : ["service_mesh_profile"]
content {
mode = var.service_mesh_profile.mode
external_ingress_gateway_enabled = var.service_mesh_profile.external_ingress_gateway_enabled
internal_ingress_gateway_enabled = var.service_mesh_profile.internal_ingress_gateway_enabled
}
}
dynamic "service_principal" {
for_each = var.client_id != "" && var.client_secret != "" ? ["service_principal"] : []
content {
client_id = var.client_id
client_secret = var.client_secret
}
}
dynamic "storage_profile" {
for_each = var.storage_profile_enabled ? ["storage_profile"] : []
content {
blob_driver_enabled = var.storage_profile_blob_driver_enabled
disk_driver_enabled = var.storage_profile_disk_driver_enabled
disk_driver_version = var.storage_profile_disk_driver_version
file_driver_enabled = var.storage_profile_file_driver_enabled
snapshot_controller_enabled = var.storage_profile_snapshot_controller_enabled
}
}
dynamic "web_app_routing" {
for_each = var.web_app_routing == null ? [] : ["web_app_routing"]
content {
dns_zone_id = var.web_app_routing.dns_zone_id
}
}
dynamic "workload_autoscaler_profile" {
for_each = var.workload_autoscaler_profile == null ? [] : [var.workload_autoscaler_profile]
content {
keda_enabled = workload_autoscaler_profile.value.keda_enabled
vertical_pod_autoscaler_enabled = workload_autoscaler_profile.value.vertical_pod_autoscaler_enabled
}
}
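
  # `ignore_changes` plus `replace_triggered_by` implements a keeper pattern:
  # drift-prone arguments are ignored on this resource, and replacement is
  # instead triggered by the dedicated `null_resource` keepers defined below.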
  lifecycle {
    ignore_changes = [
      http_application_routing_enabled,
      http_proxy_config[0].no_proxy,
      kubernetes_version,
      public_network_access_enabled,
      # The cluster name may carry a random suffix, so ignore name changes here.
      # The user-supplied name is tracked by `null_resource.kubernetes_cluster_name_keeper`,
      # so changing that name still recreates this resource.
      name,
    ]
    replace_triggered_by = [
      null_resource.kubernetes_cluster_name_keeper.id
    ]
    precondition {
      condition = (var.client_id != "" && var.client_secret != "") || (var.identity_type != "")
      error_message = "Either `client_id` and `client_secret` or `identity_type` must be set."
    }
    precondition {
      # Why not `var.identity_ids != null && length(var.identity_ids) > 0`? Boolean
      # expressions in Terraform are not short-circuited, so even when `var.identity_ids`
      # is null Terraform would still call `length(null)` and raise an error.
      # https://github.com/hashicorp/terraform/issues/24128
      condition = (var.client_id != "" && var.client_secret != "") || (var.identity_type == "SystemAssigned") || (var.identity_ids == null ? false : length(var.identity_ids) > 0)
      error_message = "If an identity is used and `identity_type` is set to `UserAssigned`, `identity_ids` must be set as well."
    }
    precondition {
      condition = !(var.microsoft_defender_enabled && !var.log_analytics_workspace_enabled)
      error_message = "Enabling Microsoft Defender requires that `log_analytics_workspace_enabled` be set to true."
    }
    precondition {
      condition = !(var.load_balancer_profile_enabled && var.load_balancer_sku != "standard")
      error_message = "Enabling `load_balancer_profile` requires that `load_balancer_sku` be set to `standard`."
    }
    precondition {
      condition = local.automatic_channel_upgrade_check
      error_message = "Either disable automatic upgrades, or specify `kubernetes_version` or `orchestrator_version` only up to the minor version when using `automatic_channel_upgrade=patch`. You don't need to specify `kubernetes_version` at all when using `automatic_channel_upgrade=stable|rapid|node-image`, where `orchestrator_version` must always be set to `null`."
    }
    precondition {
      condition = var.role_based_access_control_enabled || !var.rbac_aad
      error_message = "Enabling Azure Active Directory integration requires that `role_based_access_control_enabled` be set to true."
    }
    precondition {
      condition = !(var.kms_enabled && var.identity_type != "UserAssigned")
      error_message = "KMS etcd encryption doesn't work with system-assigned managed identity."
    }
    precondition {
      condition = !var.workload_identity_enabled || var.oidc_issuer_enabled
      error_message = "`oidc_issuer_enabled` must be set to `true` to enable Azure AD Workload Identity."
    }
    precondition {
      condition = var.network_plugin_mode != "overlay" || var.network_plugin == "azure"
      error_message = "When `network_plugin_mode` is set to `overlay`, the `network_plugin` field can only be set to `azure`."
    }
    precondition {
      condition = var.ebpf_data_plane != "cilium" || var.network_plugin == "azure"
      error_message = "When `ebpf_data_plane` is set to `cilium`, the `network_plugin` field can only be set to `azure`."
    }
    precondition {
      condition = var.ebpf_data_plane != "cilium" || var.network_plugin_mode == "overlay" || var.pod_subnet_id != null
      error_message = "When `ebpf_data_plane` is set to `cilium`, one of `network_plugin_mode = overlay` or `pod_subnet_id` must be specified."
    }
    precondition {
      condition = can(coalesce(var.cluster_name, var.prefix))
      error_message = "You must set at least one of `var.cluster_name` and `var.prefix` to create `azurerm_kubernetes_cluster.main`."
    }
    precondition {
      condition = var.automatic_channel_upgrade != "node-image" || var.node_os_channel_upgrade == "NodeImage"
      error_message = "`node_os_channel_upgrade` must be set to `NodeImage` if `automatic_channel_upgrade` has been set to `node-image`."
    }
    precondition {
      condition = (var.kubelet_identity == null) || ((var.client_id == "" || var.client_secret == "") && var.identity_type == "UserAssigned" && try(length(var.identity_ids), 0) > 0)
      error_message = "When `kubelet_identity` is set, the `type` field in the `identity` block must be `UserAssigned` and `identity_ids` must be set."
    }
    precondition {
      condition = var.enable_auto_scaling != true || var.agents_type == "VirtualMachineScaleSets"
      error_message = "Autoscaling on default node pools is only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets type nodes."
    }
    precondition {
      condition = var.brown_field_application_gateway_for_ingress == null || var.green_field_application_gateway_for_ingress == null
      error_message = "At least one of `var.brown_field_application_gateway_for_ingress` or `var.green_field_application_gateway_for_ingress` must be `null`."
    }
  }
}
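
# Keeper resources: each `null_resource` stores one piece of user input in its
# `triggers`, so changing that input replaces the keeper and, through
# `replace_triggered_by`, the resource that references it.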
resource "null_resource" "kubernetes_cluster_name_keeper" {
triggers = {
name = local.cluster_name
}
}
resource "null_resource" "kubernetes_version_keeper" {
triggers = {
version = var.kubernetes_version
}
}
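
# `kubernetes_version` sits in `ignore_changes` on the cluster, so upgrades are
# applied out of band: this azapi resource patches
# `properties.kubernetesVersion` on the managed cluster and is re-run only when
# `kubernetes_version_keeper` changes, upgrading in place instead of recreating
# the cluster.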
resource "azapi_update_resource" "aks_cluster_post_create" {
type = "Microsoft.ContainerService/managedClusters@2024-02-01"
body = jsonencode({
properties = {
kubernetesVersion = var.kubernetes_version
}
})
resource_id = azurerm_kubernetes_cluster.main.id
lifecycle {
ignore_changes = all
replace_triggered_by = [null_resource.kubernetes_version_keeper.id]
}
}
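
# The same keeper pattern covers `no_proxy`, which is also ignored on the
# cluster resource. The trigger joins a sorted copy of the list so that mere
# reordering does not count as a change; the actual update is applied through
# azapi, apparently to avoid the disruptive diff azurerm would otherwise plan.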
resource "null_resource" "http_proxy_config_no_proxy_keeper" {
count = can(var.http_proxy_config.no_proxy[0]) ? 1 : 0
triggers = {
http_proxy_no_proxy = try(join(",", try(sort(var.http_proxy_config.no_proxy), [])), "")
}
}
resource "azapi_update_resource" "aks_cluster_http_proxy_config_no_proxy" {
count = can(var.http_proxy_config.no_proxy[0]) ? 1 : 0
type = "Microsoft.ContainerService/managedClusters@2024-02-01"
body = jsonencode({
properties = {
httpProxyConfig = {
noProxy = var.http_proxy_config.no_proxy
}
}
})
resource_id = azurerm_kubernetes_cluster.main.id
depends_on = [azapi_update_resource.aks_cluster_post_create]
lifecycle {
ignore_changes = all
replace_triggered_by = [null_resource.http_proxy_config_no_proxy_keeper[0].id]
}
}