diff --git a/azurerm/internal/services/containers/kubernetes_cluster_auth_resource_test.go b/azurerm/internal/services/containers/kubernetes_cluster_auth_resource_test.go index 308b566d4df5..2beaa1817acf 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_auth_resource_test.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_auth_resource_test.go @@ -9,17 +9,19 @@ import ( ) var kubernetesAuthTests = map[string]func(t *testing.T){ - "apiServerAuthorizedIPRanges": testAccKubernetesCluster_apiServerAuthorizedIPRanges, - "managedClusterIdentity": testAccKubernetesCluster_managedClusterIdentity, - "userAssignedIdentity": testAccKubernetesCluster_userAssignedIdentity, - "updateWithUserAssignedIdentity": testAccKubernetesCluster_updateWithUserAssignedIdentity, - "roleBasedAccessControl": testAccKubernetesCluster_roleBasedAccessControl, - "AAD": testAccKubernetesCluster_roleBasedAccessControlAAD, - "AADUpdateToManaged": testAccKubernetesCluster_roleBasedAccessControlAADUpdateToManaged, - "AADManaged": testAccKubernetesCluster_roleBasedAccessControlAADManaged, - "AADManagedChange": testAccKubernetesCluster_roleBasedAccessControlAADManagedChange, - "roleBasedAccessControlAzure": testAccKubernetesCluster_roleBasedAccessControlAzure, - "servicePrincipal": testAccKubernetesCluster_servicePrincipal, + "apiServerAuthorizedIPRanges": testAccKubernetesCluster_apiServerAuthorizedIPRanges, + "managedClusterIdentity": testAccKubernetesCluster_managedClusterIdentity, + "userAssignedIdentity": testAccKubernetesCluster_userAssignedIdentity, + "updateWithUserAssignedIdentity": testAccKubernetesCluster_updateWithUserAssignedIdentity, + "roleBasedAccessControl": testAccKubernetesCluster_roleBasedAccessControl, + "AAD": testAccKubernetesCluster_roleBasedAccessControlAAD, + "AADUpdateToManaged": testAccKubernetesCluster_roleBasedAccessControlAADUpdateToManaged, + "AADManaged": testAccKubernetesCluster_roleBasedAccessControlAADManaged, + "AADManagedChange": 
testAccKubernetesCluster_roleBasedAccessControlAADManagedChange, + "roleBasedAccessControlAzure": testAccKubernetesCluster_roleBasedAccessControlAzure, + "servicePrincipal": testAccKubernetesCluster_servicePrincipal, + "servicePrincipalToSystemAssigned": testAccKubernetesCluster_servicePrincipalToSystemAssignedIdentity, + "servicePrincipalToUserAssigned": testAccKubernetesCluster_servicePrincipalToUserAssignedIdentity, } func TestAccKubernetesCluster_apiServerAuthorizedIPRanges(t *testing.T) { @@ -417,6 +419,71 @@ func testAccKubernetesCluster_servicePrincipal(t *testing.T) { }) } +func TestAccKubernetesCluster_servicePrincipalToSystemAssignedIdentity(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccKubernetesCluster_servicePrincipalToSystemAssignedIdentity(t) +} + +func testAccKubernetesCluster_servicePrincipalToSystemAssignedIdentity(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + r := KubernetesClusterResource{} + clientData := data.Client() + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.servicePrincipalConfig(data, clientData.Default.ClientID, clientData.Default.ClientSecret), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("identity.%").HasValue("0"), + ), + }, + data.ImportStep("service_principal.0.client_secret"), + { + Config: r.managedClusterIdentityConfig(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("identity.0.type").HasValue("SystemAssigned"), + check.That(data.ResourceName).Key("kubelet_identity.0.client_id").Exists(), + check.That(data.ResourceName).Key("kubelet_identity.0.object_id").Exists(), + check.That(data.ResourceName).Key("kubelet_identity.0.user_assigned_identity_id").Exists(), + check.That(data.ResourceName).Key("service_principal.%").HasValue("0"), + ), + }, + data.ImportStep(), + }) +} + +func 
TestAccKubernetesCluster_servicePrincipalToUserAssignedIdentity(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccKubernetesCluster_servicePrincipalToUserAssignedIdentity(t) +} + +func testAccKubernetesCluster_servicePrincipalToUserAssignedIdentity(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + r := KubernetesClusterResource{} + clientData := data.Client() + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.servicePrincipalConfig(data, clientData.Default.ClientID, clientData.Default.ClientSecret), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("identity.%").HasValue("0"), + ), + }, + data.ImportStep("service_principal.0.client_secret"), + { + Config: r.userAssignedIdentityConfig(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + check.That(data.ResourceName).Key("identity.0.type").HasValue("UserAssigned"), + check.That(data.ResourceName).Key("identity.0.user_assigned_identity_id").Exists(), + ), + }, + data.ImportStep(), + }) +} + func TestAccKubernetesCluster_updateRoleBasedAccessControlAAD(t *testing.T) { checkIfShouldRunTestsIndividually(t) testAccKubernetesCluster_updateRoleBaseAccessControlAAD(t) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index 29d5930dc8d1..355c9d4f5017 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -45,6 +45,10 @@ func resourceKubernetesCluster() *pluginsdk.Resource { pluginsdk.ForceNewIfChange("sku_tier", func(ctx context.Context, old, new, meta interface{}) bool { return new == "Free" }), + // Migration of `identity` to `service_principal` is not allowed, the other way around is allowed + 
pluginsdk.ForceNewIfChange("service_principal.0.client_id", func(ctx context.Context, old, new, meta interface{}) bool { + return old == "msi" || old == "" + }), ), Timeouts: &pluginsdk.ResourceTimeout{ @@ -227,16 +231,15 @@ func resourceKubernetesCluster() *pluginsdk.Resource { }, "identity": { - Type: pluginsdk.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, + Type: pluginsdk.TypeList, + Optional: true, + ExactlyOneOf: []string{"identity", "service_principal"}, + MaxItems: 1, Elem: &pluginsdk.Resource{ Schema: map[string]*pluginsdk.Schema{ "type": { Type: pluginsdk.TypeString, Required: true, - ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ string(containerservice.ResourceIdentityTypeSystemAssigned), string(containerservice.ResourceIdentityTypeUserAssigned), @@ -615,9 +618,10 @@ func resourceKubernetesCluster() *pluginsdk.Resource { }, "service_principal": { - Type: pluginsdk.TypeList, - Optional: true, - MaxItems: 1, + Type: pluginsdk.TypeList, + Optional: true, + ExactlyOneOf: []string{"identity", "service_principal"}, + MaxItems: 1, Elem: &pluginsdk.Resource{ Schema: map[string]*pluginsdk.Schema{ "client_id": { @@ -1007,7 +1011,7 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} } } - if d.HasChange("service_principal") { + if d.HasChange("service_principal") && !d.HasChange("identity") { log.Printf("[DEBUG] Updating the Service Principal for Kubernetes Cluster %q (Resource Group %q)..", id.ManagedClusterName, id.ResourceGroup) servicePrincipals := d.Get("service_principal").([]interface{}) // we'll be rotating the Service Principal - removing the SP block is handled by the validate function diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index b6f101be4c5c..d9e36a37e55c 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -92,9 +92,9 @@ In addition, one of either 
`identity` or `service_principal` blocks must be spec * `disk_encryption_set_id` - (Optional) The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information [can be found in the documentation](https://docs.microsoft.com/en-us/azure/aks/azure-disk-customer-managed-keys). -* `identity` - (Optional) An `identity` block as defined below. Changing this forces a new resource to be created. +* `identity` - (Optional) An `identity` block as defined below. One of either `identity` or `service_principal` must be specified. --> **NOTE:** One of either `identity` or `service_principal` must be specified. +!> **NOTE:** A migration scenario from `service_principal` to `identity` is supported. When upgrading `service_principal` to `identity`, your cluster's control plane and addon pods will switch to use managed identity, but the kubelets will keep using your configured `service_principal` until you upgrade your Node Pool. * `kubernetes_version` - (Optional) Version of Kubernetes specified when creating the AKS managed cluster. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade). @@ -158,9 +158,9 @@ resource "azurerm_kubernetes_cluster" "example" { * `role_based_access_control` - (Optional) A `role_based_access_control` block. Changing this forces a new resource to be created. -* `service_principal` - (Optional) A `service_principal` block as documented below. +* `service_principal` - (Optional) A `service_principal` block as documented below. One of either `identity` or `service_principal` must be specified. --> **NOTE:** One of either `identity` or `service_principal` must be specified. +!> **NOTE:** A migration scenario from `service_principal` to `identity` is supported. 
When upgrading `service_principal` to `identity`, your cluster's control plane and addon pods will switch to use managed identity, but the kubelets will keep using your configured `service_principal` until you upgrade your Node Pool. * `sku_tier` - (Optional) The SKU Tier that should be used for this Kubernetes Cluster. Possible values are `Free` and `Paid` (which includes the Uptime SLA). Defaults to `Free`.