diff --git a/armonik/partitions-in-database-cron.tf b/armonik/partitions-in-database-cron.tf
index dd96389e..00b09241 100644
--- a/armonik/partitions-in-database-cron.tf
+++ b/armonik/partitions-in-database-cron.tf
@@ -174,9 +174,9 @@ resource "kubernetes_cron_job_v1" "partitions_in_database" {
 locals {
   script_cron = < 1 ? 1 : 0
+
+  configsvr_node_selector = coalesce(try(var.sharding.configsvr.node_selector, null), var.mongodb.node_selector)
+  router_node_selector    = coalesce(try(var.sharding.router.node_selector, null), var.mongodb.node_selector)
+  shards_node_selector    = coalesce(try(var.sharding.shards.node_selector, null), var.mongodb.node_selector)
+  arbiter_node_selector   = coalesce(try(var.sharding.arbiter.node_selector, null), var.mongodb.node_selector)
+
+  shards_labels    = try(var.labels.shards, null)
+  arbiter_labels   = try(var.labels.arbiter, null)
+  configsvr_labels = try(var.labels.configsvr, null)
+  router_labels    = try(var.labels.router, null)
+
+  timeout = var.timeout * var.sharding.shards.quantity
+}
+
+resource "helm_release" "mongodb" {
+  name       = var.name
+  namespace  = var.namespace
+  chart      = var.mongodb.helm_chart_name
+  repository = var.mongodb.helm_chart_repository
+  version    = var.mongodb.helm_chart_version
+  timeout    = local.timeout
+
+  values = [
+    yamlencode({
+      "commonLabels" = var.default_labels
+      "shards"       = var.sharding.shards.quantity
+
+      "image" = {
+        "registry"    = var.mongodb.registry
+        "repository"  = var.mongodb.image
+        "tag"         = var.mongodb.tag
+        "pullSecrets" = local.image_pull_secrets
+      }
+
+      "auth" = {
+        "enabled" = true
+      }
+
+      "common" = {
+        "podLabels"                 = var.default_labels
+        "mongodbSystemLogVerbosity" = 5
+        "initScriptsSecret"         = kubernetes_secret.database_init_script.metadata[0].name
+        "extraEnvVarsSecret"        = kubernetes_secret.mongodb_user.metadata[0].name
+        "extraVolumes" = [{
+          name = "mongodb-cert"
+          secret = {
+            secretName = kubernetes_secret.mongodb_certificate.metadata[0].name
+          }
+        }]
+        "extraVolumeMounts" = [{
+          mountPath = "/mongodb/"
+          name      = "mongodb-cert"
+        }]
+      }
+
+      "volumePermissions" = {
+        "resourcesPreset" = "micro"
+      }
+
+      "service" = {
+        "ports" = {
+          "mongodb" = floor(var.mongodb.service_port)
+        }
+      }
+
+      "networkPolicy" = {
+        "enabled" = true
+      }
+
+      "configsvr" = {
+        "replicaCount"      = var.sharding.configsvr.replicas
+        "mongodbExtraFlags" = local.mongodb_extra_flags
+        "nodeSelector"      = local.configsvr_node_selector
+
+        "tolerations" = local.configsvr_node_selector != {} ? [
+          for key, value in local.configsvr_node_selector : {
+            "key"   = key
+            "value" = value
+          }
+        ] : []
+
+        "podLabels" = local.configsvr_labels
+        "resources" = var.resources.configsvr
+
+        "persistentVolumeClaimRetentionPolicy" = {
+          "enabled"     = true
+          "whenDeleted" = "Delete"
+        }
+
+        "podSecurityContext" = {
+          "fsGroup" = var.security_context.fs_group
+        }
+        "containerSecurityContext" = {
+          "runAsUser"  = var.security_context.run_as_user
+          "runAsGroup" = var.security_context.fs_group
+        }
+      }
+
+      "mongos" = {
+        "replicaCount"      = var.sharding.router.replicas
+        "mongodbExtraFlags" = local.mongodb_extra_flags
+        "nodeSelector"      = local.router_node_selector
+
+        "tolerations" = local.router_node_selector != {} ? [
+          for key, value in local.router_node_selector : {
+            "key"   = key
+            "value" = value
+          }
+        ] : []
+
+        "podLabels" = local.router_labels
+        "resources" = var.resources.router
+      }
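+
+      # Note on the tolerations blocks above and below: each node selector
+      # entry is also converted into a toleration ({key, value}, default
+      # "Equal" operator, any effect), so that pods pinned to a dedicated
+      # node pool can still be scheduled when those nodes are tainted. This
+      # assumes the taint key/value pairs mirror the node labels.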
+
+      "shardsvr" = {
+        "dataNode" = {
+          "replicaCount"      = var.sharding.shards.replicas
+          "mongodbExtraFlags" = local.mongodb_extra_flags
+          "nodeSelector"      = local.shards_node_selector
+
+          "tolerations" = local.shards_node_selector != {} ? [
+            for key, value in local.shards_node_selector : {
+              "key"   = key
+              "value" = value
+            }
+          ] : []
+
+          "podLabels" = local.shards_labels
+          "resources" = var.resources.shards
+
+          "persistentVolumeClaimRetentionPolicy" = {
+            "enabled"     = true
+            "whenDeleted" = "Delete"
+          }
+
+          "podSecurityContext" = {
+            "fsGroup" = var.security_context.fs_group
+          }
+          "containerSecurityContext" = {
+            "runAsUser"  = var.security_context.run_as_user
+            "runAsGroup" = var.security_context.fs_group
+          }
+        }
+
+        "arbiter" = {
+          "replicaCount"      = local.arbiter_replicas
+          "mongodbExtraFlags" = local.mongodb_extra_flags
+          "nodeSelector"      = local.arbiter_node_selector
+
+          "tolerations" = local.arbiter_node_selector != {} ? [
+            for key, value in local.arbiter_node_selector : {
+              "key"   = key
+              "value" = value
+            }
+          ] : []
+
+          "extraVolumeMounts" = [{
+            mountPath = "/bitnami/mongodb/"
+            name      = "empty-dir"
+            subPath   = "app-volume-dir"
+          }]
+
+          "podLabels" = local.arbiter_labels
+          "resources" = var.resources.arbiter
+        }
+
+        # "metrics" = {
+        # }
+      }
+    })
+  ]
+
+  ### PERSISTENCE FOR DATABASE
+  dynamic "set" {
+    for_each = !can(coalesce(var.persistence.shards)) ? [1] : []
+
+    content {
+      name  = "shardsvr.persistence.enabled"
+      value = "false"
+    }
+  }
+  dynamic "set" {
+    for_each = can(coalesce(var.persistence.shards.storage_provisioner)) ? [1] : []
+    content {
+      name  = "shardsvr.persistence.storageClass"
+      value = kubernetes_storage_class.shards[0].metadata[0].name
+    }
+  }
+  dynamic "set" {
+    for_each = can(coalesce(var.persistence.shards)) ? [1] : []
+    content {
+      name  = "shardsvr.persistence.accessModes[0]"
+      value = var.persistence.shards.access_mode[0]
+    }
+  }
+  dynamic "set" {
+    for_each = can(coalesce(var.persistence.shards.resources.requests.storage)) ? [1] : []
+    content {
+      name  = "shardsvr.persistence.size"
+      value = var.persistence.shards.resources.requests.storage
+    }
+  }
+
+  ### PERSISTENCE FOR CONFIG SERVER
+  dynamic "set" {
+    for_each = !can(coalesce(var.persistence.configsvr)) ? [1] : []
+    content {
+      name  = "configsvr.persistence.enabled"
+      value = "false"
+    }
+  }
+  dynamic "set" {
+    for_each = can(coalesce(var.persistence.configsvr.storage_provisioner)) ? [1] : []
+    content {
+      name  = "configsvr.persistence.storageClass"
+      value = kubernetes_storage_class.configsvr[0].metadata[0].name
+    }
+  }
+  dynamic "set" {
+    for_each = can(coalesce(var.persistence.configsvr)) ? [1] : []
+    content {
+      name  = "configsvr.persistence.accessModes[0]"
+      value = var.persistence.configsvr.access_mode[0]
+    }
+  }
+  dynamic "set" {
+    for_each = can(coalesce(var.persistence.configsvr.resources.requests.storage)) ? [1] : []
+    content {
+      name  = "configsvr.persistence.size"
+      value = var.persistence.configsvr.resources.requests.storage
+    }
+  }
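+
+  # A quick reference for the can(coalesce(x)) guard used above:
+  #   x unset or null  -> coalesce() errors   -> can(...) = false
+  #   x = ""           -> coalesce() errors   -> can(...) = false
+  #   x = any other non-null value (e.g. "efs.csi.aws.com") -> can(...) = true
+  # Each set block is therefore emitted only when the corresponding
+  # persistence value was actually provided.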
+
+  # Setting this explicit dependency avoids a deadlock at destruction
+  depends_on = [kubernetes_storage_class.configsvr, kubernetes_storage_class.shards]
+}
diff --git a/storage/onpremise/mongodb-sharded/outputs.tf b/storage/onpremise/mongodb-sharded/outputs.tf
new file mode 100644
index 00000000..a081480c
--- /dev/null
+++ b/storage/onpremise/mongodb-sharded/outputs.tf
@@ -0,0 +1,82 @@
+output "host" {
+  description = "Hostname or IP address of the MongoDB server"
+  value       = local.mongodb_dns
+}
+
+output "port" {
+  description = "Port of the MongoDB server"
+  value       = var.mongodb.service_port
+}
+
+output "url" {
+  description = "URL of the MongoDB server"
+  value       = local.mongodb_url
+}
+
+output "number_of_replicas" {
+  description = "Number of replicas for each shard"
+  value       = var.sharding.shards.replicas
+}
+
+output "number_of_shards" {
+  description = "Number of MongoDB shards"
+  value       = var.sharding.shards.quantity
+}
+
+output "user_credentials" {
+  description = "User credentials of MongoDB"
+  value = {
+    secret    = kubernetes_secret.mongodb_user.metadata[0].name
+    data_keys = keys(kubernetes_secret.mongodb_user.data)
+  }
+}
+
+output "endpoints" {
+  description = "Endpoints of MongoDB"
+  value = {
+    secret    = kubernetes_secret.mongodb.metadata[0].name
+    data_keys = keys(kubernetes_secret.mongodb.data)
+  }
+}
+
+# New outputs
+output "env" {
+  description = "Elements to be set as environment variables"
+  value = {
+    "Components__TableStorage"  = "ArmoniK.Adapters.MongoDB.TableStorage"
+    "MongoDB__Host"             = local.mongodb_dns
+    "MongoDB__Port"             = var.mongodb.service_port
+    "MongoDB__Tls"              = "true"
+    "MongoDB__ReplicaSet"       = "rs0"
+    "MongoDB__DatabaseName"     = "database"
+    "MongoDB__DirectConnection" = "true"
+    "MongoDB__CAFile"           = "/mongodb/certs/chain.pem"
+    "MongoDB__Sharding"         = "true"
+    "MongoDB__AuthSource"       = "admin"
+  }
+}
+
+output "mount_secret" {
+  description = "Secrets to be mounted as volumes"
+  value = {
+    "mongo-certificate" = {
+      secret = kubernetes_secret.mongodb.metadata[0].name
+      path   = "/mongodb/certs/"
+      mode   = "0644"
+    }
+  }
+}
+
+output "env_from_secret" {
+  description = "Environment variables from secrets"
+  value = {
+    "MongoDB__User" = {
+      secret = kubernetes_secret.mongodb_admin.metadata[0].name
+      field  = "MONGO_USERNAME"
+    },
+    "MongoDB__Password" = {
+      secret = kubernetes_secret.mongodb_admin.metadata[0].name
+      field  = "MONGO_PASSWORD"
+    }
+  }
+}
diff --git a/storage/onpremise/mongodb-sharded/persistence.tf b/storage/onpremise/mongodb-sharded/persistence.tf
new file mode 100644
index 00000000..39fa1fd1
--- /dev/null
+++ b/storage/onpremise/mongodb-sharded/persistence.tf
@@ -0,0 +1,35 @@
+resource "kubernetes_storage_class" "shards" {
+  # Enabled only if var.persistence.shards is not null and var.persistence.shards.storage_provisioner is neither null nor empty
+  count = can(coalesce(var.persistence.shards.storage_provisioner)) ? 1 : 0
+  metadata {
+    name = "${var.name}-shards"
+    labels = {
+      app     = "mongodb"
+      type    = "storage-class"
+      service = "persistent-volume"
+    }
+  }
+  mount_options       = ["tls"]
+  storage_provisioner = var.persistence.shards.storage_provisioner
+  reclaim_policy      = var.persistence.shards.reclaim_policy
+  volume_binding_mode = var.persistence.shards.volume_binding_mode
+  parameters          = var.persistence.shards.parameters
+}
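+
+# Note: the "tls" mount option on these storage classes appears to target
+# provisioners that understand it, such as the AWS EFS CSI driver
+# (efs.csi.aws.com); other provisioners may fail to mount volumes with an
+# unrecognized option, so adjust it to match the target storage backend.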
+
+resource "kubernetes_storage_class" "configsvr" {
+  # Enabled only if var.persistence.configsvr.storage_provisioner is neither null nor empty
+  count = can(coalesce(var.persistence.configsvr.storage_provisioner)) ? 1 : 0
+  metadata {
+    name = "${var.name}-configsvr"
+    labels = {
+      app     = "mongodb"
+      type    = "storage-class"
+      service = "persistent-volume"
+    }
+  }
+  mount_options       = ["tls"]
+  storage_provisioner = var.persistence.configsvr.storage_provisioner
+  reclaim_policy      = var.persistence.configsvr.reclaim_policy
+  volume_binding_mode = var.persistence.configsvr.volume_binding_mode
+  parameters          = var.persistence.configsvr.parameters
+}
diff --git a/storage/onpremise/mongodb-sharded/secrets.tf b/storage/onpremise/mongodb-sharded/secrets.tf
new file mode 100644
index 00000000..e4cef131
--- /dev/null
+++ b/storage/onpremise/mongodb-sharded/secrets.tf
@@ -0,0 +1,47 @@
+data "kubernetes_secret" "mongodb_credentials" {
+  metadata {
+    name      = helm_release.mongodb.name
+    namespace = var.namespace
+  }
+  depends_on = [helm_release.mongodb]
+}
+
+resource "kubernetes_secret" "mongodb_admin" {
+  metadata {
+    name      = "${var.name}-admin"
+    namespace = var.namespace
+  }
+  data = {
+    MONGO_USERNAME = "root"
+    MONGO_PASSWORD = local.mongodb_root_password
+  }
+}
+
+resource "kubernetes_secret" "mongodb_user" {
+  metadata {
+    name      = "${var.name}-user"
+    namespace = var.namespace
+  }
+  data = {
+    # When using Bitnami's MongoDB image, MONGODB_USERNAME and MONGODB_PASSWORD are forbidden environment variable names, see https://github.com/bitnami/containers/issues/18506
+    MONGO_USERNAME = random_string.mongodb_application_user.result
+    MONGO_PASSWORD = random_password.mongodb_application_password.result
+  }
+}
+
+resource "kubernetes_secret" "mongodb" {
+  metadata {
+    name      = "custom-${var.name}"
+    namespace = helm_release.mongodb.namespace
+  }
+  data = {
+    "chain.pem"        = format("%s\n%s", tls_locally_signed_cert.mongodb_certificate.cert_pem, tls_self_signed_cert.root_mongodb.cert_pem)
+    username           = random_string.mongodb_application_user.result
+    password           = random_password.mongodb_application_password.result
+    host               = local.mongodb_dns
+    port               = var.mongodb.service_port
+    url                = local.mongodb_url
+    number_of_replicas = var.sharding.shards.replicas
+    number_of_shards   = var.sharding.shards.quantity
+  }
+}
diff --git a/storage/onpremise/mongodb-sharded/variables.tf b/storage/onpremise/mongodb-sharded/variables.tf
new file mode 100644
index 00000000..a3d4cf4c
--- /dev/null
+++ b/storage/onpremise/mongodb-sharded/variables.tf
@@ -0,0 +1,180 @@
+variable "namespace" {
+  description = "Namespace of ArmoniK resources"
+  type        = string
+  default     = "default"
+}
+
+variable "default_labels" {
+  description = "Default labels for the MongoDB-related Kubernetes pods"
+  type        = map(string)
+  default = {
+    "app"  = "storage"
+    "type" = "table"
+  }
+}
+
+variable "labels" {
+  description = "Custom labels for the different MongoDB entities"
+  type = object({
+    shards    = optional(map(string))
+    arbiter   = optional(map(string))
+    configsvr = optional(map(string))
+    router    = optional(map(string))
+  })
+  default = null
+}
+
+variable "name" {
+  description = "Name used for the Helm chart release and the associated resources, must be shorter than 54 characters"
+  type        = string
+  default     = "mongodb-sharded" # Ideally, do not change it: a custom name makes the created Kubernetes service name hard to predict,
+  # resulting in an incorrect connection string output
+}
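+
+# The 54-character limit above is presumably driven by Kubernetes' 63-character
+# DNS label limit: the chart appends suffixes (e.g. "-mongos", per-shard data
+# node names) to the release name when naming its services and stateful sets.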
description = "Name used for the helm chart release and the associated resources, must be shorter than 54 characters" + type = string + default = "mongodb-sharded" # Ideally not change it, cause it makes the create kube service name hard to manage + # resulting in a false connection string output +} + +variable "mongodb" { + description = "Parameters of the MongoDB deployment" + + type = object({ + database_name = optional(string, "database") + helm_chart_repository = optional(string, "oci://registry-1.docker.io/bitnamicharts") + helm_chart_name = optional(string, "mongodb-sharded") + helm_chart_version = string + image = optional(string, "bitnami/mongodb-sharded") + image_pull_secrets = optional(any, [""]) # can be a string or a list of strings + node_selector = optional(map(string), {}) + registry = optional(string) + service_port = optional(number, 27017) + tag = string + }) + + validation { + condition = var.mongodb.service_port >= 0 && var.mongodb.service_port < 65536 + error_message = "MongoDB service port must be a number between 0 included and 65535 included" + } +} + +variable "sharding" { + description = "Parameters specific to the sharded architecture" + type = object({ + shards = optional(object({ + quantity = optional(number, 2) + replicas = optional(number, 1) + node_selector = optional(map(string)) + })) + + arbiter = optional(object({ + node_selector = optional(map(string)) + })) + + router = optional(object({ + replicas = optional(number, 2) + node_selector = optional(map(string)) + })) + + configsvr = optional(object({ + replicas = optional(number, 1) + node_selector = optional(map(string)) + })) + }) + default = { + shards = {} + arbiter = {} + router = {} + configsvr = {} + } +} + +variable "resources" { + description = "Resources requests and limitations (cpu, memory, ephemeral-storage) for different types of MongoDB entities" + type = object({ + shards = optional(object({ + limits = optional(map(string)) + requests = optional(map(string)) + })) + + arbiter = optional(object({ + limits = optional(map(string)) + requests = optional(map(string)) + })) + + configsvr = optional(object({ + limits = optional(map(string)) + requests = optional(map(string)) + })) + + router = optional(object({ + limits = optional(map(string)) + requests = optional(map(string)) + })) + }) + default = { + shards = {} + arbiter = {} + router = {} + configsvr = {} + } +} + +variable "persistence" { + description = "Persistence parameters for MongoDB" + type = object({ + shards = optional(object({ + access_mode = optional(list(string), ["ReadWriteOnce"]) + reclaim_policy = optional(string, "Delete") + storage_provisioner = optional(string, "") + volume_binding_mode = optional(string, "WaitForFirstConsumer") + parameters = optional(map(string), {}) + + resources = optional(object({ + limits = optional(object({ + storage = string + })) + requests = optional(object({ + storage = string + })) + })) + })) + + configsvr = optional(object({ + access_mode = optional(list(string), ["ReadWriteOnce"]) + reclaim_policy = optional(string, "Delete") + storage_provisioner = optional(string, "") + volume_binding_mode = optional(string, "WaitForFirstConsumer") + parameters = optional(map(string), {}) + + resources = optional(object({ + limits = optional(object({ + storage = string + })) + requests = optional(object({ + storage = string + })) + })) + })) + }) + default = null +} + +variable "security_context" { + description = "Security context for MongoDB pods" + type = object({ + run_as_user = number + fs_group = 
+
+variable "security_context" {
+  description = "Security context for MongoDB pods"
+  type = object({
+    run_as_user = number
+    fs_group    = number
+  })
+  default = {
+    run_as_user = 999
+    fs_group    = 999
+  }
+}
+
+variable "timeout" {
+  description = "Timeout limit in seconds per shard for the Helm release creation"
+  type        = number
+  default     = 180
+}
+
+variable "validity_period_hours" {
+  description = "Validity period of the TLS certificate in hours"
+  type        = string
+  default     = "8760" # 1 year
+}
diff --git a/storage/onpremise/mongodb-sharded/versions.tf b/storage/onpremise/mongodb-sharded/versions.tf
new file mode 100644
index 00000000..b9b3b2a3
--- /dev/null
+++ b/storage/onpremise/mongodb-sharded/versions.tf
@@ -0,0 +1,25 @@
+terraform {
+  required_version = ">= 1.3"
+  required_providers {
+    helm = {
+      source  = "hashicorp/helm"
+      version = ">= 2.10.1"
+    }
+    random = {
+      source  = "hashicorp/random"
+      version = ">= 3.5.1"
+    }
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = ">= 2.21.1"
+    }
+    local = {
+      source  = "hashicorp/local"
+      version = ">= 2.4.0"
+    }
+    tls = {
+      source  = "hashicorp/tls"
+      version = ">= 4.0.4"
+    }
+  }
+}